filename
stringlengths 4
198
| content
stringlengths 25
939k
| environment
sequence | variablearg
sequence | constarg
sequence | variableargjson
stringclasses 1
value | constargjson
stringlengths 2
3.9k
| lang
stringclasses 3
values | constargcount
float64 0
129
⌀ | variableargcount
float64 0
0
⌀ | sentence
stringclasses 1
value |
---|---|---|---|---|---|---|---|---|---|---|
ipfs.go | package blockfrost
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
"os"
"path/filepath"
"sync"
"github.com/hashicorp/go-retryablehttp"
)
// Resource paths for the Blockfrost IPFS API endpoints, relative to
// the configured server URL.
const (
	resourceIPFSAdd       = "ipfs/add"
	resourceIPFSPin       = "ipfs/pin/add"
	resourceIPFSPinList   = "ipfs/pin/list"
	resourceIPFSPinRemove = "ipfs/pin/remove"
	resourceIPFSGateway   = "ipfs/gateway"
)

// ipfsClient is the concrete implementation of IPFSClient.
type ipfsClient struct {
	// server is the base URL of the Blockfrost IPFS API.
	server string
	// projectId authenticates requests against Blockfrost.
	projectId string
	// client is the retrying HTTP client used for every request.
	client *retryablehttp.Client
	// routines caps the goroutines used by the *All methods.
	routines int
}

// IPFSClientOptions configures NewIPFSClient. Zero-valued fields are
// replaced with defaults during construction.
type IPFSClientOptions struct {
	// The project_id to use from blockfrost. If not set
	// `BLOCKFROST_IPFS_PROJECT_ID` is loaded from env
	ProjectID string
	// Configures server to use. Can be toggled for test servers
	Server string
	// Interface implementing Do method such *http.Client
	Client *retryablehttp.Client
	// Max goroutines to use for *All Methods
	MaxRoutines int
}
// IPFSObject contains information on an IPFS object
type IPFSObject struct {
	// Name of the file
	Name string `json:"name"`
	// IPFS hash of the file
	IPFSHash string `json:"ipfs_hash"`
	// IPFS node size in Bytes
	Size string `json:"size"`
}

// IPFSPinnedObject contains information on a pinned object
type IPFSPinnedObject struct {
	// Creation time of the IPFS object on our backends
	TimeCreated int `json:"time_created"`
	// Pin time of the IPFS object on our backends
	TimePinned int `json:"time_pinned"`
	// IPFS hash of the pinned object
	IPFSHash string `json:"ipfs_hash"`
	// State of the pinned object, which is queued when we are retrieving the object.
	// If this is successful the state is changed to pinned or failed if not.
	// The state gc means the pinned item has been garbage collected due to account being
	// over storage quota or after it has been moved to unpinned state by removing the object pin.
	//
	// Value: "queued|pinned|unpinned|failed|gc"
	//
	// NOTE(review): the State and Size comments were previously swapped;
	// they now document the field they precede. Field order is unchanged.
	State string `json:"state"`
	// Size of the object in Bytes
	Size string `json:"size"`
}
// NewIPFSClient creates and returns an IPFS client configured using
// IPFSClientOptions. It will initialize the client with default options
// if provided with empty options: server IPFSNet, project id from the
// BLOCKFROST_IPFS_PROJECT_ID environment variable, and 10 goroutines
// for the *All methods.
func NewIPFSClient(options IPFSClientOptions) IPFSClient {
	if options.Server == "" {
		options.Server = IPFSNet
	}
	// Honor a caller-supplied HTTP client. Previously options.Client was
	// silently ignored even though it is a documented option.
	retryclient := options.Client
	if retryclient == nil {
		retryclient = retryablehttp.NewClient()
		// Silence the retry client's internal logging.
		retryclient.Logger = nil
	}
	if options.ProjectID == "" {
		options.ProjectID = os.Getenv("BLOCKFROST_IPFS_PROJECT_ID")
	}
	if options.MaxRoutines == 0 {
		options.MaxRoutines = 10
	}
	client := &ipfsClient{
		server:    options.Server,
		client:    retryclient,
		projectId: options.ProjectID,
		routines:  options.MaxRoutines,
	}
	return client
}
// IPFSClient is the interface exposed to consumers of this package for
// interacting with the Blockfrost IPFS API.
type IPFSClient interface {
	// Add uploads the file at filePath to IPFS storage.
	Add(ctx context.Context, filePath string) (IPFSObject, error)
	// Pin pins the object at path so it is not garbage collected.
	Pin(ctx context.Context, path string) (IPFSPinnedObject, error)
	// PinnedObject returns information on a single pinned object.
	PinnedObject(ctx context.Context, path string) (IPFSPinnedObject, error)
	// PinnedObjects returns one page of pinned objects controlled by query.
	PinnedObjects(ctx context.Context, query APIQueryParams) ([]IPFSPinnedObject, error)
	// Remove unpins the object at path.
	Remove(ctx context.Context, path string) (IPFSObject, error)
	// Gateway fetches the raw object bytes through the IPFS gateway.
	Gateway(ctx context.Context, path string) ([]byte, error)
	// PinnedObjectsAll streams every pinned object over a channel.
	PinnedObjectsAll(ctx context.Context) <-chan PinnedObjectResult
}

// PinnedObjectResult contains response and error from an All method
type PinnedObjectResult struct {
	// Res holds one fetched page of pinned objects.
	Res []IPFSPinnedObject
	// Err is non-nil if fetching that page failed.
	Err error
}
// Add a file to IPFS storage.
// You need to Pin an object to avoid it being garbage collected.
//
// The file at filePath is copied into a multipart/form-data body under
// the field name "file" and POSTed to the ipfs/add resource; the JSON
// response is decoded into an IPFSObject.
func (ip *ipfsClient) Add(ctx context.Context, filePath string) (ipo IPFSObject, err error) {
	requestUrl, err := url.Parse(fmt.Sprintf("%s/%s", ip.server, resourceIPFSAdd))
	if err != nil {
		return
	}
	file, err := os.Open(filePath)
	if err != nil {
		return
	}
	defer file.Close()
	// Build the multipart body in memory; note the whole file is
	// buffered before the request is sent.
	body := &bytes.Buffer{}
	wr := multipart.NewWriter(body)
	part, err := wr.CreateFormFile("file", filepath.Base(filePath))
	if err != nil {
		return
	}
	if _, err = io.Copy(part, file); err != nil {
		return
	}
	// Closing the writer appends the terminating boundary.
	if err = wr.Close(); err != nil {
		return
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, requestUrl.String(), body)
	if err != nil {
		return
	}
	// The Content-Type header carries the multipart boundary.
	req.Header.Add("Content-Type", wr.FormDataContentType())
	res, err := ip.handleRequest(req)
	if err != nil {
		return
	}
	defer res.Body.Close()
	if err = json.NewDecoder(res.Body).Decode(&ipo); err != nil {
		return
	}
	return ipo, nil
}
// PinnedObjectsAll gets all pinned objects by paging through the pin
// list with up to ip.routines concurrent workers. Returns a channel
// that can be used with range; it is closed after the last page has
// been delivered.
//
// Fixes over the previous version:
//   - jobs is now closed, so worker goroutines terminate instead of
//     leaking.
//   - the producer waits for all workers (wg.Wait) before closing the
//     result channel, preventing a "send on closed channel" panic.
//   - workers signal completion with a non-blocking send on quit, so
//     they cannot deadlock once the producer has stopped receiving.
func (ip *ipfsClient) PinnedObjectsAll(ctx context.Context) <-chan PinnedObjectResult {
	ch := make(chan PinnedObjectResult, ip.routines)
	jobs := make(chan methodOptions, ip.routines)
	quit := make(chan bool, ip.routines)
	wg := sync.WaitGroup{}
	for i := 0; i < ip.routines; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := range jobs {
				objs, err := ip.PinnedObjects(j.ctx, j.query)
				if len(objs) != j.query.Count || err != nil {
					// A short page or an error means there is nothing
					// further to fetch. Never block here: the producer
					// may already have stopped draining quit.
					select {
					case quit <- true:
					default:
					}
				}
				ch <- PinnedObjectResult{Res: objs, Err: err}
			}
		}()
	}
	go func() {
		// Shutdown order (LIFO defers): stop feeding workers, wait for
		// them to drain in-flight results, then close the output.
		defer close(ch)
		defer wg.Wait()
		defer close(jobs)
		for i := 1; ; i++ {
			select {
			case <-quit:
				return
			case jobs <- methodOptions{ctx: ctx, query: APIQueryParams{Count: 100, Page: i}}:
			}
		}
	}()
	return ch
}
// Pin an object to avoid it being garbage collected.
// The object identified by IPFSPath is POSTed to the ipfs/pin/add
// resource and the JSON response decoded into an IPFSPinnedObject.
func (ip *ipfsClient) Pin(ctx context.Context, IPFSPath string) (IPFSPinnedObject, error) {
	var pinned IPFSPinnedObject
	endpoint := fmt.Sprintf("%s/%s/%s", ip.server, resourceIPFSPin, IPFSPath)
	requestUrl, err := url.Parse(endpoint)
	if err != nil {
		return pinned, err
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, requestUrl.String(), nil)
	if err != nil {
		return pinned, err
	}
	res, err := ip.handleRequest(req)
	if err != nil {
		return pinned, err
	}
	defer res.Body.Close()
	if err := json.NewDecoder(res.Body).Decode(&pinned); err != nil {
		return pinned, err
	}
	return pinned, nil
}
// PinnedObject returns information about a locally pinned IPFS object.
// It issues a GET against the ipfs/pin/list resource for IPFSPath and
// decodes the JSON response into an IPFSPinnedObject.
func (ip *ipfsClient) PinnedObject(ctx context.Context, IPFSPath string) (ipo IPFSPinnedObject, err error) {
	requestUrl, err := url.Parse(fmt.Sprintf("%s/%s/%s", ip.server, resourceIPFSPinList, IPFSPath))
	if err != nil {
		return
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, requestUrl.String(), nil)
	if err != nil {
		return
	}
	res, err := ip.handleRequest(req)
	if err != nil {
		return
	}
	defer res.Body.Close()
	if err = json.NewDecoder(res.Body).Decode(&ipo); err != nil {
		return
	}
	return ipo, nil
}
// PinnedObjects returns information about locally pinned IPFS objects. Returns
// a slice of IPFSPinnedObject(s) whose quantity and offset is controlled by
// query parameters.
func (ip *ipfsClient) PinnedObjects(ctx context.Context, query APIQueryParams) (ipos []IPFSPinnedObject, err error) {
	requestUrl, err := url.Parse(fmt.Sprintf("%s/%s", ip.server, resourceIPFSPinList))
	if err != nil {
		return
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, requestUrl.String(), nil)
	if err != nil {
		return
	}
	// Encode the paging parameters (count, page, ...) into the query
	// string via the shared formatParams helper.
	v := req.URL.Query()
	v = formatParams(v, query)
	req.URL.RawQuery = v.Encode()
	res, err := ip.handleRequest(req)
	if err != nil {
		return
	}
	defer res.Body.Close()
	if err = json.NewDecoder(res.Body).Decode(&ipos); err != nil {
		return
	}
	return ipos, nil
}
// Remove - removes pinned objects from local storage. Returns an IPFSObject
// containing the removed object's information. The removal is requested
// with a POST to the ipfs/pin/remove resource for IPFSPath.
func (ip *ipfsClient) Remove(ctx context.Context, IPFSPath string) (IPFSObject, error) {
	var removed IPFSObject
	endpoint := fmt.Sprintf("%s/%s/%s", ip.server, resourceIPFSPinRemove, IPFSPath)
	requestUrl, err := url.Parse(endpoint)
	if err != nil {
		return removed, err
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, requestUrl.String(), nil)
	if err != nil {
		return removed, err
	}
	res, err := ip.handleRequest(req)
	if err != nil {
		return removed, err
	}
	defer res.Body.Close()
	if err := json.NewDecoder(res.Body).Decode(&removed); err != nil {
		return removed, err
	}
	return removed, nil
}
// Gateway retrieves an object from the IPFS gateway and returns its raw bytes
// (useful if you do not want to rely on a public gateway, such as `ipfs.blockfrost.dev`).
func (ip *ipfsClient) Gateway(ctx context.Context, IPFSPath string) (ipo []byte, err error) {
	requestUrl, err := url.Parse(fmt.Sprintf("%s/%s/%s", ip.server, resourceIPFSGateway, IPFSPath))
	if err != nil {
		return
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, requestUrl.String(), nil)
	if err != nil {
		return
	}
	res, err := ip.handleRequest(req)
	if err != nil {
		return
	}
	defer res.Body.Close()
	// The whole object is read into memory; large objects cost
	// proportional memory here.
	byteObj, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return
	}
	return byteObj, nil
}
| [
"\"BLOCKFROST_IPFS_PROJECT_ID\""
] | [] | [
"BLOCKFROST_IPFS_PROJECT_ID"
] | [] | ["BLOCKFROST_IPFS_PROJECT_ID"] | go | 1 | 0 | |
libcontainer/init_linux.go | // +build linux
package libcontainer
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"strings"
"unsafe"
"github.com/containerd/console"
"github.com/opencontainers/runc/libcontainer/capabilities"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/runc/libcontainer/user"
"github.com/opencontainers/runc/libcontainer/utils"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
)
// initType selects which init implementation newContainerInit builds.
type initType string

const (
	// initSetns joins an existing container's namespaces (runc exec).
	initSetns initType = "setns"
	// initStandard creates a brand-new container (runc create/run).
	initStandard initType = "standard"
)

// pid carries the PIDs reported back to the parent over the init pipe.
type pid struct {
	Pid           int `json:"pid"`
	PidFirstChild int `json:"pid_first"`
}

// network is an internal struct used to setup container networks.
type network struct {
	configs.Network
	// TempVethPeerName is a unique temporary veth peer name that was placed into
	// the container's namespace.
	TempVethPeerName string `json:"temp_veth_peer_name"`
}

// initConfig is used for transferring parameters from Exec() to Init().
// It is JSON-encoded by the parent process and decoded inside the
// container's init process (see newContainerInit).
type initConfig struct {
	Args             []string              `json:"args"`
	Env              []string              `json:"env"`
	Cwd              string                `json:"cwd"`
	Capabilities     *configs.Capabilities `json:"capabilities"`
	ProcessLabel     string                `json:"process_label"`
	AppArmorProfile  string                `json:"apparmor_profile"`
	NoNewPrivileges  bool                  `json:"no_new_privileges"`
	User             string                `json:"user"`
	AdditionalGroups []string              `json:"additional_groups"`
	Config           *configs.Config       `json:"config"`
	Networks         []*network            `json:"network"`
	PassedFilesCount int                   `json:"passed_files_count"`
	ContainerId      string                `json:"containerid"`
	Rlimits          []configs.Rlimit      `json:"rlimits"`
	CreateConsole    bool                  `json:"create_console"`
	ConsoleWidth     uint16                `json:"console_width"`
	ConsoleHeight    uint16                `json:"console_height"`
	RootlessEUID     bool                  `json:"rootless_euid,omitempty"`
	RootlessCgroups  bool                  `json:"rootless_cgroups,omitempty"`
	SpecState        *specs.State          `json:"spec_state,omitempty"`
}

// initer is implemented by both linuxSetnsInit and linuxStandardInit.
type initer interface {
	Init() error
}
// newContainerInit decodes the initConfig sent by the parent over pipe,
// loads the configured environment into the current process, and
// returns the initer implementation matching t (setns for joining an
// existing container, standard for creating a new one).
func newContainerInit(t initType, pipe *os.File, consoleSocket *os.File, fifoFd, logFd int) (initer, error) {
	var config *initConfig
	if err := json.NewDecoder(pipe).Decode(&config); err != nil {
		return nil, err
	}
	// Apply config.Env before anything else so later steps observe the
	// container's environment.
	if err := populateProcessEnvironment(config.Env); err != nil {
		return nil, err
	}
	switch t {
	case initSetns:
		return &linuxSetnsInit{
			pipe:          pipe,
			consoleSocket: consoleSocket,
			config:        config,
			logFd:         logFd,
		}, nil
	case initStandard:
		return &linuxStandardInit{
			pipe:          pipe,
			consoleSocket: consoleSocket,
			parentPid:     unix.Getppid(),
			config:        config,
			fifoFd:        fifoFd,
			logFd:         logFd,
		}, nil
	}
	return nil, fmt.Errorf("unknown init type %q", t)
}
// populateProcessEnvironment loads the provided environment variables into the
// current processes's environment.
func populateProcessEnvironment(env []string) error {
for _, pair := range env {
p := strings.SplitN(pair, "=", 2)
if len(p) < 2 {
return fmt.Errorf("invalid environment '%v'", pair)
}
if err := os.Setenv(p[0], p[1]); err != nil {
return err
}
}
return nil
}
// finalizeNamespace drops the caps, sets the correct user
// and working dir, and closes any leaked file descriptors
// before executing the command inside the namespace.
//
// The ordering here is deliberate: bounding-set caps are dropped before
// the user change, caps are preserved across the user change via
// keep-caps, and the final cap set is applied only after keep-caps is
// cleared.
func finalizeNamespace(config *initConfig) error {
	// Ensure that all unwanted fds we may have accidentally
	// inherited are marked close-on-exec so they stay out of the
	// container
	if err := utils.CloseExecFrom(config.PassedFilesCount + 3); err != nil {
		return errors.Wrap(err, "close exec fds")
	}
	// we only do chdir if it's specified
	doChdir := config.Cwd != ""
	if doChdir {
		// First, attempt the chdir before setting up the user.
		// This could allow us to access a directory that the user running runc can access
		// but the container user cannot.
		err := unix.Chdir(config.Cwd)
		switch {
		case err == nil:
			doChdir = false
		case os.IsPermission(err):
			// If we hit an EPERM, we should attempt again after setting up user.
			// This will allow us to successfully chdir if the container user has access
			// to the directory, but the user running runc does not.
			// This is useful in cases where the cwd is also a volume that's been chowned to the container user.
		default:
			return fmt.Errorf("chdir to cwd (%q) set in config.json failed: %v", config.Cwd, err)
		}
	}
	// Prefer per-process capabilities, falling back to the container
	// config's capabilities, then to an empty set.
	caps := &configs.Capabilities{}
	if config.Capabilities != nil {
		caps = config.Capabilities
	} else if config.Config.Capabilities != nil {
		caps = config.Config.Capabilities
	}
	w, err := capabilities.New(caps)
	if err != nil {
		return err
	}
	// drop capabilities in bounding set before changing user
	if err := w.ApplyBoundingSet(); err != nil {
		return errors.Wrap(err, "apply bounding set")
	}
	// preserve existing capabilities while we change users
	if err := system.SetKeepCaps(); err != nil {
		return errors.Wrap(err, "set keep caps")
	}
	if err := setupUser(config); err != nil {
		return errors.Wrap(err, "setup user")
	}
	// Change working directory AFTER the user has been set up, if we haven't done it yet.
	if doChdir {
		if err := unix.Chdir(config.Cwd); err != nil {
			return fmt.Errorf("chdir to cwd (%q) set in config.json failed: %v", config.Cwd, err)
		}
	}
	if err := system.ClearKeepCaps(); err != nil {
		return errors.Wrap(err, "clear keep caps")
	}
	if err := w.ApplyCaps(); err != nil {
		return errors.Wrap(err, "apply caps")
	}
	return nil
}
// setupConsole sets up the console from inside the container, and sends the
// master pty fd to the config.Pipe (using cmsg). This is done to ensure that
// consoles are scoped to a container properly (see runc#814 and the many
// issues related to that). This has to be run *after* we've pivoted to the new
// rootfs (and the users' configuration is entirely set up).
func setupConsole(socket *os.File, config *initConfig, mount bool) error {
	defer socket.Close()
	// At this point, /dev/ptmx points to something that we would expect. We
	// used to change the owner of the slave path, but since the /dev/pts mount
	// can have gid=X set (at the users' option). So touching the owner of the
	// slave PTY is not necessary, as the kernel will handle that for us. Note
	// however, that setupUser (specifically fixStdioPermissions) *will* change
	// the UID owner of the console to be the user the process will run as (so
	// they can actually control their console).
	pty, slavePath, err := console.NewPty()
	if err != nil {
		return err
	}
	// After we return from here, we don't need the console anymore.
	defer pty.Close()
	// Apply the requested terminal dimensions, if any were configured.
	if config.ConsoleHeight != 0 && config.ConsoleWidth != 0 {
		err = pty.Resize(console.WinSize{
			Height: config.ConsoleHeight,
			Width:  config.ConsoleWidth,
		})
		if err != nil {
			return err
		}
	}
	// Mount the console inside our rootfs.
	if mount {
		if err := mountConsole(slavePath); err != nil {
			return err
		}
	}
	// While we can access console.master, using the API is a good idea.
	if err := utils.SendFd(socket, pty.Name(), pty.Fd()); err != nil {
		return err
	}
	// Now, dup over all the things.
	return dupStdio(slavePath)
}
// syncParentReady sends to the given pipe a JSON payload which indicates that
// the init is ready to Exec the child process. It then waits for the parent to
// indicate that it is cleared to Exec.
func syncParentReady(pipe io.ReadWriter) error {
	// Notify the parent that we are ready.
	err := writeSync(pipe, procReady)
	if err != nil {
		return err
	}
	// Block until the parent acknowledges with procRun.
	return readSync(pipe, procRun)
}
// syncParentHooks sends to the given pipe a JSON payload which indicates that
// the parent should execute pre-start hooks. It then waits for the parent to
// indicate that it is cleared to resume.
func syncParentHooks(pipe io.ReadWriter) error {
	// Ask the parent to run the hooks.
	err := writeSync(pipe, procHooks)
	if err != nil {
		return err
	}
	// Block until the parent acknowledges with procResume.
	return readSync(pipe, procResume)
}
// setupUser changes the groups, gid, and uid for the user inside the container.
//
// Ordering matters: stdio ownership is fixed and supplementary groups
// are applied while we still have privileges, the gid is changed before
// the uid (setgid would fail after dropping uid 0), and HOME is set last.
func setupUser(config *initConfig) error {
	// Set up defaults.
	defaultExecUser := user.ExecUser{
		Uid:  0,
		Gid:  0,
		Home: "/",
	}
	passwdPath, err := user.GetPasswdPath()
	if err != nil {
		return err
	}
	groupPath, err := user.GetGroupPath()
	if err != nil {
		return err
	}
	// Resolve config.User ("user[:group]") against the container's
	// passwd/group files, falling back to the defaults above.
	execUser, err := user.GetExecUserPath(config.User, &defaultExecUser, passwdPath, groupPath)
	if err != nil {
		return err
	}
	var addGroups []int
	if len(config.AdditionalGroups) > 0 {
		addGroups, err = user.GetAdditionalGroupsPath(config.AdditionalGroups, groupPath)
		if err != nil {
			return err
		}
	}
	// Rather than just erroring out later in setuid(2) and setgid(2), check
	// that the user is mapped here.
	if _, err := config.Config.HostUID(execUser.Uid); err != nil {
		return errors.New("cannot set uid to unmapped user in user namespace")
	}
	if _, err := config.Config.HostGID(execUser.Gid); err != nil {
		return errors.New("cannot set gid to unmapped user in user namespace")
	}
	if config.RootlessEUID {
		// We cannot set any additional groups in a rootless container and thus
		// we bail if the user asked us to do so. TODO: We currently can't do
		// this check earlier, but if libcontainer.Process.User was typesafe
		// this might work.
		if len(addGroups) > 0 {
			return errors.New("cannot set any additional groups in a rootless container")
		}
	}
	// Before we change to the container's user make sure that the processes
	// STDIO is correctly owned by the user that we are switching to.
	if err := fixStdioPermissions(config, execUser); err != nil {
		return err
	}
	setgroups, err := ioutil.ReadFile("/proc/self/setgroups")
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	// This isn't allowed in an unprivileged user namespace since Linux 3.19.
	// There's nothing we can do about /etc/group entries, so we silently
	// ignore setting groups here (since the user didn't explicitly ask us to
	// set the group).
	allowSupGroups := !config.RootlessEUID && string(bytes.TrimSpace(setgroups)) != "deny"
	if allowSupGroups {
		suppGroups := append(execUser.Sgids, addGroups...)
		if err := unix.Setgroups(suppGroups); err != nil {
			return err
		}
	}
	// Drop gid before uid: once the uid changes we may no longer have
	// permission to change groups.
	if err := system.Setgid(execUser.Gid); err != nil {
		return err
	}
	if err := system.Setuid(execUser.Uid); err != nil {
		return err
	}
	// if we didn't get HOME already, set it based on the user's HOME
	if envHome := os.Getenv("HOME"); envHome == "" {
		if err := os.Setenv("HOME", execUser.Home); err != nil {
			return err
		}
	}
	return nil
}
// fixStdioPermissions fixes the permissions of PID 1's STDIO within the container to the specified user.
// The ownership needs to match because it is created outside of the container and needs to be
// localized.
func fixStdioPermissions(config *initConfig, u *user.ExecUser) error {
	// Stat /dev/null once so fds redirected to it can be skipped below.
	var null unix.Stat_t
	if err := unix.Stat("/dev/null", &null); err != nil {
		return err
	}
	for _, fd := range []uintptr{
		os.Stdin.Fd(),
		os.Stderr.Fd(),
		os.Stdout.Fd(),
	} {
		var s unix.Stat_t
		if err := unix.Fstat(int(fd), &s); err != nil {
			return err
		}
		// Skip chown of /dev/null if it was used as one of the STDIO fds.
		if s.Rdev == null.Rdev {
			continue
		}
		// We only change the uid owner (as it is possible for the mount to
		// prefer a different gid, and there's no reason for us to change it).
		// The reason why we don't just leave the default uid=X mount setup is
		// that users expect to be able to actually use their console. Without
		// this code, you couldn't effectively run as a non-root user inside a
		// container and also have a console set up.
		if err := unix.Fchown(int(fd), u.Uid, int(s.Gid)); err != nil {
			// If we've hit an EINVAL then s.Gid isn't mapped in the user
			// namespace. If we've hit an EPERM then the inode's current owner
			// is not mapped in our user namespace (in particular,
			// privileged_wrt_inode_uidgid() has failed). In either case, we
			// are in a configuration where it's better for us to just not
			// touch the stdio rather than bail at this point.
			if err == unix.EINVAL || err == unix.EPERM {
				continue
			}
			return err
		}
	}
	return nil
}
// setupNetwork sets up and initializes any network interface inside the container.
func setupNetwork(config *initConfig) error {
	// Each network entry is dispatched to the strategy registered for
	// its type, which performs the actual interface initialization.
	for _, nw := range config.Networks {
		strategy, err := getStrategy(nw.Type)
		if err != nil {
			return err
		}
		if err := strategy.initialize(nw); err != nil {
			return err
		}
	}
	return nil
}
// setupRoute installs every configured static route via netlink.
func setupRoute(config *configs.Config) error {
	for _, rt := range config.Routes {
		// Destination must be valid CIDR; source and gateway must be
		// parseable IP addresses.
		_, dst, err := net.ParseCIDR(rt.Destination)
		if err != nil {
			return err
		}
		src := net.ParseIP(rt.Source)
		if src == nil {
			return fmt.Errorf("Invalid source for route: %s", rt.Source)
		}
		gw := net.ParseIP(rt.Gateway)
		if gw == nil {
			return fmt.Errorf("Invalid gateway for route: %s", rt.Gateway)
		}
		link, err := netlink.LinkByName(rt.InterfaceName)
		if err != nil {
			return err
		}
		newRoute := &netlink.Route{
			Scope:     netlink.SCOPE_UNIVERSE,
			Dst:       dst,
			Src:       src,
			Gw:        gw,
			LinkIndex: link.Attrs().Index,
		}
		if err := netlink.RouteAdd(newRoute); err != nil {
			return err
		}
	}
	return nil
}
// setupRlimits applies each configured resource limit to the process
// identified by pid via prlimit(2).
func setupRlimits(limits []configs.Rlimit, pid int) error {
	for _, limit := range limits {
		rl := unix.Rlimit{Max: limit.Hard, Cur: limit.Soft}
		if err := system.Prlimit(pid, limit.Type, rl); err != nil {
			return fmt.Errorf("error setting rlimit type %v: %v", limit.Type, err)
		}
	}
	return nil
}
// _P_PID is the idtype argument to waitid(2) selecting a single PID.
const _P_PID = 1

//nolint:structcheck,unused
type siginfo struct {
	si_signo int32
	si_errno int32
	si_code  int32
	// below here is a union; si_pid is the only field we use
	si_pid int32
	// Pad to 128 bytes as detailed in blockUntilWaitable
	pad [96]byte
}

// isWaitable returns true if the process has exited false otherwise.
// Its based off blockUntilWaitable in src/os/wait_waitid.go
func isWaitable(pid int) (bool, error) {
	si := &siginfo{}
	// WNOWAIT leaves the child in a waitable state; WNOHANG makes the
	// call non-blocking, so si_pid stays 0 if the child has not exited.
	_, _, e := unix.Syscall6(unix.SYS_WAITID, _P_PID, uintptr(pid), uintptr(unsafe.Pointer(si)), unix.WEXITED|unix.WNOWAIT|unix.WNOHANG, 0, 0)
	if e != 0 {
		return false, os.NewSyscallError("waitid", e)
	}
	return si.si_pid != 0, nil
}
// isNoChildren returns true if err represents a unix.Errno or
// *os.SyscallError wrapping unix.ECHILD (formerly syscall.ECHILD),
// false otherwise.
func isNoChildren(err error) bool {
	switch e := err.(type) {
	case unix.Errno:
		return e == unix.ECHILD
	case *os.SyscallError:
		return e.Err == unix.ECHILD
	}
	return false
}
// signalAllProcesses freezes then iterates over all the processes inside the
// manager's cgroups sending the signal s to them.
// If s is SIGKILL then it will wait for each process to exit.
// For all other signals it will check if the process is ready to report its
// exit status and only if it is will a wait be performed.
func signalAllProcesses(m cgroups.Manager, s os.Signal) error {
	var procs []*os.Process
	// Freeze so the pid list cannot change while we signal; failures are
	// logged but not fatal (best effort).
	if err := m.Freeze(configs.Frozen); err != nil {
		logrus.Warn(err)
	}
	pids, err := m.GetAllPids()
	if err != nil {
		if err := m.Freeze(configs.Thawed); err != nil {
			logrus.Warn(err)
		}
		return err
	}
	for _, pid := range pids {
		p, err := os.FindProcess(pid)
		if err != nil {
			logrus.Warn(err)
			continue
		}
		procs = append(procs, p)
		if err := p.Signal(s); err != nil {
			logrus.Warn(err)
		}
	}
	// Thaw before waiting, otherwise the processes could never exit.
	if err := m.Freeze(configs.Thawed); err != nil {
		logrus.Warn(err)
	}
	subreaper, err := system.GetSubreaper()
	if err != nil {
		// The error here means that PR_GET_CHILD_SUBREAPER is not
		// supported because this code might run on a kernel older
		// than 3.4. We don't want to throw an error in that case,
		// and we simplify things, considering there is no subreaper
		// set.
		subreaper = 0
	}
	for _, p := range procs {
		if s != unix.SIGKILL {
			if ok, err := isWaitable(p.Pid); err != nil {
				if !isNoChildren(err) {
					logrus.Warn("signalAllProcesses: ", p.Pid, err)
				}
				continue
			} else if !ok {
				// Not ready to report so don't wait
				continue
			}
		}
		// In case a subreaper has been setup, this code must not
		// wait for the process. Otherwise, we cannot be sure the
		// current process will be reaped by the subreaper, while
		// the subreaper might be waiting for this process in order
		// to retrieve its exit code.
		if subreaper == 0 {
			if _, err := p.Wait(); err != nil {
				if !isNoChildren(err) {
					logrus.Warn("wait: ", err)
				}
			}
		}
	}
	return nil
}
| [
"\"HOME\""
] | [] | [
"HOME"
] | [] | ["HOME"] | go | 1 | 0 | |
awx/settings/production.py | # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Production settings for AWX project.
# Python
import os
import copy
import errno
import sys
import traceback
# Django Split Settings
from split_settings.tools import optional, include
# Load default settings.
from defaults import * # NOQA
# Production hardening: all debugging features off.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
SQL_DEBUG = DEBUG

# Clear database settings to force production environment to define them.
DATABASES = {}

# Clear the secret key to force production environment to define it.
SECRET_KEY = None

# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []

# Production should only use minified JS for UI.
USE_MINIFIED_JS = True

# URL used by inventory script and callback plugin to access API.
INTERNAL_API_URL = 'http://127.0.0.1:80'

# Absolute filesystem path to the directory for job status stdout
# This directory should not be web-accessible
JOBOUTPUT_ROOT = '/var/lib/awx/job_status/'

# The heartbeat file for the tower scheduler
SCHEDULE_METADATA_LOCATION = '/var/lib/awx/.tower_cycle'

# Ansible base virtualenv paths and enablement
BASE_VENV_PATH = "/var/lib/awx/venv"
ANSIBLE_VENV_PATH = os.path.join(BASE_VENV_PATH, "ansible")

# Tower base virtualenv paths and enablement
AWX_VENV_PATH = os.path.join(BASE_VENV_PATH, "awx")

AWX_ISOLATED_USERNAME = 'awx'

# Redirect every log handler defined in defaults to the production
# /var/log/tower locations.
LOGGING['handlers']['tower_warnings']['filename'] = '/var/log/tower/tower.log'
LOGGING['handlers']['callback_receiver']['filename'] = '/var/log/tower/callback_receiver.log'
LOGGING['handlers']['task_system']['filename'] = '/var/log/tower/task_system.log'
LOGGING['handlers']['fact_receiver']['filename'] = '/var/log/tower/fact_receiver.log'
LOGGING['handlers']['management_playbooks']['filename'] = '/var/log/tower/management_playbooks.log'
LOGGING['handlers']['system_tracking_migrations']['filename'] = '/var/log/tower/tower_system_tracking_migrations.log'
LOGGING['handlers']['rbac_migrations']['filename'] = '/var/log/tower/tower_rbac_migrations.log'

# Supervisor service name dictionary used for programatic restart
SERVICE_NAME_DICT = {
    "beat": "awx-celery-beat",
    "celery": "awx-celery",
    "callback": "awx-callback-receiver",
    "channels": "awx-channels-worker",
    "uwsgi": "awx-uwsgi",
    "daphne": "awx-daphne"}

# Store a snapshot of default settings at this point before loading any
# customizable config files. Only ALL-UPPERCASE names are treated as
# settings (the `setting == setting.upper()` test).
DEFAULTS_SNAPSHOT = {}
this_module = sys.modules[__name__]
for setting in dir(this_module):
    if setting == setting.upper():
        DEFAULTS_SNAPSHOT[setting] = copy.deepcopy(getattr(this_module, setting))
# Load settings from any .py files in the global conf.d directory specified in
# the environment, defaulting to /etc/tower/conf.d/.
settings_dir = os.environ.get('AWX_SETTINGS_DIR', '/etc/tower/conf.d/')
settings_files = os.path.join(settings_dir, '*.py')
# Load remaining settings from the global settings file specified in the
# environment, defaulting to /etc/tower/settings.py.
settings_file = os.environ.get('AWX_SETTINGS_FILE',
                               '/etc/tower/settings.py')
# Attempt to load settings from /etc/tower/settings.py first, followed by
# /etc/tower/conf.d/*.py.
try:
    include(settings_file, optional(settings_files), scope=locals())
except ImportError:
    traceback.print_exc()
    sys.exit(1)
except IOError:
    from django.core.exceptions import ImproperlyConfigured
    included_file = locals().get('__included_file__', '')
    if (not included_file or included_file == settings_file):
        # The import doesn't always give permission denied, so try to open the
        # settings file directly.
        # NOTE(review): relying on `e` after an `except ... as e` suite is
        # Python 2 behavior only; in Python 3 the name is unbound once the
        # handler exits, which would raise NameError below.
        try:
            e = None
            open(settings_file)
        except IOError as e:
            pass
        if e and e.errno == errno.EACCES:
            # Permission denied: degrade to a sentinel config instead of
            # refusing to start.
            SECRET_KEY = 'permission-denied'
            LOGGING = {}
        else:
            msg = 'No AWX configuration found at %s.' % settings_file
            msg += '\nDefine the AWX_SETTINGS_FILE environment variable to '
            msg += 'specify an alternate path.'
            raise ImproperlyConfigured(msg)
    else:
        raise
"AWX_SETTINGS_DIR",
"AWX_SETTINGS_FILE"
] | [] | ["AWX_SETTINGS_DIR", "AWX_SETTINGS_FILE"] | python | 2 | 0 | |
app-services/cloud-event-transforms/main.go | //
// Copyright (c) 2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package main
import (
"context"
"errors"
"fmt"
"os"
"github.com/edgexfoundry/go-mod-core-contracts/models"
cloudevents "github.com/cloudevents/sdk-go"
localtransforms "github.com/edgexfoundry-holding/app-service-examples/app-services/cloud-event-transforms/pkg/transforms"
"github.com/edgexfoundry/app-functions-sdk-go/appcontext"
"github.com/edgexfoundry/app-functions-sdk-go/appsdk"
)
const (
	// serviceKey is the key this app service registers with the EdgeX SDK.
	serviceKey = "cloudEventTransform"
)
// main wires up the example: it initializes the EdgeX app-functions SDK
// (with the secret store disabled), starts a local cloud-event receiver
// on :8080, and runs a pipeline that converts EdgeX events to
// CloudEvents, sends them, converts them back, and prints the result.
func main() {
	// First thing to do is to create an instance of the EdgeX SDK and initialize it.
	edgexSdk := &appsdk.AppFunctionsSDK{ServiceKey: serviceKey}
	// Disable the secret store for this example; restore the previous
	// setting when main exits.
	original, _ := disableSecureStore()
	defer resetSecureStoreEnv(edgexSdk, original)
	if err := edgexSdk.Initialize(); err != nil {
		message := fmt.Sprintf("SDK initialization failed: %v", err)
		if edgexSdk.LoggingClient != nil {
			edgexSdk.LoggingClient.Error(message)
		} else {
			edgexSdk.LoggingClient.Info(message)
		}
		os.Exit(-1)
	}
	// Setup cloudEvent client
	ctx := context.Background()
	c, err := cloudevents.NewDefaultClient()
	if err != nil {
		edgexSdk.LoggingClient.Error(fmt.Sprintf("failed to create client, %v", err))
	}
	// Start cloudEvent receiver server.
	// This would probably be a different process running on a different host but
	// this is just to illustrate sending and receiving of cloudevents events
	go func() {
		edgexSdk.LoggingClient.Info("will listen on :8080")
		edgexSdk.LoggingClient.Error(fmt.Sprintf("failed to start receiver: %s", c.StartReceiver(ctx, gotEvent)))
	}()
	// Setup pipeline. The event our first function will get is a edgex event, this
	// will be transformed to a cloudevent and sent on the next function. The next function, sendCloudEvent
	// will send the event. Then the next function will transform the event back to an EdgeX event. The last
	// function will simply print the original event to the console
	edgexSdk.SetFunctionsPipeline(
		localtransforms.NewConversion().TransformToCloudEvent,
		sendCloudEvents,
		localtransforms.NewConversion().TransformFromCloudEvent,
		printToConsole,
	)
	// Lastly, we'll go ahead and tell the SDK to "start" and begin listening for events
	err = edgexSdk.MakeItRun()
	if err != nil {
		edgexSdk.LoggingClient.Error("MakeItRun returned error: ", err.Error())
		os.Exit(-1)
	}
	// Do any required cleanup here
	os.Exit(0)
}
// gotEvent is invoked by the cloud-event receiver for each incoming
// event. It decodes the event payload as a string reading value and
// prints it. The decode error was previously ignored, which would
// silently print an empty value on malformed data.
func gotEvent(ctx context.Context, event cloudevents.Event) {
	var readingData string
	if err := event.DataAs(&readingData); err != nil {
		fmt.Printf("failed to decode cloud event data: %v\n", err)
		return
	}
	fmt.Printf("CloudEvent received reading value: %v\n", readingData)
}
// sendCloudEvents is the pipeline function that POSTs each CloudEvent
// produced by the previous stage to the local receiver at
// http://localhost:8080/. On success it passes the unchanged slice of
// events to the next pipeline function; on failure it stops the
// pipeline (returns false).
func sendCloudEvents(edgexcontext *appcontext.Context, params ...interface{}) (bool, interface{}) {
	edgexcontext.LoggingClient.Info("sendCloudEvent")
	if len(params) < 1 {
		return false, errors.New("No Event Received")
	}
	events, ok := params[0].([]cloudevents.Event)
	if !ok {
		return false, errors.New("Cloud event not received")
	}
	ctx := cloudevents.ContextWithTarget(context.Background(), "http://localhost:8080/")
	ctx = cloudevents.ContextWithHeader(ctx, "demo", "header value")
	c, err := cloudevents.NewDefaultClient()
	if err != nil {
		edgexcontext.LoggingClient.Error(fmt.Sprintf("failed to create client, %v", err))
		return false, nil
	}
	for _, cloudevent := range events {
		if _, resp, err := c.Send(ctx, cloudevent); err != nil {
			edgexcontext.LoggingClient.Error(fmt.Sprintf("failed to send: %v", err))
			return false, nil
		} else if resp != nil {
			// don't need a response back, in this example we aren't expecting/sending one
			edgexcontext.LoggingClient.Info(fmt.Sprintf("got back a response: %s", resp))
		}
	}
	return true, events
}
// printToConsole is the final pipeline stage: it logs the EdgeX event that was
// round-tripped through the cloudevent conversions. It always returns
// (false, nil) since nothing further is exported from the pipeline.
func printToConsole(edgexcontext *appcontext.Context, params ...interface{}) (bool, interface{}) {
	edgexcontext.LoggingClient.Info("PrintToConsole")
	if len(params) < 1 {
		// We didn't receive a result
		return false, nil
	}
	// A bare type assertion would panic if an upstream stage passed an
	// unexpected type; use the comma-ok form and log instead of crashing.
	edgexEvent, ok := params[0].(models.Event)
	if !ok {
		edgexcontext.LoggingClient.Error("printToConsole: expected models.Event in params[0]")
		return false, nil
	}
	edgexcontext.LoggingClient.Info(fmt.Sprintf("Original EdgexEvent after it was transformed into a cloudEvent and then back to an EdgexEvent: %v", edgexEvent))
	return false, nil
}
// helper function to disable security
func disableSecureStore() (origEnv string, err error) {
origEnv = os.Getenv("EDGEX_SECURITY_SECRET_STORE")
err = os.Setenv("EDGEX_SECURITY_SECRET_STORE", "false")
return origEnv, err
}
// helper function to reset security related env vars
// resetSecureStoreEnv restores EDGEX_SECURITY_SECRET_STORE to the value that
// was captured before disableSecureStore ran, logging if the restore fails.
func resetSecureStoreEnv(edgexSdk *appsdk.AppFunctionsSDK, origEnv string) {
	err := os.Setenv("EDGEX_SECURITY_SECRET_STORE", origEnv)
	if err == nil {
		return
	}
	edgexSdk.LoggingClient.Error("Failed to set env variable: EDGEX_SECURITY_SECRET_STORE back to original value")
}
| [
"\"EDGEX_SECURITY_SECRET_STORE\""
] | [] | [
"EDGEX_SECURITY_SECRET_STORE"
] | [] | ["EDGEX_SECURITY_SECRET_STORE"] | go | 1 | 0 | |
server.go | package main
import (
"bones/config"
"bones/db/sqlrepositories"
"bones/repositories"
"bones/web/authentication"
"bones/web/filters"
"bones/web/handlers"
"bones/web/sessions"
"bones/web/templating"
"github.com/gorilla/pat"
"log"
"net/http"
"os"
)
// Router
var r *pat.Router
// Services
var templateRenderer handlers.TemplateRenderer
var shortcuts handlers.Shortcuts
var authenticator handlers.Authenticator
var sessionStore handlers.SessionStore
// Repositories
var userRepository repositories.UserRepository
// Filters
var f *filters.Filters
// Handlers
var homeHandler *handlers.HomeHandler
var loginHandler *handlers.LoginHandler
var signupHandler *handlers.SignupHandler
// main wires the application together: connects the database, enables
// sessions, builds the dependency graph, registers static-asset routes, and
// starts the HTTP server on the configured port.
func main() {
	sqlrepositories.Connect(config.DatabaseConnectionString())
	defer sqlrepositories.Cleanup()
	sessions.Enable()
	setupDependencies()
	http.Handle("/assets/", http.StripPrefix("/assets/", http.FileServer(http.Dir("./assets"))))
	// FIX: the original closures named these parameters (req, res) in the
	// wrong order relative to their types; renamed to the conventional (w, r).
	http.HandleFunc("/favicon.ico", func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, "./assets/images/favicon.png")
	})
	http.HandleFunc("/robots.txt", func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, "./assets/robots.txt")
	})
	setupRouting()
	http.Handle("/", r)
	port := portFromEnvOrDefault()
	log.Println("Starting server on port", port)
	log.Fatal(http.ListenAndServe(":"+port, nil))
}
// setupDependencies constructs the router, repositories, services, filters
// and handlers, wiring them into the package-level variables consumed by
// setupRouting and main.
func setupDependencies() {
	r = pat.New()
	handlers.SetRouter(r)
	// BUG FIX: the original used ":=" here, declaring a new local that
	// shadowed the package-level userRepository and left the global nil for
	// any code referencing it outside this function. Use plain assignment.
	userRepository = sqlrepositories.NewUserRepository()
	authenticator = &authentication.EmailAuthenticator{userRepository}
	sessionStore = &sessions.CookieSessionStore{}
	templateRenderer = templating.NewTemplateRenderer()
	shortcuts = handlers.Shortcuts{templateRenderer, sessionStore}
	f = &filters.Filters{shortcuts, sessionStore, userRepository}
	homeHandler = &handlers.HomeHandler{shortcuts, userRepository}
	loginHandler = &handlers.LoginHandler{shortcuts, authenticator, userRepository, sessionStore}
	signupHandler = &handlers.SignupHandler{shortcuts, userRepository}
}
// setupRouting registers every application route on the package-level pat
// router. Registration order matters: pat matches routes in the order they
// were added, so the catch-all "/" route must stay last.
func setupRouting() {
	// Profile page requires an authenticated user and parsed URL params.
	r.Get("/users/{id:[0-9]+}/profile", filters.ApplyTo(loginHandler.LoadUserProfilePage, f.Authenticate, filters.Params)).Name("userProfile")
	r.Get("/signup", signupHandler.LoadSignupPage)
	r.Post("/signup", signupHandler.CreateNewUser)
	r.Get("/login", loginHandler.LoadLoginPage)
	// Login POST is CSRF-protected.
	r.Post("/login", filters.ApplyTo(loginHandler.CreateNewSession, f.Csrf))
	r.Get("/logout", loginHandler.Logout)
	r.Get("/", homeHandler.LoadHomePage)
}
// portFromEnvOrDefault returns the PORT environment variable when it is set
// to a non-empty value, falling back to "8080" otherwise.
func portFromEnvOrDefault() string {
	if port := os.Getenv("PORT"); port != "" {
		return port
	}
	return "8080"
}
| [
"\"PORT\""
] | [] | [
"PORT"
] | [] | ["PORT"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings unless the caller already set it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'aa_hu_pair.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        # Chain the original ImportError so the real cause stays visible.
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
pkg/internal/diskutil_test.go | package internal
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
// Canned `lsblk` output fixtures for TestListBlockDevices. Each line is one
// device in KEY="value" pair format; the raw strings (including the trailing
// newline) must match real lsblk output byte-for-byte since the parser under
// test consumes them verbatim.
const (
	lsblkOutput1 = `NAME="sda" KNAME="sda" ROTA="1" TYPE="disk" SIZE="62914560000" MODEL="VBOX HARDDISK" VENDOR="ATA" RO="0" RM="0" STATE="running" FSTYPE="" SERIAL="" PARTLABEL=""
NAME="sda1" KNAME="sda1" ROTA="1" TYPE="part" SIZE="62913494528" MODEL="" VENDOR="" RO="0" RM="0" STATE="" FSTYPE="" SERIAL="" PARTLABEL="BIOS-BOOT"
`
	lsblkOutput2 = `NAME="sdc" KNAME="sdc" ROTA="1" TYPE="disk" SIZE="62914560000" MODEL="VBOX HARDDISK" VENDOR="ATA" RO="0" RM="1" STATE="running" FSTYPE="ext4" SERIAL=""
NAME="sdc3" KNAME="sdc3" ROTA="1" TYPE="part" SIZE="62913494528" MODEL="" VENDOR="" RO="0" RM="1" STATE="" FSTYPE="ext4" SERIAL=""
`
)
func TestHelperProcess(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
defer os.Exit(0)
fmt.Fprintf(os.Stdout, os.Getenv("STDOUT"))
}
// TestListBlockDevices feeds canned lsblk output through ListBlockDevices
// (via the fake exec command) and verifies every parsed field of each
// returned BlockDevice, plus the counts of devices and unparseable rows.
func TestListBlockDevices(t *testing.T) {
	testcases := []struct {
		label             string
		execOutput        string // raw lsblk output the fake command prints
		totalBlockDevices int
		totalBadRows      int
		expected          []BlockDevice
	}{
		{
			label:             "Case 1",
			execOutput:        lsblkOutput1,
			totalBlockDevices: 2,
			totalBadRows:      0,
			expected: []BlockDevice{
				{
					Name:       "sda",
					FSType:     "",
					Type:       "disk",
					Size:       "62914560000",
					Model:      "VBOX HARDDISK",
					Vendor:     "ATA",
					Serial:     "",
					Rotational: "1",
					ReadOnly:   "0",
					Removable:  "0",
					State:      "running",
					PartLabel:  "",
				},
				{
					Name:       "sda1",
					FSType:     "",
					Type:       "part",
					Size:       "62913494528",
					Model:      "",
					Vendor:     "",
					Serial:     "",
					Rotational: "1",
					ReadOnly:   "0",
					Removable:  "0",
					State:      "running",
					PartLabel:  "BIOS-BOOT",
				},
			},
		},
		{
			label:             "Case 2",
			execOutput:        lsblkOutput2,
			totalBlockDevices: 2,
			totalBadRows:      0,
			expected: []BlockDevice{
				{
					Name:       "sdc",
					FSType:     "ext4",
					Type:       "disk",
					Size:       "62914560000",
					Model:      "VBOX HARDDISK",
					Vendor:     "ATA",
					Serial:     "",
					Rotational: "1",
					ReadOnly:   "0",
					Removable:  "1",
					State:      "running",
					PartLabel:  "",
				},
				{
					Name:       "sdc3",
					FSType:     "ext4",
					Type:       "part",
					Size:       "62913494528",
					Model:      "",
					Vendor:     "",
					Serial:     "",
					Rotational: "1",
					ReadOnly:   "0",
					Removable:  "1",
					State:      "running",
					PartLabel:  "",
				},
			},
		},
		{
			// Empty lsblk output must yield no devices and no bad rows.
			label:             "Case 3",
			execOutput:        "",
			totalBlockDevices: 0,
			totalBadRows:      0,
			expected:          []BlockDevice{},
		},
	}
	for _, tc := range testcases {
		// Route exec.Command through the fake that echoes ExecResult.
		ExecResult = tc.execOutput
		ExecCommand = FakeExecCommand
		// NOTE(review): deferred inside the loop, so all restores run only at
		// function exit; harmless here since each restores the same value.
		defer func() { ExecCommand = exec.Command }()
		blockDevices, badRows, err := ListBlockDevices()
		assert.NoError(t, err)
		assert.Equalf(t, tc.totalBadRows, len(badRows), "[%s] total bad rows list didn't match", tc.label)
		assert.Equalf(t, tc.totalBlockDevices, len(blockDevices), "[%s] total block device list didn't match", tc.label)
		// Compare every field of every parsed device against expectations.
		for i := 0; i < len(blockDevices); i++ {
			assert.Equalf(t, tc.expected[i].Name, blockDevices[i].Name, "[%q: Device: %d]: invalid block device name", tc.label, i+1)
			assert.Equalf(t, tc.expected[i].Type, blockDevices[i].Type, "[%q: Device: %d]: invalid block device type", tc.label, i+1)
			assert.Equalf(t, tc.expected[i].FSType, blockDevices[i].FSType, "[%q: Device: %d]: invalid block device file system", tc.label, i+1)
			assert.Equalf(t, tc.expected[i].Size, blockDevices[i].Size, "[%q: Device: %d]: invalid block device size", tc.label, i+1)
			assert.Equalf(t, tc.expected[i].Vendor, blockDevices[i].Vendor, "[%q: Device: %d]: invalid block device vendor", tc.label, i+1)
			assert.Equalf(t, tc.expected[i].Model, blockDevices[i].Model, "[%q: Device: %d]: invalid block device Model", tc.label, i+1)
			assert.Equalf(t, tc.expected[i].Serial, blockDevices[i].Serial, "[%q: Device: %d]: invalid block device serial", tc.label, i+1)
			assert.Equalf(t, tc.expected[i].Rotational, blockDevices[i].Rotational, "[%q: Device: %d]: invalid block device rotational property", tc.label, i+1)
			assert.Equalf(t, tc.expected[i].ReadOnly, blockDevices[i].ReadOnly, "[%q: Device: %d]: invalid block device read only value", tc.label, i+1)
			assert.Equalf(t, tc.expected[i].PartLabel, blockDevices[i].PartLabel, "[%q: Device: %d]: invalid block device PartLabel value", tc.label, i+1)
		}
	}
}
// TestHasChildren stubs the sysfs glob so a device's directory listing can be
// faked, then checks that HasChildren reports true exactly when an entry
// named after a child partition (e.g. "sdb1") appears among the entries.
func TestHasChildren(t *testing.T) {
	testcases := []struct {
		label        string
		blockDevice  BlockDevice
		fakeGlobfunc func(string) ([]string, error)
		expected     bool
	}{
		{
			label:       "Case 1",
			blockDevice: BlockDevice{Name: "sdb", KName: "sdb"},
			fakeGlobfunc: func(name string) ([]string, error) {
				return []string{"removable", "subsytem", "sdb1"}, nil
			},
			expected: true,
		},
		{
			label:       "Case 2",
			blockDevice: BlockDevice{Name: "sdb", KName: "sdb"},
			fakeGlobfunc: func(name string) ([]string, error) {
				return []string{"removable", "subsytem", "sdb2"}, nil
			},
			expected: true,
		},
		{
			// No partition-like entries -> no children.
			label:       "Case 3",
			blockDevice: BlockDevice{Name: "sdb", KName: "sdb"},
			fakeGlobfunc: func(name string) ([]string, error) {
				return []string{"removable", "subsytem"}, nil
			},
			expected: false,
		},
	}
	for _, tc := range testcases {
		// Inject the fake glob; restored (deferred) at function exit.
		FilePathGlob = tc.fakeGlobfunc
		defer func() { FilePathGlob = filepath.Glob }()
		actual, err := tc.blockDevice.HasChildren()
		assert.NoError(t, err)
		assert.Equalf(t, tc.expected, actual, "[%s]: failed to check if devie %q has child partitions", tc.label, tc.blockDevice.Name)
	}
}
// TestHasBindMounts writes synthetic /proc-style mountinfo content to a temp
// file, points the package-level mountFile variable at it, and verifies that
// HasBindMounts detects both bind mounts and regular mounts of the device and
// returns the correct mount point.
func TestHasBindMounts(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "discovery")
	if err != nil {
		t.Fatalf("error creating temp directory : %v", err)
	}
	defer os.RemoveAll(tempDir)
	testcases := []struct {
		label              string
		blockDevice        BlockDevice
		mountInfo          string // one mountinfo line the device should (not) match
		expected           bool
		expectedMountPoint string
	}{
		{
			label:              "Case 1", // device with bind mounts
			blockDevice:        BlockDevice{Name: "sdc"},
			mountInfo:          "5595 121 0:6 /sdc /var/lib/kubelet/plugins/kubernetes.io~local-volume/volumeDevices/local-pv-343bdd9/6d9d33ae-408e-4bac-81f7-c0bc347a9667 rw shared:23 - devtmpfs devtmpfs rw,seclabel,size=32180404k,nr_inodes=8045101,mode=755",
			expected:           true,
			expectedMountPoint: "/var/lib/kubelet/plugins/kubernetes.io~local-volume/volumeDevices/local-pv-343bdd9/6d9d33ae-408e-4bac-81f7-c0bc347a9667",
		},
		{
			label:              "Case 2", // device with regular mounts
			blockDevice:        BlockDevice{Name: "sdc"},
			mountInfo:          "121 98 259:1 / /boot rw,relatime shared:65 - ext4 /dev/sdc rw,seclabel",
			expected:           true,
			expectedMountPoint: "/boot",
		},
		{
			label:              "Case 3", // device does not have mount points
			blockDevice:        BlockDevice{Name: "sdd"},
			mountInfo:          "5595 121 0:6 /sdc /var/lib/kubelet/plugins/kubernetes.io~local-volume/volumeDevices/local-pv-343bdd9/6d9d33ae-408e-4bac-81f7-c0bc347a9667 rw shared:23 - devtmpfs devtmpfs rw,seclabel,size=32180404k,nr_inodes=8045101,mode=755",
			expected:           false,
			expectedMountPoint: "",
		},
		{
			label:              "Case 4", // device does not have mount points
			blockDevice:        BlockDevice{Name: "sdc"},
			mountInfo:          "",
			expected:           false,
			expectedMountPoint: "",
		},
	}
	for _, tc := range testcases {
		// Rewrite the same temp file for each case and point the package at it.
		filename := filepath.Join(tempDir, "mountfile")
		err = ioutil.WriteFile(filename, []byte(tc.mountInfo), 0755)
		if err != nil {
			t.Fatalf("error writing mount info to file : %v", err)
		}
		mountFile = filename
		actual, mountPoint, err := tc.blockDevice.HasBindMounts()
		assert.NoError(t, err)
		assert.Equalf(t, tc.expected, actual, "[%s]: failed to check bind mounts", tc.label)
		assert.Equalf(t, tc.expectedMountPoint, mountPoint, "[%s]: failed to get correct mount point", tc.label)
	}
}
// TestHasChildrenFail verifies that a glob failure propagates from
// HasChildren as an error with a false result.
func TestHasChildrenFail(t *testing.T) {
	testcases := []struct {
		label        string
		blockDevice  BlockDevice
		fakeGlobfunc func(string) ([]string, error)
		expected     bool
	}{
		{
			label:       "Case 1",
			blockDevice: BlockDevice{Name: "sdb"},
			fakeGlobfunc: func(name string) ([]string, error) {
				return []string{}, fmt.Errorf("failed to list matching files")
			},
			expected: false,
		},
	}
	for _, tc := range testcases {
		FilePathGlob = tc.fakeGlobfunc
		defer func() { FilePathGlob = filepath.Glob }()
		actual, err := tc.blockDevice.HasChildren()
		assert.Error(t, err)
		assert.Equalf(t, tc.expected, actual, "[%s]: failed to check if devie %q has child partitions", tc.label, tc.blockDevice.Name)
	}
}
// TestGetPathByID stubs both the glob over /dev/disk/by-id and symlink
// resolution, checking that GetPathByID resolves the stable by-id path for a
// device — both when PathByID is already cached and when it starts empty.
func TestGetPathByID(t *testing.T) {
	testcases := []struct {
		label               string
		blockDevice         BlockDevice
		fakeGlobfunc        func(string) ([]string, error)
		fakeEvalSymlinkfunc func(string) (string, error)
		expected            string
	}{
		{
			// PathByID pre-populated on the device.
			label:       "Case 1",
			blockDevice: BlockDevice{Name: "sdb", KName: "sdb", PathByID: "/dev/disk/by-id/sdb"},
			fakeGlobfunc: func(name string) ([]string, error) {
				return []string{"/dev/disk/by-id/dm-home", "/dev/disk/by-id/dm-uuid-LVM-6p00g8KptCD", "/dev/disk/by-id/sdb"}, nil
			},
			fakeEvalSymlinkfunc: func(path string) (string, error) {
				return "/dev/disk/by-id/sdb", nil
			},
			expected: "/dev/disk/by-id/sdb",
		},
		{
			// PathByID empty; must be discovered via glob + symlink eval.
			label:       "Case 2",
			blockDevice: BlockDevice{Name: "sdb", KName: "sdb", PathByID: ""},
			fakeGlobfunc: func(name string) ([]string, error) {
				return []string{"/dev/disk/by-id/sdb"}, nil
			},
			fakeEvalSymlinkfunc: func(path string) (string, error) {
				return "/dev/disk/by-id/sdb", nil
			},
			expected: "/dev/disk/by-id/sdb",
		},
	}
	for _, tc := range testcases {
		FilePathGlob = tc.fakeGlobfunc
		FilePathEvalSymLinks = tc.fakeEvalSymlinkfunc
		defer func() {
			FilePathGlob = filepath.Glob
			FilePathEvalSymLinks = filepath.EvalSymlinks
		}()
		actual, err := tc.blockDevice.GetPathByID()
		assert.NoError(t, err)
		assert.Equalf(t, tc.expected, actual, "[%s] failed to get device path by ID", tc.label)
	}
}
// TestGetPathByIDFail covers the failure paths of GetPathByID: a failing glob
// and a failing symlink resolution must both surface as errors with an empty
// path.
func TestGetPathByIDFail(t *testing.T) {
	testcases := []struct {
		label               string
		blockDevice         BlockDevice
		fakeGlobfunc        func(string) ([]string, error)
		fakeEvalSymlinkfunc func(string) (string, error)
		expected            string
	}{
		{
			// Glob itself errors out.
			label:       "Case 1",
			blockDevice: BlockDevice{Name: "sdb"},
			fakeGlobfunc: func(name string) ([]string, error) {
				return []string{}, fmt.Errorf("failed to list matching files")
			},
			fakeEvalSymlinkfunc: func(path string) (string, error) {
				return "/dev/disk/by-id/sdb", nil
			},
			expected: "",
		},
		{
			// Glob succeeds but the symlink cannot be resolved.
			label:       "Case 2",
			blockDevice: BlockDevice{Name: "sdb", PathByID: ""},
			fakeGlobfunc: func(name string) ([]string, error) {
				return []string{"/dev/disk/by-id/sdb"}, nil
			},
			fakeEvalSymlinkfunc: func(path string) (string, error) {
				return "", fmt.Errorf("failed to evaluate symlink")
			},
			expected: "",
		},
	}
	for _, tc := range testcases {
		FilePathGlob = tc.fakeGlobfunc
		FilePathEvalSymLinks = tc.fakeEvalSymlinkfunc
		defer func() {
			FilePathGlob = filepath.Glob
			FilePathEvalSymLinks = filepath.EvalSymlinks
		}()
		actual, err := tc.blockDevice.GetPathByID()
		assert.Error(t, err)
		assert.Equalf(t, tc.expected, actual, "[%s] failed to get device path by ID", tc.label)
	}
}
// TestParseBitBool checks the happy paths of parseBitBool: "0" and "" parse
// as false, "1" parses as true, none of them error.
func TestParseBitBool(t *testing.T) {
	testcases := []struct {
		label    string
		input    string
		expected bool
	}{
		{
			label:    "Case 1",
			input:    "0",
			expected: false,
		},
		{
			label:    "Case 2",
			input:    "",
			expected: false,
		},
		{
			label:    "Case 1",
			input:    "1",
			expected: true,
		},
	}
	for _, tc := range testcases {
		actual, err := parseBitBool(tc.input)
		assert.Equalf(t, tc.expected, actual, "[%s]: invalid response", tc.label)
		assert.NoError(t, err)
	}
}
// TestParseBitBoolFail checks that a non-bit string makes parseBitBool
// return false with an error.
func TestParseBitBoolFail(t *testing.T) {
	testcases := []struct {
		label    string
		input    string
		expected bool
	}{
		{
			label:    "Case 1",
			input:    "invalid input",
			expected: false,
		},
	}
	for _, tc := range testcases {
		actual, err := parseBitBool(tc.input)
		assert.Equal(t, actual, tc.expected, "[%s]: invalid response", tc.label)
		assert.Error(t, err)
	}
}
// TestReadOnly verifies that GetReadOnly maps the "0"/"1" ReadOnly field to
// false/true without error.
func TestReadOnly(t *testing.T) {
	testcases := []struct {
		label       string
		blockDevice BlockDevice
		expected    bool
	}{
		{
			label:       "Case 1",
			blockDevice: BlockDevice{ReadOnly: "0"},
			expected:    false,
		},
		{
			label:       "Case 2",
			blockDevice: BlockDevice{ReadOnly: "1"},
			expected:    true,
		},
	}
	for _, tc := range testcases {
		actual, err := tc.blockDevice.GetReadOnly()
		assert.Equal(t, tc.expected, actual, "[%s]: invalid response", tc.label)
		assert.NoError(t, err)
	}
}
// TestReadOnlyFail verifies that a malformed ReadOnly field makes GetReadOnly
// return false with an error.
func TestReadOnlyFail(t *testing.T) {
	testcases := []struct {
		label       string
		blockDevice BlockDevice
		expected    bool
	}{
		{
			label:       "Case 1",
			blockDevice: BlockDevice{ReadOnly: "invalid input"},
			expected:    false,
		},
	}
	for _, tc := range testcases {
		actual, err := tc.blockDevice.GetReadOnly()
		assert.Equal(t, tc.expected, actual, "[%s]: invalid response", tc.label)
		assert.Error(t, err)
	}
}
// TestGetRemovable verifies that GetRemovable maps the "0"/"1" Removable
// field to false/true without error.
func TestGetRemovable(t *testing.T) {
	testcases := []struct {
		label       string
		blockDevice BlockDevice
		expected    bool
	}{
		{
			label:       "Case 1",
			blockDevice: BlockDevice{Removable: "0"},
			expected:    false,
		},
		{
			label:       "Case 2",
			blockDevice: BlockDevice{Removable: "1"},
			expected:    true,
		},
	}
	for _, tc := range testcases {
		actual, err := tc.blockDevice.GetRemovable()
		assert.Equal(t, tc.expected, actual, "[%s]: invalid response", tc.label)
		assert.NoError(t, err)
	}
}
// TestGetRemovableFail verifies that a malformed Removable field makes
// GetRemovable return false with an error.
func TestGetRemovableFail(t *testing.T) {
	testcases := []struct {
		label       string
		blockDevice BlockDevice
		expected    bool
	}{
		{
			label:       "Case 1",
			blockDevice: BlockDevice{Removable: "invalid input"},
			expected:    false,
		},
	}
	for _, tc := range testcases {
		actual, err := tc.blockDevice.GetRemovable()
		assert.Equal(t, tc.expected, actual, "[%s]: invalid response", tc.label)
		assert.Error(t, err)
	}
}
// TestGetRotational verifies that GetRotational maps the "0"/"1" Rotational
// field to false/true without error.
func TestGetRotational(t *testing.T) {
	testcases := []struct {
		label       string
		blockDevice BlockDevice
		expected    bool
	}{
		{
			label:       "Case 1",
			blockDevice: BlockDevice{Rotational: "0"},
			expected:    false,
		},
		{
			label:       "Case 2",
			blockDevice: BlockDevice{Rotational: "1"},
			expected:    true,
		},
	}
	for _, tc := range testcases {
		actual, err := tc.blockDevice.GetRotational()
		assert.Equal(t, tc.expected, actual, "[%s]: invalid response", tc.label)
		assert.NoError(t, err)
	}
}
// TestGetRotationalFail verifies that a malformed Rotational field makes
// GetRotational return false with an error.
func TestGetRotationalFail(t *testing.T) {
	testcases := []struct {
		label       string
		blockDevice BlockDevice
		expected    bool
	}{
		{
			label:       "Case 1",
			blockDevice: BlockDevice{Rotational: "invalid input"},
			expected:    false,
		},
	}
	for _, tc := range testcases {
		actual, err := tc.blockDevice.GetRotational()
		assert.Equal(t, tc.expected, actual, "[%s]: invalid response", tc.label)
		assert.Error(t, err)
	}
}
| [
"\"GO_WANT_HELPER_PROCESS\"",
"\"STDOUT\""
] | [] | [
"STDOUT",
"GO_WANT_HELPER_PROCESS"
] | [] | ["STDOUT", "GO_WANT_HELPER_PROCESS"] | go | 2 | 0 | |
tests/test_sgschema.py | import os
import sys
import datetime
import unittest
from shotgun_api3 import Shotgun
import py_shotgun
# Shotgun connection parameters come from the environment so credentials are
# never committed: SHOTGUNSTUDIO is the studio subdomain, PYSHOTGUN_NAME /
# PYSHOTGUN_KEY are the API script name and key. A KeyError here means the
# environment is not configured for these tests.
url = r'https://{}.shotgunstudio.com'.format(os.environ['SHOTGUNSTUDIO'])
script_name = os.environ['PYSHOTGUN_NAME']
api_key = os.environ['PYSHOTGUN_KEY']
# Single shared Shotgun API session reused by every test in this module.
sg = Shotgun(
    url,
    script_name=script_name,
    api_key=api_key)
class TestSgschema(unittest.TestCase):
    """Exercises py_shotgun.SGSchema against a live Shotgun site.

    NOTE(review): these tests talk to the network through the shared `sg`
    connection, rely on hard-coded entity ids existing on that site, and
    several only print rather than assert — they mostly verify "does not
    raise". Confirm the fixture ids before running against another site.
    """
    def setUp(self):
        # Per-test logger plus fixture payloads mimicking real SG records.
        import logging
        self.logger = logging.getLogger(__file__)
        self.logger.setLevel(logging.DEBUG)
        # Minimal Asset reference assumed to exist on the target site.
        self.asset1 = {'id':1406, 'name':"Wepon01", 'type':'Asset'}
        # A captured EventLogEntry whose 'entity' points at a Version.
        self.event_log1 = {'attribute_name': 'sg_status_list',
            'cached_display_name': None,
            'created_at': datetime.datetime(2020, 12, 13, 17, 30, 55),
            'description': 'David Lin changed "Status" from "rev" to "apr" on Version '
            'v076_seq004_sh0010_Comp',
            'entity': {'id': 19613, 'name': 'v076_seq004_sh0010_Comp', 'type': 'Version'},
            'event_type': 'Shotgun_Version_Change',
            'filmstrip_image': None,
            'id': 2282801,
            'image': None,
            'image_blur_hash': None,
            'image_source_entity': None,
            'meta': {'attribute_name': 'sg_status_list',
                'entity_id': 19613,
                'entity_type': 'Version',
                'field_data_type': 'status_list',
                'new_value': 'apr',
                'old_value': 'rev',
                'type': 'attribute_change'},
            'project': {'id': 91, 'name': 'Noflame_Test', 'type': 'Project'},
            'session_uuid': 'd8aa7698-3d25-11eb-a9d3-0242ac110002',
            'type': 'EventLogEntry',
            'user': {'id': 88, 'name': 'David Lin', 'type': 'HumanUser'}}
    def test_sgschema_create(self):
        """A Version entity can be instantiated from the dynamic schema."""
        SGSchema = py_shotgun.SGSchema
        SGSchema.set_api(sg)
        Version = SGSchema.sgClasses.Version
        ver = Version(self.event_log1['entity']['id'], sg, self.logger)
        self.assertTrue(ver)
    def test_name_attribute(self):
        """name_() works on both Version and Asset (smoke test, prints only)."""
        SGSchema = py_shotgun.SGSchema
        SGSchema.set_api(sg)
        Asset = SGSchema.sgClasses.Asset
        Version = SGSchema.sgClasses.Version
        ver = Version(self.event_log1['entity']['id'], sg, self.logger)
        print(ver.name_())
        ast = Asset(self.asset1['id'], sg, self.logger)
        print(ast.name_())
    def test_name_attribute_without_sg(self):
        """Entities built without an explicit sg/logger still resolve name_()."""
        SGSchema = py_shotgun.SGSchema
        SGSchema.set_api(sg)
        Asset = SGSchema.sgClasses.Asset
        Version = SGSchema.sgClasses.Version
        # No sg/logger passed here — exercises the class-level API fallback.
        ver = Version(self.event_log1['entity']['id'])
        print(ver.name_())
        ast = Asset(self.asset1['id'], sg, self.logger)
        print(ast.name_())
    def test_query_delete_entity(self):
        """A Task fetched by id exposes name_() (smoke test, prints only)."""
        SGSchema = py_shotgun.SGSchema
        SGSchema.set_api(sg)
        Task = SGSchema.sgClasses.Task
        task = Task(66099, sg, self.logger)
        print(task.name_())
| [] | [] | [
"SHOTGUNSTUDIO",
"PYSHOTGUN_KEY",
"PYSHOTGUN_NAME"
] | [] | ["SHOTGUNSTUDIO", "PYSHOTGUN_KEY", "PYSHOTGUN_NAME"] | python | 3 | 0 | |
internal/pipe/git/git.go | package git
import (
"fmt"
"net/url"
"os"
"os/exec"
"strconv"
"strings"
"time"
"github.com/apex/log"
"github.com/goreleaser/goreleaser/internal/git"
"github.com/goreleaser/goreleaser/internal/pipe"
"github.com/goreleaser/goreleaser/pkg/context"
)
// Pipe that sets up git state.
type Pipe struct{}

// String returns the human-readable description shown while this pipe runs.
func (Pipe) String() string {
	return "getting and validating git state"
}
// Run the pipe.
// Run requires the git binary on PATH, gathers repository state (tag, commit,
// branch, remote URL) into ctx.Git, derives ctx.Version by stripping a
// leading "v" from the current tag, and finally validates that state.
func (Pipe) Run(ctx *context.Context) error {
	// Bail out early if git itself is not installed.
	if _, err := exec.LookPath("git"); err != nil {
		return ErrNoGit
	}
	info, err := getInfo(ctx)
	if err != nil {
		return err
	}
	ctx.Git = info
	log.WithField("commit", info.Commit).WithField("latest tag", info.CurrentTag).Info("building...")
	// Version is the current tag without the conventional "v" prefix.
	ctx.Version = strings.TrimPrefix(ctx.Git.CurrentTag, "v")
	return validate(ctx)
}
// fakeInfo is the placeholder git state reported for snapshot builds that run
// outside a real git repository (see getInfo).
// nolint: gochecknoglobals
var fakeInfo = context.GitInfo{
	Branch:      "none",
	CurrentTag:  "v0.0.0",
	Commit:      "none",
	ShortCommit: "none",
	FullCommit:  "none",
	Summary:     "none",
}
// getInfo returns the repository's git state. Outside a git repository it
// fails — unless the build is a snapshot, in which case placeholder fakeInfo
// is returned (and partial errors are tolerated) so templating still works.
func getInfo(ctx *context.Context) (context.GitInfo, error) {
	if !git.IsRepo(ctx) && ctx.Snapshot {
		log.Warn("accepting to run without a git repo because this is a snapshot")
		return fakeInfo, nil
	}
	if !git.IsRepo(ctx) {
		return context.GitInfo{}, ErrNotRepository
	}
	info, err := getGitInfo(ctx)
	if err != nil && ctx.Snapshot {
		// Snapshot builds tolerate incomplete git info (e.g. missing tags).
		log.WithError(err).Warn("ignoring errors because this is a snapshot")
		if info.Commit == "" {
			info = fakeInfo
		}
		return info, nil
	}
	return info, err
}
// getGitInfo collects the full git state for HEAD: branch, short/full commit,
// commit date, describe summary, sanitized remote URL, current tag, tag
// subject/contents/body, and the previous tag. When no tag exists it returns
// partially-populated info with a "v0.0.0" placeholder and ErrNoTag so
// snapshot builds can still proceed (see getInfo).
func getGitInfo(ctx *context.Context) (context.GitInfo, error) {
	branch, err := getBranch(ctx)
	if err != nil {
		return context.GitInfo{}, fmt.Errorf("couldn't get current branch: %w", err)
	}
	short, err := getShortCommit(ctx)
	if err != nil {
		return context.GitInfo{}, fmt.Errorf("couldn't get current commit: %w", err)
	}
	full, err := getFullCommit(ctx)
	if err != nil {
		return context.GitInfo{}, fmt.Errorf("couldn't get current commit: %w", err)
	}
	date, err := getCommitDate(ctx)
	if err != nil {
		return context.GitInfo{}, fmt.Errorf("couldn't get commit date: %w", err)
	}
	summary, err := getSummary(ctx)
	if err != nil {
		return context.GitInfo{}, fmt.Errorf("couldn't get summary: %w", err)
	}
	gitURL, err := getURL(ctx)
	if err != nil {
		return context.GitInfo{}, fmt.Errorf("couldn't get remote URL: %w", err)
	}
	// Strip any user:password credentials embedded in an https remote URL so
	// they never leak into release metadata.
	if strings.HasPrefix(gitURL, "https://") {
		u, err := url.Parse(gitURL)
		if err != nil {
			return context.GitInfo{}, fmt.Errorf("couldn't parse remote URL: %w", err)
		}
		u.User = nil
		gitURL = u.String()
	}
	tag, err := getTag(ctx)
	if err != nil {
		// No tag: return what we have plus a placeholder tag and ErrNoTag.
		return context.GitInfo{
			Branch:      branch,
			Commit:      full,
			FullCommit:  full,
			ShortCommit: short,
			CommitDate:  date,
			URL:         gitURL,
			CurrentTag:  "v0.0.0",
			Summary:     summary,
		}, ErrNoTag
	}
	subject, err := getTagWithFormat(ctx, tag, "contents:subject")
	if err != nil {
		return context.GitInfo{}, fmt.Errorf("couldn't get tag subject: %w", err)
	}
	contents, err := getTagWithFormat(ctx, tag, "contents")
	if err != nil {
		return context.GitInfo{}, fmt.Errorf("couldn't get tag contents: %w", err)
	}
	body, err := getTagWithFormat(ctx, tag, "contents:body")
	if err != nil {
		return context.GitInfo{}, fmt.Errorf("couldn't get tag content body: %w", err)
	}
	previous, err := getPreviousTag(ctx, tag)
	if err != nil {
		// shouldn't error, will only affect templates
		log.Warnf("couldn't find any tags before %q", tag)
	}
	return context.GitInfo{
		Branch:      branch,
		CurrentTag:  tag,
		PreviousTag: previous,
		Commit:      full,
		FullCommit:  full,
		ShortCommit: short,
		CommitDate:  date,
		URL:         gitURL,
		Summary:     summary,
		TagSubject:  subject,
		TagContents: contents,
		TagBody:     body,
	}, nil
}
// validate enforces a releasable git state: snapshot/skip-validate short-
// circuit with sentinel pipe errors, shallow clones only warn, a dirty
// worktree fails, and HEAD must match the current tag exactly.
func validate(ctx *context.Context) error {
	if ctx.Snapshot {
		return pipe.ErrSnapshotEnabled
	}
	if ctx.SkipValidate {
		return pipe.ErrSkipValidateEnabled
	}
	// Shallow clones may be missing tags/history; warn but don't fail.
	if _, err := os.Stat(".git/shallow"); err == nil {
		log.Warn("running against a shallow clone - check your CI documentation at https://goreleaser.com/ci")
	}
	if err := CheckDirty(ctx); err != nil {
		return err
	}
	// HEAD must be exactly at the current tag, otherwise the release would
	// not correspond to the tagged commit.
	_, err := git.Clean(git.Run(ctx, "describe", "--exact-match", "--tags", "--match", ctx.Git.CurrentTag))
	if err != nil {
		return ErrWrongRef{
			commit: ctx.Git.Commit,
			tag:    ctx.Git.CurrentTag,
		}
	}
	return nil
}
// CheckDirty returns an error if the current git repository is dirty.
func CheckDirty(ctx *context.Context) error {
	out, err := git.Run(ctx, "status", "--porcelain")
	if err != nil {
		// Treat a failing status command the same as a dirty tree.
		return ErrDirty{status: out}
	}
	if strings.TrimSpace(out) != "" {
		return ErrDirty{status: out}
	}
	return nil
}
// getBranch returns the abbreviated name of the branch HEAD points at.
func getBranch(ctx *context.Context) (string, error) {
	return git.Clean(git.Run(ctx, "rev-parse", "--abbrev-ref", "HEAD", "--quiet"))
}
// getCommitDate returns HEAD's committer timestamp as a UTC time. Empty git
// output yields the zero time without an error; a non-numeric value errors.
// (The '%ct' format is quoted here; presumably git.Clean strips the quotes
// before ParseInt — verify if touching this.)
func getCommitDate(ctx *context.Context) (time.Time, error) {
	ct, err := git.Clean(git.Run(ctx, "show", "--format='%ct'", "HEAD", "--quiet"))
	if err != nil {
		return time.Time{}, err
	}
	if ct == "" {
		return time.Time{}, nil
	}
	i, err := strconv.ParseInt(ct, 10, 64)
	if err != nil {
		return time.Time{}, err
	}
	t := time.Unix(i, 0).UTC()
	return t, nil
}
// getShortCommit returns HEAD's abbreviated commit hash.
func getShortCommit(ctx *context.Context) (string, error) {
	return git.Clean(git.Run(ctx, "show", "--format='%h'", "HEAD", "--quiet"))
}
// getFullCommit returns HEAD's full commit hash.
func getFullCommit(ctx *context.Context) (string, error) {
	return git.Clean(git.Run(ctx, "show", "--format='%H'", "HEAD", "--quiet"))
}
// getSummary returns `git describe --always --dirty --tags` for HEAD, i.e. a
// human-readable description of the current state.
func getSummary(ctx *context.Context) (string, error) {
	return git.Clean(git.Run(ctx, "describe", "--always", "--dirty", "--tags"))
}
// getTagWithFormat reads a single field of an annotated tag (e.g.
// "contents:subject") via `git tag -l --format`, stripping the wrapping
// quotes and a trailing blank-line pair from the output.
func getTagWithFormat(ctx *context.Context, tag, format string) (string, error) {
	out, err := git.Run(ctx, "tag", "-l", "--format='%("+format+")'", tag)
	return strings.TrimSpace(strings.TrimSuffix(strings.ReplaceAll(out, "'", ""), "\n\n")), err
}
// getTag resolves the tag for the current commit, in priority order:
// the GORELEASER_CURRENT_TAG environment variable, then tags pointing at
// HEAD (highest version first), then the most recent reachable tag. The
// first non-empty result — or the first error — wins.
func getTag(ctx *context.Context) (string, error) {
	if tag := os.Getenv("GORELEASER_CURRENT_TAG"); tag != "" {
		return tag, nil
	}
	tag, err := git.Clean(git.Run(ctx, "tag", "--points-at", "HEAD", "--sort", "-version:refname"))
	if tag != "" || err != nil {
		return tag, err
	}
	return git.Clean(git.Run(ctx, "describe", "--tags", "--abbrev=0"))
}
// getPreviousTag returns GORELEASER_PREVIOUS_TAG when set; otherwise the
// closest tag reachable from the parent of the given tag.
func getPreviousTag(ctx *context.Context, current string) (string, error) {
	if tag := os.Getenv("GORELEASER_PREVIOUS_TAG"); tag != "" {
		return tag, nil
	}
	return git.Clean(git.Run(ctx, "describe", "--tags", "--abbrev=0", fmt.Sprintf("tags/%s^", current)))
}
}
// getURL returns the URL of the repository's default remote.
func getURL(ctx *context.Context) (string, error) {
	return git.Clean(git.Run(ctx, "ls-remote", "--get-url"))
}
| [
"\"GORELEASER_CURRENT_TAG\"",
"\"GORELEASER_PREVIOUS_TAG\""
] | [] | [
"GORELEASER_PREVIOUS_TAG",
"GORELEASER_CURRENT_TAG"
] | [] | ["GORELEASER_PREVIOUS_TAG", "GORELEASER_CURRENT_TAG"] | go | 2 | 0 | |
app/providers/mongo/utils.go | package mongo
import (
"fmt"
"log"
"os"
"github.com/globalsign/mgo"
"github.com/globalsign/mgo/bson"
"github.com/smileinnovation/imannotate/api/admin"
"github.com/smileinnovation/imannotate/api/project"
"github.com/smileinnovation/imannotate/api/user"
)
var sess *mgo.Session
// getMongo returns a per-call clone of a lazily created, package-cached mgo
// session, scoped to the DB_NAME database. Callers must close the returned
// database's session (db.Session.Close()) when done.
//
// Environment: DB_HOST, DB_USER, DB_PASS, DB_NAME are the connection
// parameters; DB_AUTH is the authentication database and defaults to DB_NAME
// when unset.
//
// NOTE(review): the port is hard-coded to 27017; DB_USER/DB_PASS are
// interpolated unescaped into the dial string (special characters such as
// '@' would break it); the cached sess is read and written without
// synchronization — confirm concurrent callers are safe; and log.Fatal
// aborts the whole process when dialing fails.
func getMongo() *mgo.Database {
	db := os.Getenv("DB_HOST")
	dbu := os.Getenv("DB_USER")
	dbp := os.Getenv("DB_PASS")
	dbn := os.Getenv("DB_NAME")
	admdb := os.Getenv("DB_AUTH")
	if admdb == "" {
		// if there is no "DB_AUTH" database name for authentication,
		// fall back to the data database name
		admdb = dbn
	}
	// Dial once and cache; re-dial when the cached session has gone stale.
	if sess == nil || sess.Ping() != nil {
		var err error
		c := fmt.Sprintf("%s:%s@%s:27017/%s", dbu, dbp, db, admdb)
		sess, err = mgo.Dial(c)
		if err != nil {
			log.Fatal("DB connection error:", err)
		}
	}
	return sess.Clone().DB(dbn)
}
// fixProjectId converts the raw bson ObjectId stored in p.Id to its hex
// string form, mutating p in place and returning it for chaining.
func fixProjectId(p *project.Project) *project.Project {
	p.Id = bson.ObjectId(p.Id).Hex()
	return p
}
// fixUserId converts the raw bson ObjectId stored in p.ID to its hex string
// form, mutating p in place and returning it for chaining.
func fixUserId(p *user.User) *user.User {
	p.ID = bson.ObjectId(p.ID).Hex()
	return p
}
// canTouchProject reports whether user u may modify project p: global admins
// always can; otherwise only the project owner can.
func canTouchProject(u *user.User, p *project.Project) bool {
	if admin.Get().IsAdmin(u) {
		return true
	}
	// The original opened (and deferred closing of) a Mongo session here but
	// never used it — the ownership check needs no database access, so the
	// dead session acquisition has been removed.
	return p.Owner == u.ID
}
// canAnnotateProject reports whether user u may annotate project p. Owners
// and admins always can; otherwise an ACL entry linking the user to the
// project must exist in the "project_acl" collection.
func canAnnotateProject(u *user.User, p *project.Project) bool {
	if canTouchProject(u, p) {
		return true
	}
	db := getMongo()
	defer db.Session.Close()
	if i, err := db.C("project_acl").Find(bson.M{
		"projectId": bson.ObjectIdHex(p.Id),
		"userId":    bson.ObjectIdHex(u.ID),
	}).Count(); err != nil || i < 1 {
		return false
	}
	// BUG FIX: the original returned false here as well, so a matching ACL
	// entry was never honored and non-owners could never annotate.
	return true
}
| [
"\"DB_HOST\"",
"\"DB_USER\"",
"\"DB_PASS\"",
"\"DB_NAME\"",
"\"DB_AUTH\""
] | [] | [
"DB_HOST",
"DB_AUTH",
"DB_NAME",
"DB_PASS",
"DB_USER"
] | [] | ["DB_HOST", "DB_AUTH", "DB_NAME", "DB_PASS", "DB_USER"] | go | 5 | 0 | |
lib/clients/cryovac_clients/RGA_client.py | import numpy as np
from datetime import datetime
from twisted.internet.defer import inlineCallbacks
from EGGS_labrad.lib.clients.cryovac_clients.RGA_gui import RGA_gui
class RGA_client(RGA_gui):
name = 'RGA Client'
BUFFERID = 289961
    def __init__(self, reactor, cxn=None, parent=None):
        """Build the GUI and kick off the async startup chain.

        Args:
            reactor: twisted reactor that drives the client.
            cxn: optional existing labrad connection to reuse; a new one is
                created in connect() when None.
            parent: accepted for Qt-style construction; not stored here.
        """
        super().__init__()
        self.cxn = cxn
        # The client is its own GUI (RGA_gui subclass).
        self.gui = self
        self.gui.setupUi()
        self.reactor = reactor
        # Servers this client depends on.
        self.servers = ['RGA Server', 'Data Vault']
        # initialization sequence: connect -> initData -> initializeGUI
        d = self.connect()
        d.addCallback(self.initData)
        d.addCallback(self.initializeGUI)
    # SETUP
    @inlineCallbacks
    def connect(self):
        """
        Creates an asynchronous connection to labrad.

        Connects (or reuses) the labrad manager connection, grabs the
        required servers, subscribes to the RGA buffer-update signal and to
        server connect/disconnect messages, and creates a recording context.
        Returns the connection via the inlineCallbacks deferred.
        """
        # create connection to labrad manager
        if not self.cxn:
            import os
            LABRADHOST = os.environ['LABRADHOST']
            from labrad.wrappers import connectAsync
            self.cxn = yield connectAsync(LABRADHOST, name=self.name)
        # get servers
        try:
            self.dv = self.cxn.data_vault
            self.reg = self.cxn.registry
            self.rga = self.cxn.rga_server
        except Exception as e:
            print('Required servers not connected, disabling widget.')
            self.setEnabled(False)
        # connect to signals
        # device parameters: RGA pushes buffer text through this signal
        yield self.rga.signal__buffer_update(self.BUFFERID)
        yield self.rga.addListener(listener=self.updateBuffer, source=None, ID=self.BUFFERID)
        # server connections
        yield self.cxn.manager.subscribe_to_named_message('Server Connect', 9898989, True)
        yield self.cxn.manager.addListener(listener=self.on_connect, source=None, ID=9898989)
        yield self.cxn.manager.subscribe_to_named_message('Server Disconnect', 9898989 + 1, True)
        yield self.cxn.manager.addListener(listener=self.on_disconnect, source=None, ID=9898989 + 1)
        # set recording stuff
        self.c_record = self.cxn.context()
        return self.cxn
    @inlineCallbacks
    def initData(self, cxn):
        """
        Get startup data from servers and show on GUI.

        Disables the widget while querying the RGA server for ionizer,
        detector and scan settings, pushes them into the GUI controls, then
        re-enables the widget. Passes the connection through for the next
        callback in the chain.
        """
        # lock while starting up
        self.setEnabled(False)
        self.buffer_readout.appendPlainText('Initializing client...')
        # lockswitches start in the locked (checked) position
        self.gui.general_lockswitch.setChecked(True)
        self.gui.ionizer_lockswitch.setChecked(True)
        self.gui.detector_lockswitch.setChecked(True)
        self.gui.scan_lockswitch.setChecked(True)
        # ionizer settings
        ee_val = yield self.rga.ionizer_electron_energy()
        ie_val = yield self.rga.ionizer_ion_energy()
        fl_val = yield self.rga.ionizer_emission_current()
        vf_val = yield self.rga.ionizer_focus_voltage()
        self.gui.ionizer_ee.setValue(ee_val)
        self.gui.ionizer_ie.setCurrentIndex(ie_val)
        self.gui.ionizer_fl.setValue(fl_val)
        self.gui.ionizer_vf.setValue(vf_val)
        # detector settings
        hv_val = yield self.rga.detector_cdem_voltage()
        nf_val = yield self.rga.detector_noise_floor()
        self.gui.detector_hv.setValue(hv_val)
        self.gui.detector_nf.setCurrentIndex(nf_val)
        # scan settings
        mi_val = yield self.rga.scan_mass_initial()
        mf_val = yield self.rga.scan_mass_final()
        sa_val = yield self.rga.scan_mass_steps()
        self.gui.scan_mi.setValue(mi_val)
        self.gui.scan_mf.setValue(mf_val)
        self.gui.scan_sa.setValue(sa_val)
        # unlock after startup
        self.setEnabled(True)
        self.buffer_readout.appendPlainText('Initialized.')
        return cxn
def initializeGUI(self, cxn):
"""
Connect signals to slots and other initializations.
"""
# general
self.gui.initialize.clicked.connect(lambda: self.rga.initialize())
self.gui.calibrate_detector.clicked.connect(lambda: self.rga.detector_calibrate())
self.gui.general_tp.clicked.connect(lambda: self.rga.tpm_start())
# ionizer
self.gui.ionizer_ee.valueChanged.connect(lambda value: self.rga.ionizer_electron_energy(int(value)))
self.gui.ionizer_ie.currentIndexChanged.connect(lambda index: self.rga.ionizer_ion_energy(index))
self.gui.ionizer_fl.valueChanged.connect(lambda value: self.rga.ionizer_emission_current(value))
self.gui.ionizer_vf.valueChanged.connect(lambda value: self.rga.ionizer_focus_voltage(value))
# detector
self.gui.detector_hv.valueChanged.connect(lambda value: self.rga.detector_cdem_voltage(int(value)))
self.gui.detector_nf.currentIndexChanged.connect(lambda index: self.rga.detector_noise_floor(index))
# scan
self.gui.scan_start.clicked.connect(lambda: self.startScan())
# buffer
self.gui.buffer_clear.clicked.connect(lambda: self.gui.buffer_readout.clear())
return cxn
# SIGNALS
@inlineCallbacks
def on_connect(self, c, message):
server_name = message[1]
if server_name in self.servers:
print(server_name + ' reconnected, enabling widget.')
# get latest values
yield self.initData(self.cxn)
self.setEnabled(True)
def on_disconnect(self, c, message):
server_name = message[1]
if server_name in self.servers:
print(server_name + ' disconnected, disabling widget.')
self.setEnabled(False)
def updateBuffer(self, c, data):
"""
Updates GUI when values are received from server.
"""
param, value = data
self.gui.buffer_readout.appendPlainText('{}: {}'.format(param, value))
# SLOTS
@inlineCallbacks
def startScan(self):
"""
Creates a new dataset to record pressure and
tells polling loop to add data to data vault.
"""
# set up datavault
date = datetime.now()
year = str(date.year)
month = '{:02d}'.format(date.month)
trunk1 = '{0:s}_{1:s}_{2:02d}'.format(year, month, date.day)
trunk2 = '{0:s}_{1:02d}:{2:02d}'.format(self.name, date.hour, date.minute)
yield self.dv.cd(['', year, month, trunk1, trunk2], True, context=self.c_record)
yield self.dv.new('SRS RGA Scan', [('Mass', 'amu')],
[('Scan', 'Current', '1e-16 A')], context=self.c_record)
# get scan parameters from widgets
mass_initial = int(self.gui.scan_mi.value())
mass_final = int(self.gui.scan_mf.value())
mass_step = int(self.gui.scan_sa.value())
type = self.gui.scan_type.currentText()
num_scans = int(self.gui.scan_num.value())
# send scan parameters to RGA
yield self.rga.scan_mass_initial(mass_initial)
yield self.rga.scan_mass_final(mass_final)
yield self.rga.scan_mass_steps(mass_step)
# do scan
self.gui.buffer_readout.appendPlainText('Starting scan...')
self.gui.setEnabled(False)
x, y = yield self.rga.scan_start(type, num_scans)
data_tmp = np.array([x, y]).transpose()
yield self.dv.add_ex(data_tmp, context=self.c_record)
self.gui.buffer_readout.appendPlainText('Scan finished.')
self.gui.setEnabled(True)
def closeEvent(self, event):
self.cxn.disconnect()
if self.reactor.running:
self.reactor.stop()
if __name__ == "__main__":
from EGGS_labrad.lib.clients import runClient
runClient(RGA_client)
| [] | [] | [
"LABRADHOST"
] | [] | ["LABRADHOST"] | python | 1 | 0 | |
src/com.mentor.nucleus.bp.docgen/src/com/mentor/nucleus/bp/docgen/generator/Generator.java | //====================================================================
//
// File: $RCSfile: Generator.java,v $
// Version: $Revision: 1.23 $
// Modified: $Date: 2013/01/10 23:43:41 $
//
// (c) Copyright 2004-2014 by Mentor Graphics Corp. All rights reserved.
//
//====================================================================
package com.mentor.nucleus.bp.docgen.generator;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.List;
import org.apache.tools.ant.Task;
import org.eclipse.core.filesystem.EFS;
import org.eclipse.core.filesystem.IFileStore;
import org.eclipse.core.filesystem.IFileSystem;
import org.eclipse.core.resources.IFile;
import org.eclipse.core.resources.IProject;
import org.eclipse.core.resources.IResource;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.IPath;
import org.eclipse.core.runtime.IProgressMonitor;
import org.eclipse.core.runtime.NullProgressMonitor;
import org.eclipse.core.runtime.Path;
import org.eclipse.jface.dialogs.ProgressMonitorDialog;
import org.eclipse.jface.operation.IRunnableWithProgress;
import org.eclipse.ui.IEditorDescriptor;
import org.eclipse.ui.IWorkbenchPage;
import org.eclipse.ui.PlatformUI;
import org.eclipse.ui.console.ConsolePlugin;
import org.eclipse.ui.console.IConsole;
import org.eclipse.ui.console.IConsoleConstants;
import org.eclipse.ui.console.IConsoleManager;
import org.eclipse.ui.console.IConsoleView;
import org.eclipse.ui.console.MessageConsole;
import org.eclipse.ui.console.MessageConsoleStream;
import org.eclipse.ui.part.FileEditorInput;
import com.mentor.nucleus.bp.core.CorePlugin;
import com.mentor.nucleus.bp.core.SystemModel_c;
import com.mentor.nucleus.bp.core.common.PersistableModelComponent;
import com.mentor.nucleus.bp.mc.AbstractActivator;
import com.mentor.nucleus.bp.mc.AbstractNature;
import com.mentor.nucleus.bp.mc.c.binary.ExportBuilder;
public class Generator extends Task {
public static final String DOCGEN_LAUNCH_ID = "DocGen document generator.launch"; //$NON-NLS-1$
public static final String XSLTPROC_LAUNCH_ID = "DocGen xsltproc.launch"; //$NON-NLS-1$
public static final String XBUILD_LAUNCH_ID = "DocGen xtumlmc build.launch"; //$NON-NLS-1$
public static final String EXTERNALTOOLBUILDER_FOLDER = ".externalToolBuilders"; //$NON-NLS-1$
public static final String CORE_ICON_DIR = "icons/metadata/"; //$NON-NLS-1$
public static final String IMAGE_DIR = "images/"; //$NON-NLS-1$
public static final String DOCGEN_DIR = "/tools/docgen/"; //$NON-NLS-1$
public static final String DOCGEN_EXE = "docgen"; //$NON-NLS-1$
public static final String XSLTPROC_EXE = "docbook/xsltproc"; //$NON-NLS-1$
public static final String DOC_DIR = "doc/"; //$NON-NLS-1$
public static final String DOCGEN_INPUT = "a.xtuml"; //$NON-NLS-1$
public static final String DOC_HTML = "doc.html"; //$NON-NLS-1$
public static final String DOC_XML = "doc.xml"; //$NON-NLS-1$
public static final String DOCGEN_XSL = "docgen.xsl"; //$NON-NLS-1$
public static final String CSSFILE = ".css"; //$NON-NLS-1$
public static final String CONSOLE_NAME = "Console"; //$NON-NLS-1$
private static final String ACTIVITY_ICON = "Activity.gif"; //$NON-NLS-1$
private static final int SLEEPTIME = 500;
private static final int KILLTIMEOUT = 20000;
public static MessageConsole myConsole;
public static MessageConsoleStream msgbuf;
public static Generator self;
public Generator() {
myConsole = findConsole(CONSOLE_NAME);
msgbuf = myConsole.newMessageStream();
}
public static void genAll(SystemModel_c sys) {
if (self == null) {
self = new Generator();
}
createDocumentation(sys);
}
/*
* The flow of this function is:
* - Run the image generator
* - Run the Model Compiler pre-builder
* - Call xtumlmc_build.exe xtumlmc_cleanse_model <model file>
* - Pass the stripped down model file to the docgen exe
* - Store the doc.xml output in the specified output folder
* - Run xsltproc to convert doc.xml into doc.html
* - Display doc.html
*/
private static void createDocumentation(final SystemModel_c sys) {
final IProject project = com.mentor.nucleus.bp.io.image.generator.Generator
.getProject(sys);
boolean failed = false;
if ( (project != null) && !failed ) {
String projPath = project.getLocation().toOSString();
final IPath path = new Path(projPath + File.separator
+ DOC_DIR);
final String destPath = path.toOSString();
ProgressMonitorDialog pmd = new ProgressMonitorDialog(PlatformUI.getWorkbench().getActiveWorkbenchWindow().getShell());
pmd.setCancelable(true);
pmd.create();
IProgressMonitor monitor = pmd.getProgressMonitor();
try {
// Make sure the settings in the launch file are up to date so
// we can invoke xtumlmc_build properly.
AbstractNature nature = null;
AbstractActivator activator = null;
if ( project.hasNature(com.mentor.nucleus.bp.mc.c.binary.MCNature.MC_NATURE_ID) ) {
nature = com.mentor.nucleus.bp.mc.c.binary.MCNature.getDefault();
activator = com.mentor.nucleus.bp.mc.c.binary.Activator.getDefault();
}
else if ( project.hasNature(com.mentor.nucleus.bp.mc.c.source.MCNature.MC_NATURE_ID) ) {
nature = com.mentor.nucleus.bp.mc.c.source.MCNature.getDefault();
activator = com.mentor.nucleus.bp.mc.c.source.Activator.getDefault();
}
else if ( project.hasNature(com.mentor.nucleus.bp.mc.cpp.source.MCNature.MC_NATURE_ID) ) {
nature = com.mentor.nucleus.bp.mc.cpp.source.MCNature.getDefault();
activator = com.mentor.nucleus.bp.mc.cpp.source.Activator.getDefault();
}
else if ( project.hasNature(com.mentor.nucleus.bp.mc.systemc.source.MCNature.MC_NATURE_ID) ) {
nature = com.mentor.nucleus.bp.mc.systemc.source.MCNature.getDefault();
activator = com.mentor.nucleus.bp.mc.systemc.source.Activator.getDefault();
}
else if ( project.hasNature(com.mentor.nucleus.bp.mc.vhdl.source.MCNature.MC_NATURE_ID) ) {
nature = com.mentor.nucleus.bp.mc.vhdl.source.MCNature.getDefault();
activator = com.mentor.nucleus.bp.mc.vhdl.source.Activator.getDefault();
}
com.mentor.nucleus.bp.mc.MCBuilderArgumentHandler argHandlerAbstract = new com.mentor.nucleus.bp.mc.MCBuilderArgumentHandler(
project, activator, nature);
argHandlerAbstract.setArguments(nature.getBuilderID());
// Next proceed with actually running docgen on the model
IWorkbenchPage page = PlatformUI.getWorkbench()
.getActiveWorkbenchWindow().getActivePage();
String id = IConsoleConstants.ID_CONSOLE_VIEW;
IConsoleView view = (IConsoleView) page.showView(id);
view.display(myConsole);
pmd.run(true, true, new IRunnableWithProgress() {
public void run(IProgressMonitor monitor)
throws InvocationTargetException,
InterruptedException {
int steps = 8;
int curStep = 1;
List<SystemModel_c> exportedSystems = new ArrayList<SystemModel_c>();
monitor.beginTask("Document Generation", steps);
if (!path.toFile().exists()) {
path.toFile().mkdir();
}
while (curStep <= steps) {
if (monitor.isCanceled()) {
InterruptedException ie = new InterruptedException("User cancelled the operation");
throw ie;
}
try {
switch (curStep) {
case 1:
monitor.subTask("Cleaning");
cleanup(destPath);
monitor.worked(1);
break;
case 2:
monitor.subTask("Loading model");
PersistableModelComponent pmc = sys.getPersistableComponent();
pmc.loadComponentAndChildren(new NullProgressMonitor());
monitor.worked(1);
break;
case 3:
monitor.subTask("Gathering model information");
ExportBuilder eb = new ExportBuilder();
exportedSystems = eb.exportSystem(sys, destPath, new NullProgressMonitor());
monitor.worked(1);
break;
case 4:
monitor.subTask("Generating images");
com.mentor.nucleus.bp.io.image.generator.Generator.genAll(sys, project);
// Now gen the images for the referred-to systems
for( SystemModel_c exportedSystem: exportedSystems ) {
com.mentor.nucleus.bp.io.image.generator.Generator.genAll(exportedSystem, project);
}
configureIcons(project, destPath);
monitor.worked(1);
break;
case 5:
monitor.subTask("Prepping model for document generation");
runXbuild(project, destPath);
monitor.worked(1);
break;
case 6:
monitor.subTask("Processing model");
runDocgen(project, destPath);
monitor.worked(1);
break;
case 7:
monitor.subTask("Generating display data");
runXsltproc(project, destPath);
monitor.worked(1);
break;
case 8:
monitor.subTask("Refreshing");
project.refreshLocal(IResource.DEPTH_INFINITE, null);
monitor.worked(1);
break;
}
} catch (Throwable e) {
RuntimeException err = new RuntimeException(e.getMessage());
throw err;
}
curStep++;
}
}
});
openOutput(project);
} catch (Throwable e) {
String errMsg = e.getMessage();
if ( (errMsg == null) || errMsg.isEmpty() ) {
Throwable cause = e.getCause();
if ( cause != null ) {
errMsg = cause.getMessage();
}
if ((cause == null) || (errMsg == null)) {
errMsg = "";
}
}
logMsg("Error. Document generation failed: " + errMsg);
CorePlugin.logError("Error. Document generation failed: " + errMsg, e);
failed = true;
} finally {
if (failed) {
try {
cleanup(destPath);
project.refreshLocal(IResource.DEPTH_INFINITE, null);
} catch (CoreException ce) {
String errMsg = ce.getMessage();
if ( (errMsg == null) || errMsg.isEmpty() ) {
errMsg = "CoreException";
}
logMsg("Error. Document generation failed during cleanup: " + errMsg);
CorePlugin.logError("Error. Document generation failed during cleanup: " + errMsg, ce);
}
} else {
logMsg("Document generation finished successfully.");
}
monitor.done();
}
}
}
private static void cleanup(String workingDir)
throws CoreException
{
/* NOTE: At initial implementation we have decide to leave the
* generated data alone so we don't delete any potential user
* created data.
*
File workDir = new File(workingDir);
File[] files = workDir.listFiles();
for (File file: files) {
if ( file.isDirectory() ) {
cleanup(file.toString());
file.delete();
} else {
file.delete();
}
}*/
}
private static void configureIcons(IProject project, String workingDir)
throws RuntimeException, CoreException, IOException
{
File imageDir = new File(workingDir + IMAGE_DIR);
File coreIconDir = new File(CorePlugin.getEntryPath(CORE_ICON_DIR));
if (!imageDir.exists()) {
RuntimeException re = new RuntimeException("Expected model images do not exist.");
throw re;
}
if (!coreIconDir.exists()) {
RuntimeException re = new RuntimeException("Could not locate icons in core plugin.");
throw re;
}
IFileSystem fileSystem = EFS.getLocalFileSystem();
IFileStore srcDir = fileSystem.getStore(coreIconDir.toURI());
IFileStore tgtDir = fileSystem.getStore(imageDir.toURI());
srcDir.copy(tgtDir, EFS.OVERWRITE, null);
project.refreshLocal(IResource.DEPTH_INFINITE, null);
File activityIcon = new File(workingDir + IMAGE_DIR + ACTIVITY_ICON);
if (!activityIcon.exists()) {
RuntimeException re = new RuntimeException("Failed to copy icons from core plugin.");
throw re;
}
}
private static void runXbuild(IProject project, String workingDir)
throws IOException, RuntimeException, CoreException, InterruptedException
{
// Call xtumlmc_build.exe xtumlmc_cleanse_model <infile> <outfile>
String app = AbstractNature.getLaunchAttribute(project,
com.mentor.nucleus.bp.mc.AbstractNature.LAUNCH_ATTR_TOOL_LOCATION);
String args = "xtumlmc_cleanse_model"; //$NON-NLS-1$
String inputfile = project.getName() + ".sql"; //$NON-NLS-1$
String middlefile = "z.xtuml"; //$NON-NLS-1$
String outputfile = DOCGEN_INPUT;
File output = new File(workingDir + outputfile);
File middle = new File(workingDir + middlefile);
File sqlfile = new File(workingDir + inputfile);
if ( middle.exists() ) {
middle.delete();
}
if ( output.exists() ) {
output.delete();
}
ProcessBuilder pb = new ProcessBuilder(app, args, inputfile, middlefile);
pb.directory(new File(workingDir));
Process process = pb.start();
process.waitFor();
project.refreshLocal(IResource.DEPTH_INFINITE, null);
if ( !middle.exists() ) {
RuntimeException re = new RuntimeException("Expected output file doesn't exist: " +
middle.toString());
throw re;
}
// Call xtumlmc_build.exe ReplaceUUIDWithLong <infile> <outfile>
args = "ReplaceUUIDWithLong"; //$NON-NLS-1$
pb = new ProcessBuilder(app, args, middlefile, outputfile);
pb.directory(new File(workingDir));
process = pb.start();
process.waitFor();
sqlfile.delete();
middle.delete();
project.refreshLocal(IResource.DEPTH_INFINITE, null);
if ( !output.exists() ) {
RuntimeException re = new RuntimeException("Expected output file doesn't exist: " +
output.toString());
throw re;
}
}
private static void runDocgen(IProject project, String workingDir)
throws IOException, RuntimeException, CoreException, InterruptedException
{
// Call docgen.exe
String homedir = System.getenv("MGC_EMBEDDED_HOME"); //$NON-NLS-1$
String app = homedir + DOCGEN_DIR + DOCGEN_EXE;
String outputfile = DOC_XML;
File output = new File(workingDir + outputfile);
File input = new File(workingDir + DOCGEN_INPUT);
if (output.exists()) {
output.delete();
}
ProcessBuilder pb = new ProcessBuilder(app);
pb.directory(new File(workingDir));
Process process = pb.start();
int exitVal = doWaitFor(process, null);
input.delete();
project.refreshLocal(IResource.DEPTH_INFINITE, null);
if ( exitVal == -1 ) {
RuntimeException re = new RuntimeException("docgen subprocess failed: " +
output.toString());
throw re;
} else if ( !output.exists() ) {
RuntimeException re = new RuntimeException("Expected output file doesn't exist: " +
output.toString());
throw re;
}
}
private static void runXsltproc(IProject project, String workDir)
throws IOException, RuntimeException, CoreException, InterruptedException
{
// Run xsltproc to convert doc.xml into doc.html
String homedir = System.getenv("MGC_EMBEDDED_HOME").replaceAll("\\\\", "/"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
String app = homedir + DOCGEN_DIR + XSLTPROC_EXE;
String xslfile = DOCGEN_XSL;
String xmlfile = DOC_XML;
String htmlfile = DOC_HTML;
String workingDir = workDir.replaceAll("\\\\", "/"); //$NON-NLS-1$ //$NON-NLS-2$
File output = new File(workingDir + htmlfile);
if (output.exists()) {
output.delete();
}
IFileSystem fileSystem = EFS.getLocalFileSystem();
IFileStore srcxsl = fileSystem.getStore(new File(homedir + DOCGEN_DIR + xslfile).toURI());
IFileStore tgtxsl = fileSystem.getStore(new File(workingDir + xslfile).toURI());
srcxsl.copy(tgtxsl, EFS.OVERWRITE, null);
IFileStore[] children = fileSystem.getStore(new File(homedir + DOCGEN_DIR).toURI()).childStores(EFS.NONE, null);
for (IFileStore child: children) {
if (child.getName().endsWith(CSSFILE)) {
IFileStore targetCss = fileSystem.getStore(new File(workingDir + child.getName()).toURI());
child.copy(targetCss, EFS.OVERWRITE, null);
}
}
ProcessBuilder pb = new ProcessBuilder(app, xslfile, xmlfile);
pb.directory(new File(workingDir));
pb.redirectErrorStream(true);
Process process = pb.start();
int exitVal = doWaitFor(process, output);
if ( exitVal == -1 ) {
RuntimeException re = new RuntimeException("xsltproc subprocess failed to create " +
output.toString());
throw re;
}
project.refreshLocal(IResource.DEPTH_INFINITE, null);
if ( !output.exists() ) {
RuntimeException re = new RuntimeException("Expected output file doesn't exist: " +
output.toString());
throw re;
}
}
private static void openOutput(IProject project)
throws IOException, RuntimeException, CoreException, InterruptedException
{
// Open the generated documentation
String htmlfile = DOC_HTML;
IFile output = project.getFile(DOC_DIR + htmlfile);
IWorkbenchPage page = PlatformUI.getWorkbench().getActiveWorkbenchWindow().getActivePage();
IEditorDescriptor desc = PlatformUI.getWorkbench().
getEditorRegistry().getDefaultEditor(output.getName());
page.openEditor(new FileEditorInput(output), desc.getId());
}
private static MessageConsole findConsole(String name) {
ConsolePlugin plugin = ConsolePlugin.getDefault();
IConsoleManager conMan = plugin.getConsoleManager();
IConsole[] existing = conMan.getConsoles();
for (int i = 0; i < existing.length; i++) {
if (name.equals(existing[i].getName())) {
return (MessageConsole) existing[i];
}
}
// no console found, so create a new one
MessageConsole myConsole = new MessageConsole(name, null);
conMan.addConsoles(new IConsole[] { myConsole });
return myConsole;
}
private static void logMsg(String msg) {
try {
msgbuf.println(msg);
msgbuf.flush();
while(PlatformUI.getWorkbench().getDisplay().readAndDispatch());
} catch (IOException ioe) {
}
}
private static int doWaitFor(Process p, File output) {
int exitValue = -1; // returned to caller when p is finished
int totalTime = 0;
try {
BufferedReader is = new BufferedReader(new InputStreamReader(p.getInputStream()));
DataOutputStream dos = null;
if (output != null) {
dos = new DataOutputStream(
new BufferedOutputStream(new FileOutputStream(output)));
}
String line;
boolean finished = false; // Set to true when p is finished
while (!finished) {
try {
while ((line = is.readLine()) != null) {
if ( dos != null) {
dos.writeBytes(line);
dos.write('\n');
}
}
// Ask the process for its exitValue. If the process
// is not finished, an IllegalThreadStateException
// is thrown. If it is finished, we fall through and
// the variable finished is set to true.
exitValue = p.exitValue();
finished = true;
} catch (IllegalThreadStateException e) {
// Process is not finished yet;
// Sleep a little to save on CPU cycles
Thread.sleep(SLEEPTIME);
totalTime = totalTime + SLEEPTIME;
if (totalTime > KILLTIMEOUT) {
finished = true;
p.destroy();
}
}
if (dos != null) {
dos.flush();
dos.close();
}
}
} catch (Exception e) {
// unexpected exception! print it out for debugging...
System.err.println("doWaitFor(): unexpected exception - " + e.getMessage());
}
return exitValue;
}
} | [
"\"MGC_EMBEDDED_HOME\"",
"\"MGC_EMBEDDED_HOME\""
] | [] | [
"MGC_EMBEDDED_HOME"
] | [] | ["MGC_EMBEDDED_HOME"] | java | 1 | 0 | |
.github/script/build_trade-data.py | import os
import time
import datetime
import pathlib
import json
import requests
from datetime import date, timedelta, datetime
from newsapi import NewsApiClient
def send_post_json(url, req_data):
try:
headers = {'content-type': 'application/json'}
res = requests.post(url, req_data, headers=headers)
res.raise_for_status()
except Exception as ex:
print('Generated an exception: {ex}'.format(ex=ex))
return -1, ex
return 0, res.json()
if __name__ == "__main__":
DELAY_TIME_SEC = 300
SEC_RETRY_CNT = 5
SCAN_URL = "https://zmcx16.moe/stock-minehunter/api/task/do-scan"
SCREENER_URL = "https://zmcx16.moe/stock-minehunter/api/task/do-screen"
SEC_URL = "https://zmcx16.moe/stock-minehunter/api/task/get-sec-data"
FORMULA_URL = "https://zmcx16.moe/stock-minehunter/api/task/calc-formula"
NEWS_API_KEY = os.environ.get("NEWS_API_KEY", "")
script_path = pathlib.Path(__file__).parent.resolve()
input_path = script_path / '..' / '..' / 'trade-data.json'
stocks_output_path = script_path / '..' / '..' / 'zmcx16_investment-stocks-trade-data.json'
stocks_output_readable_path = script_path / '..' / '..' / 'zmcx16_investment-stocks-trade-data_readable.json'
formula_output_path = script_path / '..' / '..' / 'zmcx16_investment-formula-trade-data.json'
formula_output_readable_path = script_path / '..' / '..' / 'zmcx16_investment-formula-trade-data_readable.json'
scan_output = {"hold_stock_list": [], "star_stock_list": [], "screener_stock_list": [], "data": [], "news": {},
"SEC": {}, "Beneish_Model_v1": {}}
formula_output = {"hold_stock_list": [], "portfolio": {}, "KellyFormula_Range_v1": {}}
newsapi = NewsApiClient(api_key=NEWS_API_KEY)
with open(input_path, 'r', encoding='utf-8') as f:
data = json.loads(f.read())
# screener
screen_template = data["screen_template"].copy()
ret, resp = send_post_json(SCREENER_URL, str({"data": screen_template}))
if ret == 0:
try:
if resp["ret"] != 0:
print('server err = {err}, msg = {msg}'.format(err=resp["ret"], msg=resp["err_msg"]))
else:
scan_output["screener_stock_list"] += resp["data"][0]["result"]["symbols"]
except Exception as ex:
print('Generated an exception: {ex}, try next target.'.format(ex=ex))
# norm-minehunter
formula_output["hold_stock_list"] = scan_output["hold_stock_list"] = data["hold_stock_list"]
scan_output["star_stock_list"] = data["star_stock_list"]
scan_list = set(data["hold_stock_list"] + data["star_stock_list"] + scan_output["screener_stock_list"])
# SEC
sec_cik_table = data["sec_cik_table"]
# portfolio
formula_output["portfolio"] = data["portfolio"]
# news
news_latest_days = data["news_config"]["latest_days"]
news_max_count = data["news_config"]["max_count"]
news_exclude_domains = data["news_config"]["exclude_domains"]
news_symbol_keyword = data["news_config"]["symbol_keyword"]
for symbol in scan_list:
print("get {symbol} scan / SEC / formula output: {now}".format(symbol=symbol, now=datetime.now()))
# get norm-minehunter
scan_list_args = data["tactic_template"].copy()
for target in scan_list_args:
target["target"] = [symbol]
ret, resp = send_post_json(SCAN_URL, str({"data": scan_list_args}))
if ret == 0:
try:
if resp["ret"] != 0:
print('server err = {err}, msg = {msg}'.format(err=resp["ret"], msg=resp["err_msg"]))
else:
scan_output["data"] += resp["data"]
except Exception as ex:
print('Generated an exception: {ex}, try next target.'.format(ex=ex))
# --- get SEC -------
if (symbol in data["hold_stock_list"]):
sec_args = data["sec_template"].copy()
for sec_target in sec_args:
sec_target["target"] = [{"symbol": symbol, "CIK": sec_cik_table[symbol]}]
for retry_i in range(SEC_RETRY_CNT):
ret, resp = send_post_json(SEC_URL, str({"data": [sec_target]}))
if ret == 0:
try:
if resp["ret"] != 0:
print('server err = {err}, msg = {msg}'.format(err=resp["ret"], msg=resp["err_msg"]))
else:
scan_output["SEC"][symbol] = resp["data"][0]["report"]
break
except Exception as ex:
print('Generated an exception: {ex}, try next target.'.format(ex=ex))
print('SEC retry {retry_i} failed.'.format(retry_i=retry_i))
# --- get news ------
if (symbol in data["hold_stock_list"]) or (
symbol in data["star_stock_list"]):
query = symbol + ' stock'
if symbol in news_symbol_keyword:
query = news_symbol_keyword[symbol]
news = newsapi.get_everything(q=query,
exclude_domains=news_exclude_domains,
from_param=(datetime.now() - timedelta(days=news_latest_days)).strftime("%Y-%m-%d"),
to=date.today().strftime("%Y-%m-%d"),
sort_by='publishedAt')
if news['status'] == 'ok' and news['totalResults'] > 0:
news_temp = []
news_title = []
for article in news['articles']:
if article['title'] not in news_title:
news_temp.append(article)
news_title.append(article['title'])
scan_output["news"][symbol] = news_temp[:min(news_max_count, len(news_temp))]
# -------------------
# --- get KellyFormula ---
if symbol in data["hold_stock_list"]:
formula_list_args = data["KellyFormula_Range_v1_template"].copy()
for target in formula_list_args:
target["target"] = [symbol]
ret, resp = send_post_json(FORMULA_URL, str({"data": formula_list_args}))
if ret == 0:
try:
if resp["ret"] != 0:
print('server err = {err}, msg = {msg}'.format(err=resp["ret"], msg=resp["err_msg"]))
else:
formula_output["KellyFormula_Range_v1"][symbol] = resp["data"][0]["result"]
except Exception as ex:
print('Generated an exception: {ex}, try next target.'.format(ex=ex))
# -------------------
# --- get Beneish_Model ---
formula_list_args = data["Beneish_Model_v1_template"].copy()
for target in formula_list_args:
target["target"] = [symbol]
ret, resp = send_post_json(FORMULA_URL, str({"data": formula_list_args}))
if ret == 0:
try:
if resp["ret"] != 0:
print('server err = {err}, msg = {msg}'.format(err=resp["ret"], msg=resp["err_msg"]))
else:
scan_output["Beneish_Model_v1"][symbol] = resp["data"][0]["result"]
except Exception as ex:
print('Generated an exception: {ex}, try next target.'.format(ex=ex))
# -------------------
# -- load portfolio -
if symbol in data["hold_stock_list"]:
for stock in scan_output["data"]:
if symbol == stock["symbol"]:
price = float(stock["baseinfo"]["Price"])
formula_output["portfolio"][symbol]["profit_%"] = (price - data["portfolio"][symbol]["cost_p"]) / data["portfolio"][symbol]["cost_p"] * 100
break
# ------------------
time.sleep(DELAY_TIME_SEC)
if len(scan_output["data"]) > 0 or len(scan_output["news"]) > 0 or len(formula_output["KellyFormula_Range_v1"]) > 0 or len(formula_output["portfolio"]) > 0:
with open(stocks_output_path, 'w', encoding='utf-8') as f:
f.write(json.dumps(scan_output, separators=(',', ':')))
with open(stocks_output_readable_path, 'w', encoding='utf-8') as f:
f.write(json.dumps(scan_output, indent=4, sort_keys=True))
with open(formula_output_path, 'w', encoding='utf-8') as f:
f.write(json.dumps(formula_output, separators=(',', ':')))
with open(formula_output_readable_path, 'w', encoding='utf-8') as f:
f.write(json.dumps(formula_output, indent=4, sort_keys=True))
print("task completed: {now}".format(now=datetime.now()))
else:
print("No scan output data")
| [] | [] | [
"NEWS_API_KEY"
] | [] | ["NEWS_API_KEY"] | python | 1 | 0 | |
pypeapp/lib/mongo.py | import os
try:
from urllib.parse import urlparse, parse_qs, unquote, urlsplit, urlunsplit
except ImportError:
from urlparse import urlparse, parse_qs
class MongoEnvNotSet(Exception):
pass
def build_netloc(host, port):
# type: (str, Optional[int]) -> str
"""
Build a netloc from a host-port pair
"""
if port is None:
return host
if ':' in host:
# Only wrap host with square brackets when it is IPv6
host = '[{}]'.format(host)
return '{}:{}'.format(host, port)
def build_url_from_netloc(netloc, scheme='https'):
# type: (str, str) -> str
"""
Build a full URL from a netloc.
"""
if netloc.count(':') >= 2 and '@' not in netloc and '[' not in netloc:
# It must be a bare IPv6 address, so wrap it with brackets.
netloc = '[{}]'.format(netloc)
return '{}://{}'.format(scheme, netloc)
def parse_netloc(netloc):
# type: (str) -> Tuple[str, Optional[int]]
"""
Return the host-port pair from a netloc.
"""
url = build_url_from_netloc(netloc)
parsed = urlparse(url)
return parsed.hostname, parsed.port
def split_auth_from_netloc(netloc):
"""
Parse out and remove the auth information from a netloc.
Returns: (netloc, (username, password)).
"""
if '@' not in netloc:
return netloc, (None, None)
# Split from the right because that's how urllib.parse.urlsplit()
# behaves if more than one @ is present (which can be checked using
# the password attribute of urlsplit()'s return value).
auth, netloc = netloc.rsplit('@', 1)
if ':' in auth:
# Split from the left because that's how urllib.parse.urlsplit()
# behaves if more than one : is present (which again can be checked
# using the password attribute of the return value)
user_pass = auth.split(':', 1)
else:
user_pass = auth, None
user_pass = tuple(
None if x is None else unquote(x) for x in user_pass
)
return netloc, user_pass
def _transform_url(url, transform_netloc):
"""Transform and replace netloc in a url.
transform_netloc is a function taking the netloc and returning a
tuple. The first element of this tuple is the new netloc. The
entire tuple is returned.
Returns a tuple containing the transformed url as item 0 and the
original tuple returned by transform_netloc as item 1.
"""
purl = urlsplit(url)
netloc_tuple = transform_netloc(purl.netloc)
# stripped url
url_pieces = (
purl.scheme, netloc_tuple[0], purl.path, purl.query, purl.fragment
)
surl = urlunsplit(url_pieces)
return surl, netloc_tuple
def _get_netloc(netloc):
return split_auth_from_netloc(netloc)
def _redact_netloc(netloc):
return (redact_netloc(netloc),)
def split_auth_netloc_from_url(url):
# type: (str) -> Tuple[str, str, Tuple[str, str]]
"""
Parse a url into separate netloc, auth, and url with no auth.
Returns: (url_without_auth, netloc, (username, password))
"""
url_without_auth, (netloc, auth) = _transform_url(url, _get_netloc)
return url_without_auth, netloc, auth
def remove_auth_from_url(url):
# type: (str) -> str
"""Return a copy of url with 'username:password@' removed."""
# username/pass params are passed to subversion through flags
# and are not recognized in the url.
return _transform_url(url, _get_netloc)[0]
def redact_auth_from_url(url):
# type: (str) -> str
"""Replace the password in a given url with ****."""
return _transform_url(url, _redact_netloc)[0]
def decompose_url(url):
components = {
"scheme": None,
"host": None,
"port": None,
"path": None,
"username": None,
"password": None,
"auth_db": None,
"ssl": None,
}
result = urlparse(url)
if result.scheme is None:
_url = "mongodb://{}".format(url)
result = urlparse(_url)
if result.netloc is not None:
components["scheme"]=result.scheme or "mongodb"
url_without_auth, netloc, (components["username"], components["password"])=split_auth_netloc_from_url(url)
components["host"], components["port"] = parse_netloc(netloc)
components["path"]=result.path
components["auth_db"]=parse_qs(result.query)['authSource'][0]
components["ssl"]=parse_qs(result.query)['ssl'][0]
else:
components["scheme"] = result.scheme
components["host"] = result.hostname
try:
components["port"] = result.port
except ValueError:
raise RuntimeError("invalid port specified")
components["username"] = result.username
components["password"] = result.password
try:
components["auth_db"] = parse_qs(result.query)['authSource'][0]
except KeyError:
# no auth db provided, mongo will use the one we are connecting to
pass
components["ssl"]=None
return components
def compose_url(scheme=None,
host=None,
username=None,
password=None,
port=None,
path=None,
auth_db=None,
ssl=None):
url = "{scheme}://"
if username and password:
url += "{username}:{password}@"
url += "{host}"
if port:
url += ":{port}"
if path:
url += "{path}"
if auth_db and ssl:
url += "?authSource={auth_db}&ssl=true"
elif auth_db:
url += "?authSource={auth_db}"
else:
url += "?ssl=true"
url_str=url.format(**{
"scheme": scheme,
"host": host,
"username": username,
"password": password,
"path": path,
"port": port,
"auth_db": auth_db
})
return url_str
def get_default_components():
mongo_url = os.environ.get("AVALON_MONGO")
if mongo_url is None:
raise MongoEnvNotSet(
"URL for Mongo logging connection is not set."
)
return decompose_url(mongo_url)
| [] | [] | [
"AVALON_MONGO"
] | [] | ["AVALON_MONGO"] | python | 1 | 0 | |
nemo/collections/nlp/models/language_modeling/megatron_base_model.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from omegaconf.dictconfig import DictConfig
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.collections.nlp.modules.common.megatron.clip_grads import clip_grad_norm_fp32
from nemo.collections.nlp.modules.common.megatron.megatron_init import initialize_model_parallel_for_nemo
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
from nemo.utils import logging
try:
import apex
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
__all__ = ["MegatronBaseModel"]
class MegatronBaseModel(NLPModel):
"""
Megatron base class
It does the following things:
1. Initialize the model parallel for nemo given the model parallel parameters.
2. Turn on all the nvidia optimizations.
3. If `cfg.tokenizer` is available, it loads the tokenizer and pad the vocab to the correct size for tensor model parallelism.
4. It help to run `configure_gradient_clipping`, if `grad_clip_pl_default` is set True, it uses the pytorch lightning default
gradient clipping. Or if `megatron_amp_o2` is set True, it uses the parameters from optimizer to clip the gradients.
Otherwise, it uses the parameters calculated in the `setup_optimizer_param_groups` method.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer, no_lm_init=True):
# FIXME: switch to self._cfg
if not HAVE_APEX:
raise ImportError(
"Apex was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
# this prevents base constructor from initializing tokenizer
self.tokenizer = None
super().__init__(cfg, trainer=trainer, no_lm_init=no_lm_init)
# used in NVIDIA NGC PyTorch containers
self._enable_nvidia_optimizations()
if self._cfg.get('use_cpu_initialization', False) is False:
torch.cuda.set_device(trainer.local_rank)
# buffer used during train_step for logging average loss over gradient accumulation steps
self._reduced_loss_buffer = []
initialize_model_parallel_for_nemo(
world_size=trainer.world_size,
global_rank=trainer.global_rank,
local_rank=trainer.local_rank,
tensor_model_parallel_size=cfg.get('tensor_model_parallel_size', 1),
pipeline_model_parallel_size=cfg.get('pipeline_model_parallel_size', 1),
pipeline_model_parallel_split_rank=cfg.get('pipeline_model_parallel_split_rank', 0),
micro_batch_size=cfg.get('micro_batch_size'),
global_batch_size=cfg.get('global_batch_size'),
seed=self.cfg.get('seed', 1234),
apex_transformer_log_level=self.cfg.get('apex_transformer_log_level', 30),
)
self.grad_clip_pl_default = False # use pytorch default for gradient clipping. Default False
if hasattr(self._cfg, "tokenizer"):
# build tokenizer (defaults to nemo supported tokenizers)
self._build_tokenizer()
# manipulate vocabulary (e.g., pad vocabulary for better efficiency)
self._build_vocab()
def _enable_nvidia_optimizations(self):
"These optimizations are present in NVIDIA NGC PyTorch Containers"
# Version check
nvidia_torch_version = os.getenv('NVIDIA_PYTORCH_VERSION', None)
if nvidia_torch_version is not None:
NVIDIA_TORCH_MAJOR = int(nvidia_torch_version.split('.')[0])
try:
NVIDIA_TORCH_MINOR = int(nvidia_torch_version.split('.')[1])
except Exception:
NVIDIA_TORCH_MINOR = 0
# Apex Persistent layer norm is supported from Nvidia PyTorch container v21.11
if NVIDIA_TORCH_MAJOR < 21 or (NVIDIA_TORCH_MAJOR == 21 and NVIDIA_TORCH_MINOR < 11):
self.cfg.persist_layer_norm = False
if NVIDIA_TORCH_MAJOR >= 21 or (NVIDIA_TORCH_MAJOR == 21 and NVIDIA_TORCH_MINOR >= 11):
# NVFUSER
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(True)
torch._C._debug_set_autodiff_subgraph_inlining(False)
else:
# Not a Nvidia container. Dependency check is on users
pass
def _build_tokenizer(self):
"""
Default tokenizer is based on available nemo tokenizers.
Override this method to use an external tokenizer.
All tokenizers are expected to provide compatible interface.
Override default Encoder-decoder tokenizer to use legacy=True for sentencepiece.
"""
self.tokenizer = get_nmt_tokenizer(
library=self._cfg.tokenizer.library,
model_name=self._cfg.tokenizer.type,
tokenizer_model=self.register_artifact("tokenizer.model", self._cfg.tokenizer.model),
vocab_file=self.register_artifact("tokenizer.vocab_file", self._cfg.tokenizer.vocab_file),
merges_file=self.register_artifact("tokenizer.merge_file", self._cfg.tokenizer.merge_file),
delimiter=self.cfg.tokenizer.get('delimiter', None),
legacy=True if self._cfg.tokenizer.library == 'sentencepiece' else False,
)
def _build_vocab(self):
"""
Manipulate vocabulary (e.g., pad vocabulary for increased performance)/
"""
# TODO: add config to allow to disable it?
self.padded_vocab_size = self._vocab_size_with_padding(
orig_vocab_size=self.tokenizer.vocab_size,
make_vocab_size_divisible_by=self._cfg.get('make_vocab_size_divisible_by', 128),
tensor_model_parallel_size=self._cfg.get('tensor_model_parallel_size', 1),
)
def _vocab_size_with_padding(self, orig_vocab_size, make_vocab_size_divisible_by, tensor_model_parallel_size):
"""Pad vocab size so it is divisible by model parallel size and
still having GPU friendly size."""
after = orig_vocab_size
multiple = make_vocab_size_divisible_by * tensor_model_parallel_size
while (after % multiple) != 0:
after += 1
logging.info(
f'Padded vocab_size: {after}, original vocab_size: {orig_vocab_size}, dummy tokens: {after - orig_vocab_size}.'
)
return after
def _get_parameters(self):
"""
private method to load all the trainable parameters from optimizer param groups
"""
params = []
for param_group in self._optimizer_param_groups:
for param in param_group['params']:
params.append(param)
return params
def configure_gradient_clipping(self, *args, **kwargs):
"""PTL hook to configure gradients.
We use gradient clipping implementation from megatron-lm.
"""
clip_val = self.trainer.gradient_clip_val
if clip_val is None:
return
clip_val = float(clip_val)
if clip_val <= 0:
return
if self.grad_clip_pl_default:
# use the default behavior
return super().configure_gradient_clipping(*args, **kwargs)
elif self.megatron_amp_o2:
# grep fp32 master parameters for gradient clipping
parameters = self._optimizer.get_parameters()
else:
parameters = self._get_parameters()
grad_norm = clip_grad_norm_fp32(parameters=parameters, max_norm=clip_val)
self.log('grad_norm', grad_norm, rank_zero_only=True)
| [] | [] | [
"NVIDIA_PYTORCH_VERSION"
] | [] | ["NVIDIA_PYTORCH_VERSION"] | python | 1 | 0 | |
cmd/minikube/cmd/start_test.go | /*
Copyright 2019 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"os"
"testing"
"github.com/spf13/cobra"
"github.com/spf13/viper"
cfg "k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
)
func TestGetKuberneterVersion(t *testing.T) {
var tests = []struct {
description string
expectedVersion string
paramVersion string
upgrade bool
cfg *cfg.MachineConfig
}{
{
description: "kubernetes-version not given, no config",
expectedVersion: constants.DefaultKubernetesVersion,
paramVersion: "",
upgrade: false,
},
{
description: "kubernetes-version not given, config available",
expectedVersion: "v1.15.0",
paramVersion: "",
upgrade: false,
cfg: &cfg.MachineConfig{KubernetesConfig: cfg.KubernetesConfig{KubernetesVersion: "v1.15.0"}},
},
{
description: "kubernetes-version given, no config",
expectedVersion: "v1.15.0",
paramVersion: "v1.15.0",
upgrade: false,
},
{
description: "kubernetes-version given, config available",
expectedVersion: "v1.16.0",
paramVersion: "v1.16.0",
upgrade: true,
cfg: &cfg.MachineConfig{KubernetesConfig: cfg.KubernetesConfig{KubernetesVersion: "v1.15.0"}},
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
viper.SetDefault(kubernetesVersion, test.paramVersion)
version, upgrade := getKubernetesVersion(test.cfg)
// check whether we are getting the expected version
if version != test.expectedVersion {
t.Fatalf("test failed because the expected version %s is not returned", test.expectedVersion)
}
// check whether the upgrade flag is correct
if test.upgrade != upgrade {
t.Fatalf("test failed expected upgrade is %t", test.upgrade)
}
})
}
}
func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
viper.SetDefault(memory, defaultMemorySize)
viper.SetDefault(humanReadableDiskSize, defaultDiskSize)
originalEnv := os.Getenv("HTTP_PROXY")
defer func() {
err := os.Setenv("HTTP_PROXY", originalEnv)
if err != nil {
t.Fatalf("Error reverting env HTTP_PROXY to it's original value. Got err: %s", err)
}
}()
k8sVersion := constants.NewestKubernetesVersion
var tests = []struct {
description string
proxy string
proxyIgnored bool
}{
{
description: "http_proxy=127.0.0.1:3128",
proxy: "127.0.0.1:3128",
proxyIgnored: true,
},
{
description: "http_proxy=localhost:3128",
proxy: "localhost:3128",
proxyIgnored: true,
},
{
description: "http_proxy=1.2.3.4:3128",
proxy: "1.2.3.4:3128",
},
{
description: "no http_proxy",
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
cmd := &cobra.Command{}
if err := os.Setenv("HTTP_PROXY", test.proxy); err != nil {
t.Fatalf("Unexpected error setting HTTP_PROXY: %v", err)
}
config, err := generateCfgFromFlags(cmd, k8sVersion, "none")
if err != nil {
t.Fatalf("Got unexpected error %v during config generation", err)
}
// ignored proxy should not be in config
for _, v := range config.DockerEnv {
if v == test.proxy && test.proxyIgnored {
t.Fatalf("Value %v not expected in dockerEnv but occurred", v)
}
}
})
}
}
| [
"\"HTTP_PROXY\""
] | [] | [
"HTTP_PROXY"
] | [] | ["HTTP_PROXY"] | go | 1 | 0 | |
chat_server/chat_server/wsgi.py | """
WSGI config for chat_server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'chat_server.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
example_problems/pure_java/sum/services/sum_and_product/simple/sum_and_product_server.java | import java.util.Scanner;
import java.util.Map;
import java.util.Random;
public class sum_and_product_server {
public static void main(String[] args) {
Scanner scan = new Scanner(System.in);
String spoon = "";
String numbers = "";
int num_question=0;
int x = 0;
int y = 0;
Random randrange = new Random();
Map <String, String> map = System.getenv();
for (Map.Entry <String, String> entry: map.entrySet()) {
if(entry.getKey().equals("TAL_NUMBERS"))
numbers= entry.getValue();
if(entry.getKey().equals("TAL_NUM_QUESTIONS"))
num_question= Integer.parseInt(entry.getValue());
}
System.out.println(numbers);
System.out.println(num_question);
boolean gen_new_pair = true;
for (int i=0;i<num_question;i++) {
if(numbers.equals("onedigit")) {
x= randrange.nextInt(10);
y=randrange.nextInt(10);
}
else if(numbers.equals("twodigits")) {
x=randrange.nextInt(100);
y=randrange.nextInt(100);
}
else {
x=randrange.nextInt(2*32);
y=randrange.nextInt(2*32);
}
int sum = x+y;
int prod=x*y;
System.out.println(sum + " " + prod);
spoon = scan.nextLine();
while(spoon.charAt(0)=='#') {
spoon=scan.nextLine();
}
int a = Integer.parseInt(spoon.split(" ")[0]);
int b = Integer.parseInt(spoon.split(" ")[1]);
gen_new_pair = false;
if (a+b > x+y)
System.out.println("# No! indeed,"+a+ "+"+b+"="+(a+b)+">"+(x+y)+".");
else if (a+b < x+y)
System.out.println("# No! indeed,"+a+ "+"+b+"="+(a+b)+"<"+(x+y)+".");
else if (a*b > x*y)
System.out.println("# No! indeed,"+a+ "*"+b+"="+(a*b)+">"+(x*y)+".");
else if (a*b < x*y)
System.out.println("# No! indeed,"+a+ "*"+b+"="+(a*b)+">"+(x*y)+".");
else {
System.out.println("# Ok! indeed,"+a+ "+"+b+"="+(a+b)+" and"+a+"*"+b+"="+(a*b)+".");
gen_new_pair = false;
}
}
System.exit(0);
}
}
/*
if gen_new_pair:
if ENV['numbers'] == "onedigit":
x = randrange(10)
y = randrange(10)
elif ENV['numbers'] == "twodigits":
x = randrange(100)
y = randrange(100)
else:
x = randrange(2**32)
y = randrange(2**32)
print(f"? {x+y} {x*y}")
System.exit(0);
}
}
/*
while (true) {
spoon = scan.nextLine();
while (!(spoon.charAt(0) == '?')) {
spoon = scan.nextLine();
}
int s = Integer.parseInt(spoon.split(" ")[1]);
int p = Integer.parseInt(spoon.split(" ")[2]);
int delta = (int) Math.sqrt(s * s - 4 * p);
int x1 = (s - delta) / 2;
int x2 = s - x1;
System.out.println(x1 + " " + x2);
}
}
}
ENV={}
ENV['numbers'] = environ["TAL_numbers"]
ENV['num_questions'] = int(environ["TAL_num_questions"])
print(f"# I will serve: problem=sum, service=sum_and_product, numbers={ENV['numbers']}, num_questions={ENV['num_questions']}.")
gen_new_pair = True
for _ in range(ENV['num_questions']):
if gen_new_pair:
if ENV['numbers'] == "onedigit":
x = randrange(10)
y = randrange(10)
elif ENV['numbers'] == "twodigits":
x = randrange(100)
y = randrange(100)
else:
x = randrange(2**32)
y = randrange(2**32)
print(f"? {x+y} {x*y}")
spoon = input().strip()
while spoon[0] == '#':
spoon = input().strip()
a, b = map(int, spoon.split(" "))
gen_new_pair = False
if a+b > x+y:
print(f"No! indeed, {a}+{b}={a+b} > {x+y}.")
elif a+b < x+y:
print(f"No! indeed, {a}+{b}={a+b} < {x+y}.")
elif a*b > x*y:
print(f"No! indeed, {a}*{b}={a*b} > {x*y}.")
elif a*b < x*y:
print(f"No! indeed, {a}*{b}={a*b} < {x*y}.")
else:
assert (a + b == x+y) and (a * b == x*y)
print(f"Ok! indeed, {a}+{b}={x+y} and {a}*{b}={x*y}.")
gen_new_pair = True
exit(0)
*/
/*
while (true) {
spoon = scan.nextLine();
while (!(spoon.charAt(0) == '?')) {
spoon = scan.nextLine();
}
int s = Integer.parseInt(spoon.split(" ")[1]);
int p = Integer.parseInt(spoon.split(" ")[2]);
int delta = (int) Math.sqrt(s * s - 4 * p);
int x1 = (s - delta) / 2;
int x2 = s - x1;
System.out.println(x1 + " " + x2);
}
}
}
ENV={}
ENV['numbers'] = environ["TAL_numbers"]
ENV['num_questions'] = int(environ["TAL_num_questions"])
print(f"# I will serve: problem=sum, service=sum_and_product, numbers={ENV['numbers']}, num_questions={ENV['num_questions']}.")
gen_new_pair = True
for _ in range(ENV['num_questions']):
if gen_new_pair:
if ENV['numbers'] == "onedigit":
x = randrange(10)
y = randrange(10)
elif ENV['numbers'] == "twodigits":
x = randrange(100)
y = randrange(100)
else:
x = randrange(2**32)
y = randrange(2**32)
print(f"? {x+y} {x*y}")
spoon = input().strip()
while spoon[0] == '#':
spoon = input().strip()
a, b = map(int, spoon.split(" "))
gen_new_pair = False
if a+b > x+y:
print(f"No! indeed, {a}+{b}={a+b} > {x+y}.")
elif a+b < x+y:
print(f"No! indeed, {a}+{b}={a+b} < {x+y}.")
elif a*b > x*y:
print(f"No! indeed, {a}*{b}={a*b} > {x*y}.")
elif a*b < x*y:
print(f"No! indeed, {a}*{b}={a*b} < {x*y}.")
else:
assert (a + b == x+y) and (a * b == x*y)
print(f"Ok! indeed, {a}+{b}={x+y} and {a}*{b}={x*y}.")
gen_new_pair = True
exit(0)
*/
| [] | [] | [] | [] | [] | java | 0 | 0 | |
sdk/keyvault/azure-keyvault-secrets/samples/hello_world_async.py | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import datetime
import os
import asyncio
from azure.keyvault.secrets.aio import SecretClient
from azure.identity.aio import DefaultAzureCredential
from azure.core.exceptions import HttpResponseError
# ----------------------------------------------------------------------------------------------------------
# Prerequisites:
# 1. An Azure Key Vault (https://docs.microsoft.com/en-us/azure/key-vault/quick-create-cli)
#
# 2. azure-keyvault-secrets and azure-identity libraries (pip install these)
#
# 3. Set Environment variables AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET, VAULT_URL
# (See https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/keyvault/azure-keyvault-keys#authenticate-the-client)
#
# ----------------------------------------------------------------------------------------------------------
# Sample - demonstrates the basic CRUD operations on a vault(secret) resource for Azure Key Vault
#
# 1. Create a new secret (set_secret)
#
# 2. Get an existing secret (get_secret)
#
# 3. Update an existing secret's properties (update_secret_properties)
#
# 4. Delete a secret (delete_secret)
#
# ----------------------------------------------------------------------------------------------------------
async def run_sample():
# Instantiate a secret client that will be used to call the service.
# Notice that the client is using default Azure credentials.
# To make default credentials work, ensure that environment variables 'AZURE_CLIENT_ID',
# 'AZURE_CLIENT_SECRET' and 'AZURE_TENANT_ID' are set with the service principal credentials.
VAULT_URL = os.environ["VAULT_URL"]
credential = DefaultAzureCredential()
client = SecretClient(vault_url=VAULT_URL, credential=credential)
try:
# Let's create a secret holding bank account credentials valid for 1 year.
# if the secret already exists in the key vault, then a new version of the secret is created.
print("\n.. Create Secret")
expires_on = datetime.datetime.utcnow() + datetime.timedelta(days=365)
secret = await client.set_secret("helloWorldSecretName", "helloWorldSecretValue", expires_on=expires_on)
print("Secret with name '{0}' created with value '{1}'".format(secret.name, secret.value))
print("Secret with name '{0}' expires on '{1}'".format(secret.name, secret.properties.expires_on))
# Let's get the bank secret using its name
print("\n.. Get a Secret by name")
bank_secret = await client.get_secret(secret.name)
print("Secret with name '{0}' was found with value '{1}'.".format(bank_secret.name, bank_secret.value))
# After one year, the bank account is still active, we need to update the expiry time of the secret.
# The update method can be used to update the expiry attribute of the secret. It cannot be used to update
# the value of the secret.
print("\n.. Update a Secret by name")
expires_on = bank_secret.properties.expires_on + datetime.timedelta(days=365)
updated_secret_properties = await client.update_secret_properties(secret.name, expires_on=expires_on)
print(
"Secret with name '{0}' was updated on date '{1}'".format(
updated_secret_properties.name, updated_secret_properties.updated_on
)
)
print(
"Secret with name '{0}' was updated to expire on '{1}'".format(
updated_secret_properties.name, updated_secret_properties.expires_on
)
)
# Bank forced a password update for security purposes. Let's change the value of the secret in the key vault.
# To achieve this, we need to create a new version of the secret in the key vault. The update operation cannot
# change the value of the secret.
new_secret = await client.set_secret(secret.name, "newSecretValue")
print("Secret with name '{0}' created with value '{1}'".format(new_secret.name, new_secret.value))
# The bank account was closed, need to delete its credentials from the Key Vault.
print("\n.. Deleting Secret...")
deleted_secret = await client.delete_secret(secret.name)
print("Secret with name '{0}' was deleted.".format(deleted_secret.name))
except HttpResponseError as e:
print("\nrun_sample has caught an error. {0}".format(e.message))
finally:
print("\nrun_sample done")
if __name__ == "__main__":
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(run_sample())
loop.close()
except Exception as e:
print("Top level Error: {0}".format(str(e)))
| [] | [] | [
"VAULT_URL"
] | [] | ["VAULT_URL"] | python | 1 | 0 | |
repo/blob/gcs/gcs_storage_test.go | package gcs_test
import (
"bytes"
"compress/gzip"
"encoding/base64"
"io/ioutil"
"os"
"testing"
"github.com/kopia/kopia/internal/blobtesting"
"github.com/kopia/kopia/internal/testlogging"
"github.com/kopia/kopia/internal/testutil"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/blob/gcs"
)
func TestGCSStorage(t *testing.T) {
t.Parallel()
testutil.ProviderTest(t)
bucket := os.Getenv("KOPIA_GCS_TEST_BUCKET")
if bucket == "" {
t.Skip("KOPIA_GCS_TEST_BUCKET not provided")
}
credDataGZ, err := base64.StdEncoding.DecodeString(os.Getenv("KOPIA_GCS_CREDENTIALS_JSON_GZIP"))
if err != nil {
t.Skip("skipping test because GCS credentials file can't be decoded")
}
credData, err := gunzip(credDataGZ)
if err != nil {
t.Skip("skipping test because GCS credentials file can't be unzipped")
}
ctx := testlogging.Context(t)
st, err := gcs.New(ctx, &gcs.Options{
BucketName: bucket,
ServiceAccountCredentialJSON: credData,
})
if err != nil {
t.Fatalf("unable to connect to GCS: %v", err)
}
if err := st.ListBlobs(ctx, "", func(bm blob.Metadata) error {
return st.DeleteBlob(ctx, bm.BlobID)
}); err != nil {
t.Fatalf("unable to clear GCS bucket: %v", err)
}
blobtesting.VerifyStorage(ctx, t, st)
blobtesting.AssertConnectionInfoRoundTrips(ctx, t, st)
// delete everything again
if err := st.ListBlobs(ctx, "", func(bm blob.Metadata) error {
return st.DeleteBlob(ctx, bm.BlobID)
}); err != nil {
t.Fatalf("unable to clear GCS bucket: %v", err)
}
if err := st.Close(ctx); err != nil {
t.Fatalf("err: %v", err)
}
}
func TestGCSStorageInvalid(t *testing.T) {
t.Parallel()
testutil.ProviderTest(t)
bucket := os.Getenv("KOPIA_GCS_TEST_BUCKET")
if bucket == "" {
t.Skip("KOPIA_GCS_TEST_BUCKET not provided")
}
ctx := testlogging.Context(t)
if _, err := gcs.New(ctx, &gcs.Options{
BucketName: bucket + "-no-such-bucket",
ServiceAccountCredentialsFile: os.Getenv("KOPIA_GCS_CREDENTIALS_FILE"),
}); err == nil {
t.Fatalf("unexpected success connecting to GCS, wanted error")
}
}
func gunzip(d []byte) ([]byte, error) {
z, err := gzip.NewReader(bytes.NewReader(d))
if err != nil {
return nil, err
}
defer z.Close()
return ioutil.ReadAll(z)
}
| [
"\"KOPIA_GCS_TEST_BUCKET\"",
"\"KOPIA_GCS_CREDENTIALS_JSON_GZIP\"",
"\"KOPIA_GCS_TEST_BUCKET\"",
"\"KOPIA_GCS_CREDENTIALS_FILE\""
] | [] | [
"KOPIA_GCS_CREDENTIALS_JSON_GZIP",
"KOPIA_GCS_TEST_BUCKET",
"KOPIA_GCS_CREDENTIALS_FILE"
] | [] | ["KOPIA_GCS_CREDENTIALS_JSON_GZIP", "KOPIA_GCS_TEST_BUCKET", "KOPIA_GCS_CREDENTIALS_FILE"] | go | 3 | 0 | |
closed/Alibaba/code/common/system_list.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum, unique
from collections import OrderedDict
import re
import subprocess
import os
NVIDIA_SMI_GPU_REGEX = re.compile(r"GPU (\d+): ([\w\- ]+) \(UUID: (GPU-[0-9a-f\-]+)\)")
"""
re.Pattern: Regex to match nvidia-smi output for GPU information
match(1) - GPU index
match(2) - GPU name
match(3) - GPU UUID
"""
NVIDIA_SMI_MIG_REGEX = re.compile(
r"\s+MIG\s+(\d+)g.(\d+)gb\s+Device\s+(\d+):\s+\(UUID:\s+(MIG-[0-9a-f\-]+)\)"
)
"""
re.Pattern: Regex to match nvidia-smi output for MIG information
match(1) - Number of GPCs in the MIG slice
match(2) - Allocated video memory capacity in the MIG slice in GB
match(3) - MIG device ID
match(4) - MIG instance UUID
"""
@unique
class Architecture(Enum):
Turing = "Turing"
Xavier = "Xavier"
Ampere = "Ampere"
Intel_CPU_x86_64 = "Intel CPU x86_64"
Unknown = "Unknown"
class MIGSlice(object):
def __init__(self, num_gpcs, mem_gb, device_id=None, uuid=None):
"""
Describes a MIG instance. If optional arguments are set, then this MIGSlice describes an active MIG instance. If
optional arguments are not set, then this MIGSlice describes an uninstantiated, but supported MIG instance.
Arguments:
num_gpcs: Number of GPCs in this MIG slice
mem_gb: Allocated video memory capacity in this MIG slice in GB
Optional arguments:
device_id: Device ID of the GPU this MIG is a part of
uuid: UUID of this MIG instance
"""
self.num_gpcs = num_gpcs
self.mem_gb = mem_gb
self.device_id = device_id
self.uuid = uuid
# One cannot be set without the other.
assert (device_id is None) == (uuid is None)
def __str__(self):
return "{:d}g.{:d}gb".format(self.num_gpcs, self.mem_gb)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
def is_active_slice(self):
return self.device_id is None
def get_gpu_uuid(self):
return self.uuid.split("/")[0][4:] # First 4 characters are "MIG-"
def get_gpu_instance_id(self):
return int(self.uuid.split("/")[1])
def get_compute_instance_id(self):
return int(self.uuid.split("/")[2])
class MIGConfiguration(object):
def __init__(self, conf):
"""
Stores information about a system's MIG configuration.
conf: An OrderedDict of gpu_id -> { MIGSlice -> Count }
"""
self.conf = conf
def check_compatible(self, valid_mig_slices):
"""
Given a list of valid MIGSlices, checks if this MIGConfiguration only contains MIGSlices that are described in
the list.
"""
m = {str(mig) for mig in valid_mig_slices}
for gpu_id in self.conf:
for mig in self.conf[gpu_id]:
if str(mig) not in m:
return False
return True
def num_gpus(self):
"""
Returns the number of GPUs with active MIG instances in this MIGConfiguration.
"""
return len(self.conf)
def num_mig_slices(self):
"""
Returns the number of total active MIG instances across all GPUs in this MIGConfiguration
"""
i = 0
for gpu_id in self.conf:
for mig in self.conf[gpu_id]:
i += self.conf[gpu_id][mig]
return i
def __str__(self):
"""
Returns a string that describes this MIG configuration.
Examples:
- For 1x 1-GPC: 1x1g.10gb
- For 1x 1-GPC, 2x 2-GPC, and 3x 3-GPC: 1x1g.10gb_2x2g.20gb_1x3g.30gb
"""
# Add up the slices on each GPU by MIGSlice
flattened = OrderedDict()
for gpu_id in self.conf:
for mig in self.conf[gpu_id]:
if mig not in flattened:
flattened[mig] = 0
flattened[mig] += self.conf[gpu_id][mig]
return "_".join(sorted(["{}x{}".format(flattened[mig], str(mig))]))
@staticmethod
def get_gpu_mig_slice_mapping():
"""
Returns a dict containing mapping between gpu uuid and list of mig slices on that gpu.
"""
p = subprocess.Popen("nvidia-smi -L", universal_newlines=True, shell=True, stdout=subprocess.PIPE)
gpu_mig_slice_mapping = dict()
for line in p.stdout:
gpu_match = NVIDIA_SMI_GPU_REGEX.match(line)
if gpu_match is not None:
gpu_uuid = gpu_match.group(3)
gpu_mig_slice_mapping[gpu_uuid] = []
mig_match = NVIDIA_SMI_MIG_REGEX.match(line)
if mig_match is not None:
gpu_mig_slice_mapping[gpu_uuid].append(MIGSlice(
int(mig_match.group(1)), # num_gpcs
int(mig_match.group(2)), # mem_gb
int(mig_match.group(3)), # device_id
mig_match.group(4) # uuid
))
return gpu_mig_slice_mapping
@staticmethod
def from_nvidia_smi():
visible_gpu_instances = set()
if os.environ.get("CUDA_VISIBLE_DEVICES"):
for g in os.environ.get("CUDA_VISIBLE_DEVICES").split(","):
if g.startswith("MIG"):
visible_gpu_instances.add(g)
gpu_mig_slice_mapping = MIGConfiguration.get_gpu_mig_slice_mapping()
conf = OrderedDict()
for gpu, mig_slices in gpu_mig_slice_mapping.items():
conf[gpu] = {}
for mig_slice in mig_slices:
if (not os.environ.get("CUDA_VISIBLE_DEVICES")) or (
mig_slice.uuid in visible_gpu_instances
):
if mig_slice not in conf[gpu]:
conf[gpu][mig_slice] = 0
conf[gpu][mig_slice] += 1
return MIGConfiguration(conf)
class System(object):
"""
System class contains information on the GPUs used in our submission systems.
gpu: ID of the GPU being used
pci_id: PCI ID of the GPU
arch: Architecture of the GPU
count: Number of GPUs used on the system
mig_conf: MIG configuration (if applicable)
"""
def __init__(self, gpu, arch, count, pci_id=None, mig_conf=None):
self.gpu = gpu
self.arch = arch
self.count = count
self.mig_conf = mig_conf
self.pci_id = pci_id
self.uses_mig = mig_conf is not None
def get_id(self):
sid = f"{self.gpu}x{self.count}" if "Xavier" not in self.gpu else self.gpu
if self.mig_conf is not None:
sid = "{:}-MIG_{:}".format(self.gpu, str(self.mig_conf))
return sid
def __str__(self):
return self.get_id()
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
class SystemClass(object):
def __init__(self, gpu, aliases, pci_ids, arch, supported_counts, valid_mig_slices=None):
"""
SystemClass describes classes of submissions systems with different variations. SystemClass objects are
hardcoded as supported systems and must be defined in KnownSystems below to be recognized a valid system for the
pipeline.
Args:
gpu: ID of the GPU being used, usually the name reported by nvidia-smi
aliases: Different names of cards reported by nvidia-smi that use the same SKUs, i.e. Titan RTX and Quadro
RTX 8000
pci_ids: PCI IDs of cards that match this system configuration that use the same SKUs
arch: Architecture of the GPU
supported_counts: Counts of GPUs for supported multi-GPU systems, i.e. [1, 2, 4] to support 1x, 2x, and 4x
GPU systems
valid_mig_slices: List of supported MIGSlices. None if MIG not supported.
"""
self.gpu = gpu
self.aliases = aliases
self.pci_ids = pci_ids
self.arch = arch
self.supported_counts = supported_counts
self.valid_mig_slices = valid_mig_slices
self.supports_mig = valid_mig_slices is not None
def __str__(self):
return "SystemClass(gpu={}, aliases={}, pci_ids={}, arch={}, counts={})".format(
self.gpu,
self.aliases,
self.pci_ids,
self.arch,
self.supported_counts)
def get_match(self, name, count, pci_id=None, mig_conf=None):
"""
Attempts to match a certain GPU configuration with this SystemClass. If the configuration does not match,
returns None. Otherwise, returns a System object with metadata about the configuration.
mig_conf should be a MIGConfiguration object.
"""
# PCI ID has precedence over name, as pre-release chips often are not named yet in nvidia-smi
gpu_match = False
if pci_id is not None and len(self.pci_ids) > 0:
gpu_match = pci_id in self.pci_ids
# Attempt to match a name if PCI ID is not specified, or if the system has no known PCI IDs
# This is an else block, but we explicitly show condition for clarity
elif pci_id is None or len(self.pci_ids) == 0:
gpu_match = name in self.aliases
if not gpu_match:
return None
# If GPU matches, match the count and mig configs (if applicable)
if count not in self.supported_counts:
return None
if self.supports_mig and mig_conf is not None and not mig_conf.check_compatible(self.valid_mig_slices):
return None
return System(self.gpu, self.arch, count, pci_id=pci_id, mig_conf=mig_conf)
class KnownSystems(object):
"""
Global List of supported systems
"""
A100_PCIe_40GB = SystemClass("A100-PCIe", ["A100-PCIE-40GB"], ["20F1", "20BF"], Architecture.Ampere, [1, 2, 8])
A100_PCIe_80GB = SystemClass("A100-PCIe-80GB", ["A100-PCIE-80GB"], ["PLACEHOLDER. REPLACE ME WITH ACTUAL PCI ID"], Architecture.Ampere, [1, 2, 8]) # Placeholder
A100_SXM4_40GB = SystemClass("A100-SXM4-40GB", ["A100-SXM4-40GB"], ["20B0"], Architecture.Ampere, [1, 8],
valid_mig_slices=[MIGSlice(1, 5), MIGSlice(2, 10), MIGSlice(3, 20)])
A100_SXM_80GB = SystemClass("A100-SXM-80GB", ["A100-SXM-80GB"], ["20B2"], Architecture.Ampere, [1, 4, 8],
valid_mig_slices=[MIGSlice(1, 10), MIGSlice(2, 20), MIGSlice(3, 40)])
GeForceRTX_3080 = SystemClass("GeForceRTX3080", ["GeForce RTX 3080"], ["2206"], Architecture.Ampere, [1])
GeForceRTX_3090 = SystemClass("GeForceRTX3090", ["GeForce RTX 3090", "Quadro RTX A6000", "RTX A6000"],
["2204", "2230"], Architecture.Ampere, [1])
A10 = SystemClass("A10", ["A10"], ["2236"], Architecture.Ampere, [1, 8])
T4 = SystemClass("T4", ["Tesla T4", "T4 32GB"], ["1EB8", "1EB9"], Architecture.Turing, [1, 8, 20])
TitanRTX = SystemClass("TitanRTX", ["TITAN RTX", "Quadro RTX 8000", "Quadro RTX 6000"], ["1E02", "1E30", "1E36"],
Architecture.Turing, [1, 4])
AGX_Xavier = SystemClass("AGX_Xavier", ["Jetson-AGX"], [], Architecture.Xavier, [1])
Xavier_NX = SystemClass("Xavier_NX", ["Xavier NX"], [], Architecture.Xavier, [1])
A30 = SystemClass("A30", ["A30"], ["20B7"], Architecture.Ampere, [1, 8],
valid_mig_slices=[MIGSlice(1, 6), MIGSlice(2, 12), MIGSlice(4, 24)])
# CPU Systems
Triton_CPU_2S_6258R = SystemClass("Triton_CPU_2S_6258R", ["2S_6258R"], [], Architecture.Intel_CPU_x86_64, [1])
Triton_CPU_4S_8380H = SystemClass("Triton_CPU_4S_8380H", ["4S_8380H"], [], Architecture.Intel_CPU_x86_64, [1])
@staticmethod
def get_all_system_classes():
return [
getattr(KnownSystems, attr)
for attr in dir(KnownSystems)
if type(getattr(KnownSystems, attr)) == SystemClass
]
@staticmethod
def get_all_systems():
all_classes = KnownSystems.get_all_system_classes()
all_systems = []
for system_class in all_classes:
for count in system_class.supported_counts:
all_systems.append(System(system_class.gpu, system_class.arch, count))
if count == 1 and system_class.valid_mig_slices is not None:
for mig_slice in system_class.valid_mig_slices:
conf = {"DummyGPU": {mig_slice: 1}}
mig_conf = MIGConfiguration(conf)
all_systems.append(System(system_class.gpu, system_class.arch, count, mig_conf=mig_conf))
# Special handling for 56 MIG DGX-A100
all_systems.append(
System("A100-SXM-80GB", Architecture.Ampere, 8,
mig_conf=MIGConfiguration({"DummyGPU": {MIGSlice(1, 10): 56}}))
)
return all_systems
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
megatron/utils.py | # coding=utf-8
# Copyright (c) 2021 Josh Levy-Kramer <[email protected]>.
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General utilities."""
import os
import sys
import re
import time
import socket
from typing import Dict, List
import requests
import wandb
from wandb import UsageError
import torch
from deepspeed.launcher.runner import fetch_hostfile, parse_inclusion_exclusion
from megatron import print_rank_0
from megatron import mpu
from deepspeed import PipelineEngine, DeepSpeedEngine
from collections import deque
def reduce_losses(losses):
"""Reduce a tensor of losses across all GPUs."""
reduced_losses = torch.cat([loss.clone().detach().view(1) for loss in losses])
torch.distributed.all_reduce(reduced_losses)
reduced_losses = reduced_losses / torch.distributed.get_world_size()
return reduced_losses
def report_memory(name):
"""Simple GPU memory report."""
mega_bytes = 1024.0 * 1024.0
string = name + " memory (MB)"
string += " | allocated: {}".format(torch.cuda.memory_allocated() / mega_bytes)
string += " | max allocated: {}".format(
torch.cuda.max_memory_allocated() / mega_bytes
)
string += " | reserved: {}".format(torch.cuda.memory_reserved() / mega_bytes)
string += " | max reserved: {}".format(
torch.cuda.max_memory_reserved() / mega_bytes
)
print_rank_0(string)
def get_attn_mask(seq_length, device):
"""
Get triangular attention mask for a given sequence length / device.
"""
# lower triangular attention mask
mask = torch.tril(torch.ones(
(1, seq_length, seq_length), device=device)).view(
1, 1, seq_length, seq_length)
# convert to binary
return (mask < 0.5)
def get_ltor_masks_and_position_ids(data,
eod_token,
eod_mask_loss=False):
"""Build masks and position id for left to right model."""
# Extract batch size and sequence length.
batch_size, seq_length = data.size()
# Attention mask (lower triangular).
attention_mask = get_attn_mask(seq_length=seq_length, device=data.device)
# Loss mask.
loss_mask = torch.ones(data.size(), dtype=torch.float, device=data.device)
if eod_mask_loss:
loss_mask[data == eod_token] = 0.0
# Position ids.
position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)
position_ids = position_ids.unsqueeze(0).expand_as(data)
return attention_mask, loss_mask, position_ids
def local_rank():
""" Local rank of process """
local_rank = os.environ.get("LOCAL_RANK")
if local_rank is None:
print(
"utils.local_rank() environment variable LOCAL_RANK not set, defaulting to 0",
flush=True,
)
local_rank = 0
return int(local_rank)
def is_bnb_available():
""" True if bitsandbytes optimizers are available """
return importlib.util.find_spec("bitsandbytes") is not None
def is_local_main():
""" True if is the local main process """
return local_rank() == 0
def is_mp_rank_0():
"""True if mp rank == 0"""
return mpu.get_model_parallel_rank() == 0
def get_wandb_api_key(neox_args):
""" Get Weights and Biases API key from ENV or .netrc file. Otherwise return None """
if "WANDB_LOCAL" in os.environ:
return "LOCAL"
if "WANDB_API_KEY" in os.environ:
return os.environ["WANDB_API_KEY"]
wandb_token = requests.utils.get_netrc_auth(neox_args.wandb_host)
if wandb_token is not None:
return wandb_token[1]
def init_wandb(neox_args):
# Wandb. (one worker per machine)
if neox_args.use_wandb == False:
return
use_wandb = is_local_main() and (get_wandb_api_key(neox_args=neox_args) is not None)
neox_args.update_value("use_wandb", use_wandb)
if neox_args.use_wandb:
group_name = neox_args.wandb_group
name = f"{socket.gethostname()}-{local_rank()}" if group_name else None
try:
wandb.init(
project=neox_args.wandb_project,
group=group_name,
name=name,
save_code=False,
force=False,
entity=neox_args.wandb_team,
)
except UsageError as e:
neox_args.update_value("use_wandb", False)
print(e)
print(
"Skipping wandb. Execute `wandb login` on local or main node machine to enable.",
flush=True,
)
wandb.config.update(neox_args.all_config)
def obtain_resource_pool(
hostfile_path, include_arg, exclude_arg
) -> Dict[str, List[int]]:
"""
Get dict of `resource_pool[hostname] = [list of GPU ranks]` using hostfile, include and exclude args.
Modified from: `deepspeed.launcher.runner.main`
"""
resource_pool = fetch_hostfile(hostfile_path)
if not resource_pool:
resource_pool = {}
device_count = torch.cuda.device_count()
if device_count == 0:
raise RuntimeError("Unable to proceed, no GPU resources available")
resource_pool["localhost"] = device_count
active_resources = parse_inclusion_exclusion(
resource_pool, include_arg, exclude_arg
)
return active_resources
def natural_sort(l):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [convert(c) for c in re.split("([0-9]+)", key)]
return sorted(l, key=alphanum_key)
def ddb(rank=0):
"""
Distributed Debugger that will insert a py debugger on rank `rank` and
pause all other distributed processes until debugging is complete.
:param rank:
"""
if torch.distributed.get_rank() == rank:
from pdb import Pdb
pdb = Pdb(skip=["torch.distributed.*"])
pdb.set_trace(sys._getframe().f_back)
torch.distributed.barrier()
class Timer:
"""Timer."""
def __init__(self, name):
self.name_ = name
self.elapsed_ = 0.0
self.started_ = False
self.start_time = time.time()
def start(self):
"""Start the timer."""
assert not self.started_, "timer has already been started"
torch.cuda.synchronize()
self.start_time = time.time()
self.started_ = True
def stop(self):
"""Stop the timer."""
assert self.started_, "timer is not started"
torch.cuda.synchronize()
self.elapsed_ += time.time() - self.start_time
self.started_ = False
def reset(self):
"""Reset timer."""
self.elapsed_ = 0.0
self.started_ = False
def elapsed(self, reset=True):
"""Calculate the elapsed time."""
started_ = self.started_
# If the timing in progress, end it first.
if self.started_:
self.stop()
# Get the elapsed time.
elapsed_ = self.elapsed_
# Reset the elapsed time
if reset:
self.reset()
# If timing was in progress, set it back.
if started_:
self.start()
return elapsed_
class Timers:
"""Group of timers."""
def __init__(self, use_wandb, tensorboard_writer):
self.timers = {}
self.use_wandb = use_wandb
self.tensorboard_writer = tensorboard_writer
def __call__(self, name):
if name not in self.timers:
self.timers[name] = Timer(name)
return self.timers[name]
def write(self, names, iteration, normalizer=1.0, reset=False):
"""Write timers to a tensorboard writer"""
# currently when using add_scalars,
# torch.utils.add_scalars makes each timer its own run, which
# polutes the runs list, so we just add each as a scalar
assert normalizer > 0.0
for name in names:
value = self.timers[name].elapsed(reset=reset) / normalizer
if self.tensorboard_writer:
self.tensorboard_writer.add_scalar(f"timers/{name}", value, iteration)
if self.use_wandb:
wandb.log({f"timers/{name}": value}, step=iteration)
def log(self, names, normalizer=1.0, reset=True):
"""Log a group of timers."""
assert normalizer > 0.0
string = "time (ms)"
for name in names:
elapsed_time = self.timers[name].elapsed(reset=reset) * 1000.0 / normalizer
string += " | {}: {:.2f}".format(name, elapsed_time)
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
print(string, flush=True)
else:
print(string, flush=True)
def expand_attention_types(attention_config, num_layers):
"""
Expands an `attention_config` list in the following format:
[
[['attention_type_1', ..., `attention_type_n`], 12]
]
to a flattened list of length `num_layers`.
:param params_list:
:return:
"""
# if only strings are found in the config, we assume it's already expanded
if all([isinstance(i, str) for i in attention_config]):
return attention_config
newlist = []
for item in attention_config:
# instead of specifying a number - we can specify 'all' to extend this pattern across all layers
if item[1] == "all":
assert num_layers % len(item[0]) == 0, (
f"Number of layers ({num_layers}) is not divisible by the length "
f"of pattern: {item[0]}"
)
return item[0] * (num_layers // len(item[0]))
for _ in range(item[1]):
newlist.extend(item[0])
return newlist
class OverflowMonitor:
"""
Checks if the past n iterations have been skipped due to overflow, and exits
training if that happens.
"""
def __init__(self, optimizer, n=50):
self.optimizer = optimizer
self.n = n
self.history = deque(maxlen=n)
def check(self, skipped):
self.history.append(skipped)
if (
self.optimizer.overflow
and len(self.history) == self.n
and all(self.history)
):
raise Exception(
f"Skipped {self.n} iterations in a row due to Overflow - Exiting training."
)
def get_noise_scale_logger(neox_args):
if neox_args.log_gradient_noise_scale:
if neox_args.zero_stage >= 1:
raise NotImplementedError(
"Gradient Noise Scale logging does not work with zero stage 2+, as the "
"gradients are distributed across ranks."
)
noise_scale_logger = GradientNoiseScale(
model=model,
batch_size_small=neox_args.train_batch_size,
n_batches=neox_args.gradient_noise_scale_n_batches,
cpu_offload=neox_args.gradient_noise_scale_cpu_offload,
neox_args=neox_args,
mpu=mpu,
)
else:
noise_scale_logger = None
return noise_scale_logger
def get_total_params(model):
# Print number of parameters.
if mpu.get_data_parallel_rank() == 0:
params = sum([p.nelement() for p in model.parameters()])
print(
" > number of parameters on model parallel rank {}: {}".format(
mpu.get_model_parallel_rank(), params
),
flush=True,
)
else:
params = 0
total_n_parameters = torch.tensor([params]).cuda(torch.cuda.current_device())
torch.distributed.all_reduce(total_n_parameters)
total_n_parameters = total_n_parameters.item()
return total_n_parameters
def setup_for_inference_or_eval(
inference=True, get_key_value=True, overwrite_values=None
):
"""
Initializes the model for evaluation or inference (doesn't load optimizer states, etc.) from command line args.
inference: bool
Whether to initialize in inference mode
get_key_value: bool
Whether to use key value caching in inference.
overwrite_values: dict
Optional Values to overwrite in the model config.
"""
from megatron.neox_arguments import NeoXArgs
from megatron.initialize import initialize_megatron
from megatron.training import setup_model_and_optimizer
_overwrite_values = {
"checkpoint_activations": False,
"partition_activations": False,
"no_load_optim": True,
'zero_optimization': None, # disable zero optimization (won't be used in inference, and loading zero optimizer can cause errors)
}
if overwrite_values:
_overwrite_values.update(overwrite_values)
neox_args = NeoXArgs.consume_neox_args(overwrite_values=_overwrite_values)
neox_args.configure_distributed_args()
neox_args.build_tokenizer()
if neox_args.load is None:
raise ValueError("`load` parameter must be supplied to load a model`")
# initialize megatron
initialize_megatron(neox_args)
# set up model and load checkpoint.
model, _, _ = setup_model_and_optimizer(
neox_args=neox_args, inference=inference, get_key_value=get_key_value
) # we use setup_model_and_optimizer instead of get_model in order to initialize deepspeed
print_rank_0("Finished loading model")
return model, neox_args
class CharCounter:
"""
Wraps the data_iterator to count the number of characters in a batch
"""
def __init__(self, data_iterator, tokenizer):
self.tokenizer = tokenizer
self.data_iterator = data_iterator
self.char_count = 0
self.batch_count = 0
self.token_count = 0
self.total_time = 0
def tokens_per_char(self):
return self.token_count / self.char_count
def __iter__(self):
return self
def __next__(self):
start = time.time()
batch = self.data_iterator.__next__()
for b in batch["text"]:
self.token_count += len(b)
self.char_count += len(self.tokenizer.detokenize(b.tolist()))
self.batch_count += 1
end = time.time()
self.total_time += end - start
return batch
| [] | [] | [
"LOCAL_RANK",
"WANDB_API_KEY"
] | [] | ["LOCAL_RANK", "WANDB_API_KEY"] | python | 2 | 0 | |
providers/dropbox/dropbox_test.go | package dropbox
import (
"net/http"
"net/http/httptest"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/yoyoyard/goth"
)
func provider() *Provider {
return New(os.Getenv("DROPBOX_KEY"), os.Getenv("DROPBOX_SECRET"), "/foo", "email")
}
func Test_New(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
a.Equal(p.ClientKey, os.Getenv("DROPBOX_KEY"))
a.Equal(p.Secret, os.Getenv("DROPBOX_SECRET"))
a.Equal(p.CallbackURL, "/foo")
}
func Test_Implements_Provider(t *testing.T) {
t.Parallel()
a := assert.New(t)
a.Implements((*goth.Provider)(nil), provider())
}
func Test_ImplementsSession(t *testing.T) {
t.Parallel()
a := assert.New(t)
s := &Session{}
a.Implements((*goth.Session)(nil), s)
}
func Test_BeginAuth(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
session, err := p.BeginAuth("test_state")
s := session.(*Session)
a.NoError(err)
a.Contains(s.AuthURL, "www.dropbox.com/oauth2/authorize")
}
func Test_FetchUser(t *testing.T) {
accountPath := "/2/users/get_current_account"
t.Parallel()
a := assert.New(t)
p := provider()
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
a.Equal(r.Header.Get("Authorization"), "Bearer 1234567890")
a.Equal(r.Method, "POST")
a.Equal(r.URL.Path, accountPath)
w.Write([]byte(testAccountResponse))
}))
p.AccountURL = ts.URL + accountPath
// AuthURL is superfluous for this test but ok
session, err := p.UnmarshalSession(`{"AuthURL":"https://www.dropbox.com/oauth2/authorize","Token":"1234567890"}`)
a.NoError(err)
user, err := p.FetchUser(session)
a.NoError(err)
a.Equal(user.UserID, "dbid:AAH4f99T0taONIb-OurWxbNQ6ywGRopQngc")
a.Equal(user.FirstName, "Franz")
a.Equal(user.LastName, "Ferdinand")
a.Equal(user.Name, "Franz Ferdinand")
a.Equal(user.Description, "Franz Ferdinand (Personal)")
a.Equal(user.NickName, "[email protected]")
a.Equal(user.Email, "[email protected]")
a.Equal(user.Location, "US")
a.Equal(user.AccessToken, "1234567890")
a.Equal(user.AccessTokenSecret, "")
a.Equal(user.AvatarURL, "https://dl-web.dropbox.com/account_photo/get/dbid%3AAAH4f99T0taONIb-OurWxbNQ6ywGRopQngc?vers=1453416673259\u0026size=128x128")
a.Equal(user.Provider, "dropbox")
a.Len(user.RawData, 14)
}
func Test_SessionFromJSON(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
session, err := p.UnmarshalSession(`{"AuthURL":"https://www.dropbox.com/oauth2/authorize","Token":"1234567890"}`)
a.NoError(err)
s := session.(*Session)
a.Equal(s.AuthURL, "https://www.dropbox.com/oauth2/authorize")
a.Equal(s.Token, "1234567890")
}
func Test_SessionToJSON(t *testing.T) {
t.Parallel()
a := assert.New(t)
s := &Session{}
data := s.Marshal()
a.Equal(data, `{"AuthURL":"","Token":""}`)
}
func Test_GetAuthURL(t *testing.T) {
t.Parallel()
a := assert.New(t)
s := &Session{}
_, err := s.GetAuthURL()
a.Error(err)
s.AuthURL = "/foo"
url, _ := s.GetAuthURL()
a.Equal(url, "/foo")
}
var testAccountResponse = `
{
"account_id": "dbid:AAH4f99T0taONIb-OurWxbNQ6ywGRopQngc",
"name": {
"given_name": "Franz",
"surname": "Ferdinand",
"familiar_name": "Franz",
"display_name": "Franz Ferdinand (Personal)",
"abbreviated_name": "FF"
},
"email": "[email protected]",
"email_verified": true,
"disabled": false,
"locale": "en",
"referral_link": "https://db.tt/ZITNuhtI",
"is_paired": true,
"account_type": {
".tag": "business"
},
"root_info": {
".tag": "user",
"root_namespace_id": "3235641",
"home_namespace_id": "3235641"
},
"country": "US",
"team": {
"id": "dbtid:AAFdgehTzw7WlXhZJsbGCLePe8RvQGYDr-I",
"name": "Acme, Inc.",
"sharing_policies": {
"shared_folder_member_policy": {
".tag": "team"
},
"shared_folder_join_policy": {
".tag": "from_anyone"
},
"shared_link_create_policy": {
".tag": "team_only"
}
},
"office_addin_policy": {
".tag": "disabled"
}
},
"profile_photo_url": "https://dl-web.dropbox.com/account_photo/get/dbid%3AAAH4f99T0taONIb-OurWxbNQ6ywGRopQngc?vers=1453416673259\u0026size=128x128",
"team_member_id": "dbmid:AAHhy7WsR0x-u4ZCqiDl5Fz5zvuL3kmspwU"
}
`
| [
"\"DROPBOX_KEY\"",
"\"DROPBOX_SECRET\"",
"\"DROPBOX_KEY\"",
"\"DROPBOX_SECRET\""
] | [] | [
"DROPBOX_SECRET",
"DROPBOX_KEY"
] | [] | ["DROPBOX_SECRET", "DROPBOX_KEY"] | go | 2 | 0 | |
http-echo-test/httpd.go | package main
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"sort"
"strings"
)
var hostname string
func requestHandler(response http.ResponseWriter, request *http.Request) {
fmt.Fprintf(response, "Server: %v\n\n", hostname)
fmt.Fprintln(response, "Method:", request.Method)
fmt.Fprintln(response, "Host:", request.Host)
fmt.Fprintln(response, "URL:", request.URL)
fmt.Fprintln(response, "Protocol:", request.Proto)
fmt.Fprintln(response, "Client:", request.RemoteAddr)
fmt.Fprintln(response, "\nHeaders:\n--------")
var headers []string
for name := range request.Header {
headers = append(headers, name)
}
sort.Strings(headers)
for _, name := range headers {
fmt.Fprintf(response, "%v: %v\n", name, strings.Join(request.Header[name], ", "))
}
_ = request.ParseMultipartForm(100000)
if len(request.PostForm) > 0 {
fmt.Fprintln(response, "\nForm data:\n----------")
for key, value := range request.PostForm {
fmt.Fprintf(response, "%v: %v\n", key, value)
}
}
body, err := ioutil.ReadAll(request.Body)
if err != nil {
log.Println("Error while reading request body:", err)
}
if len(body) > 0 {
fmt.Fprintln(response, "\nRequest body:\n-------------")
fmt.Fprintln(response, string(body))
}
log.Printf("%v %v (%v)\n", request.Method, request.URL, request.RemoteAddr)
}
func main() {
// Default listen port
listenPort := "8080"
var err error
if os.Getenv("PORT") != "" {
listenPort = os.Getenv("PORT")
if err != nil {
log.Fatalln(err)
}
}
if len(os.Args) > 1 {
listenPort = os.Args[1]
if err != nil {
log.Fatalln(err)
}
}
hostname, _ = os.Hostname()
log.Printf("Listening on http://0.0.0.0:%v\n", listenPort)
http.HandleFunc("/", requestHandler)
http.ListenAndServe(":"+listenPort, nil)
}
| [
"\"PORT\"",
"\"PORT\""
] | [] | [
"PORT"
] | [] | ["PORT"] | go | 1 | 0 | |
MySite/wsgi.py | """
WSGI config for MySite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MySite.settings")
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
pkg/rootless/rootless_linux.go | // +build linux
package rootless
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
gosignal "os/signal"
"os/user"
"runtime"
"strconv"
"sync"
"syscall"
"unsafe"
"github.com/containers/storage/pkg/idtools"
"github.com/docker/docker/pkg/signal"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
/*
#cgo remoteclient CFLAGS: -DDISABLE_JOIN_SHORTCUT
#include <stdlib.h>
extern uid_t rootless_uid();
extern uid_t rootless_gid();
extern int reexec_in_user_namespace(int ready, char *pause_pid_file_path);
extern int reexec_in_user_namespace_wait(int pid);
extern int reexec_userns_join(int userns, int mountns, char *pause_pid_file_path);
*/
import "C"
const (
numSig = 65 // max number of signals
)
func runInUser() error {
os.Setenv("_CONTAINERS_USERNS_CONFIGURED", "done")
return nil
}
var (
isRootlessOnce sync.Once
isRootless bool
)
// IsRootless tells us if we are running in rootless mode
func IsRootless() bool {
isRootlessOnce.Do(func() {
rootlessUIDInit := int(C.rootless_uid())
rootlessGIDInit := int(C.rootless_gid())
if rootlessUIDInit != 0 {
// This happens if we joined the user+mount namespace as part of
os.Setenv("_CONTAINERS_USERNS_CONFIGURED", "done")
os.Setenv("_CONTAINERS_ROOTLESS_UID", fmt.Sprintf("%d", rootlessUIDInit))
os.Setenv("_CONTAINERS_ROOTLESS_GID", fmt.Sprintf("%d", rootlessGIDInit))
}
isRootless = os.Geteuid() != 0 || os.Getenv("_CONTAINERS_USERNS_CONFIGURED") != ""
})
return isRootless
}
// GetRootlessUID returns the UID of the user in the parent userNS
func GetRootlessUID() int {
uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
if uidEnv != "" {
u, _ := strconv.Atoi(uidEnv)
return u
}
return os.Geteuid()
}
// GetRootlessGID returns the GID of the user in the parent userNS
func GetRootlessGID() int {
gidEnv := os.Getenv("_CONTAINERS_ROOTLESS_GID")
if gidEnv != "" {
u, _ := strconv.Atoi(gidEnv)
return u
}
/* If the _CONTAINERS_ROOTLESS_UID is set, assume the gid==uid. */
uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
if uidEnv != "" {
u, _ := strconv.Atoi(uidEnv)
return u
}
return os.Getegid()
}
func tryMappingTool(tool string, pid int, hostID int, mappings []idtools.IDMap) error {
path, err := exec.LookPath(tool)
if err != nil {
return errors.Wrapf(err, "cannot find %s", tool)
}
appendTriplet := func(l []string, a, b, c int) []string {
return append(l, fmt.Sprintf("%d", a), fmt.Sprintf("%d", b), fmt.Sprintf("%d", c))
}
args := []string{path, fmt.Sprintf("%d", pid)}
args = appendTriplet(args, 0, hostID, 1)
if mappings != nil {
for _, i := range mappings {
args = appendTriplet(args, i.ContainerID+1, i.HostID, i.Size)
}
}
cmd := exec.Cmd{
Path: path,
Args: args,
}
if output, err := cmd.CombinedOutput(); err != nil {
logrus.Debugf("error from %s: %s", tool, output)
return errors.Wrapf(err, "cannot setup namespace using %s", tool)
}
return nil
}
func readUserNs(path string) (string, error) {
b := make([]byte, 256)
_, err := syscall.Readlink(path, b)
if err != nil {
return "", err
}
return string(b), nil
}
func readUserNsFd(fd uintptr) (string, error) {
return readUserNs(fmt.Sprintf("/proc/self/fd/%d", fd))
}
func getParentUserNs(fd uintptr) (uintptr, error) {
const nsGetParent = 0xb702
ret, _, errno := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(nsGetParent), 0)
if errno != 0 {
return 0, errno
}
return (uintptr)(unsafe.Pointer(ret)), nil
}
// getUserNSFirstChild returns an open FD for the first direct child user namespace that created the process
// Each container creates a new user namespace where the runtime runs. The current process in the container
// might have created new user namespaces that are child of the initial namespace we created.
// This function finds the initial namespace created for the container that is a child of the current namespace.
//
// current ns
// / \
// TARGET -> a [other containers]
// /
// b
// /
// NS READ USING THE PID -> c
func getUserNSFirstChild(fd uintptr) (*os.File, error) {
currentNS, err := readUserNs("/proc/self/ns/user")
if err != nil {
return nil, err
}
ns, err := readUserNsFd(fd)
if err != nil {
return nil, errors.Wrapf(err, "cannot read user namespace")
}
if ns == currentNS {
return nil, errors.New("process running in the same user namespace")
}
for {
nextFd, err := getParentUserNs(fd)
if err != nil {
return nil, errors.Wrapf(err, "cannot get parent user namespace")
}
ns, err = readUserNsFd(nextFd)
if err != nil {
return nil, errors.Wrapf(err, "cannot read user namespace")
}
if ns == currentNS {
syscall.Close(int(nextFd))
// Drop O_CLOEXEC for the fd.
_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, fd, syscall.F_SETFD, 0)
if errno != 0 {
syscall.Close(int(fd))
return nil, errno
}
return os.NewFile(fd, "userns child"), nil
}
syscall.Close(int(fd))
fd = nextFd
}
}
// JoinUserAndMountNS re-exec podman in a new userNS and join the user and mount
// namespace of the specified PID without looking up its parent. Useful to join directly
// the conmon process.
func JoinUserAndMountNS(pid uint, pausePid string) (bool, int, error) {
if os.Geteuid() == 0 || os.Getenv("_CONTAINERS_USERNS_CONFIGURED") != "" {
return false, -1, nil
}
cPausePid := C.CString(pausePid)
defer C.free(unsafe.Pointer(cPausePid))
userNS, err := os.Open(fmt.Sprintf("/proc/%d/ns/user", pid))
if err != nil {
return false, -1, err
}
defer userNS.Close()
mountNS, err := os.Open(fmt.Sprintf("/proc/%d/ns/mnt", pid))
if err != nil {
return false, -1, err
}
defer userNS.Close()
fd, err := getUserNSFirstChild(userNS.Fd())
if err != nil {
return false, -1, err
}
pidC := C.reexec_userns_join(C.int(fd.Fd()), C.int(mountNS.Fd()), cPausePid)
if int(pidC) < 0 {
return false, -1, errors.Errorf("cannot re-exec process")
}
ret := C.reexec_in_user_namespace_wait(pidC)
if ret < 0 {
return false, -1, errors.New("error waiting for the re-exec process")
}
return true, int(ret), nil
}
// BecomeRootInUserNS re-exec podman in a new userNS. It returns whether podman was re-executed
// into a new user namespace and the return code from the re-executed podman process.
// If podman was re-executed the caller needs to propagate the error code returned by the child
// process.
func BecomeRootInUserNS(pausePid string) (bool, int, error) {
if os.Geteuid() == 0 || os.Getenv("_CONTAINERS_USERNS_CONFIGURED") != "" {
if os.Getenv("_CONTAINERS_USERNS_CONFIGURED") == "init" {
return false, 0, runInUser()
}
return false, 0, nil
}
cPausePid := C.CString(pausePid)
defer C.free(unsafe.Pointer(cPausePid))
runtime.LockOSThread()
defer runtime.UnlockOSThread()
fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_DGRAM, 0)
if err != nil {
return false, -1, err
}
r, w := os.NewFile(uintptr(fds[0]), "sync host"), os.NewFile(uintptr(fds[1]), "sync child")
defer r.Close()
defer w.Close()
defer w.Write([]byte("0"))
pidC := C.reexec_in_user_namespace(C.int(r.Fd()), cPausePid)
pid := int(pidC)
if pid < 0 {
return false, -1, errors.Errorf("cannot re-exec process")
}
var uids, gids []idtools.IDMap
username := os.Getenv("USER")
if username == "" {
user, err := user.LookupId(fmt.Sprintf("%d", os.Getuid()))
if err == nil {
username = user.Username
}
}
mappings, err := idtools.NewIDMappings(username, username)
if err != nil {
logrus.Warnf("cannot find mappings for user %s: %v", username, err)
} else {
uids = mappings.UIDs()
gids = mappings.GIDs()
}
uidsMapped := false
if mappings != nil && uids != nil {
err := tryMappingTool("newuidmap", pid, os.Getuid(), uids)
uidsMapped = err == nil
}
if !uidsMapped {
logrus.Warnf("using rootless single mapping into the namespace. This might break some images. Check /etc/subuid and /etc/subgid for adding subids")
setgroups := fmt.Sprintf("/proc/%d/setgroups", pid)
err = ioutil.WriteFile(setgroups, []byte("deny\n"), 0666)
if err != nil {
return false, -1, errors.Wrapf(err, "cannot write setgroups file")
}
uidMap := fmt.Sprintf("/proc/%d/uid_map", pid)
err = ioutil.WriteFile(uidMap, []byte(fmt.Sprintf("%d %d 1\n", 0, os.Getuid())), 0666)
if err != nil {
return false, -1, errors.Wrapf(err, "cannot write uid_map")
}
}
gidsMapped := false
if mappings != nil && gids != nil {
err := tryMappingTool("newgidmap", pid, os.Getgid(), gids)
gidsMapped = err == nil
}
if !gidsMapped {
gidMap := fmt.Sprintf("/proc/%d/gid_map", pid)
err = ioutil.WriteFile(gidMap, []byte(fmt.Sprintf("%d %d 1\n", 0, os.Getgid())), 0666)
if err != nil {
return false, -1, errors.Wrapf(err, "cannot write gid_map")
}
}
_, err = w.Write([]byte("0"))
if err != nil {
return false, -1, errors.Wrapf(err, "write to sync pipe")
}
b := make([]byte, 1, 1)
_, err = w.Read(b)
if err != nil {
return false, -1, errors.Wrapf(err, "read from sync pipe")
}
if b[0] == '2' {
// We have lost the race for writing the PID file, as probably another
// process created a namespace and wrote the PID.
// Try to join it.
data, err := ioutil.ReadFile(pausePid)
if err == nil {
pid, err := strconv.ParseUint(string(data), 10, 0)
if err == nil {
return JoinUserAndMountNS(uint(pid), "")
}
}
return false, -1, errors.Wrapf(err, "error setting up the process")
}
if b[0] != '0' {
return false, -1, errors.Wrapf(err, "error setting up the process")
}
c := make(chan os.Signal, 1)
signals := []os.Signal{}
for sig := 0; sig < numSig; sig++ {
if sig == int(syscall.SIGTSTP) {
continue
}
signals = append(signals, syscall.Signal(sig))
}
gosignal.Notify(c, signals...)
defer gosignal.Reset()
go func() {
for s := range c {
if s == signal.SIGCHLD || s == signal.SIGPIPE {
continue
}
syscall.Kill(int(pidC), s.(syscall.Signal))
}
}()
ret := C.reexec_in_user_namespace_wait(pidC)
if ret < 0 {
return false, -1, errors.New("error waiting for the re-exec process")
}
return true, int(ret), nil
}
| [
"\"_CONTAINERS_USERNS_CONFIGURED\"",
"\"_CONTAINERS_ROOTLESS_UID\"",
"\"_CONTAINERS_ROOTLESS_GID\"",
"\"_CONTAINERS_ROOTLESS_UID\"",
"\"_CONTAINERS_USERNS_CONFIGURED\"",
"\"_CONTAINERS_USERNS_CONFIGURED\"",
"\"_CONTAINERS_USERNS_CONFIGURED\"",
"\"USER\""
] | [] | [
"_CONTAINERS_ROOTLESS_UID",
"_CONTAINERS_USERNS_CONFIGURED",
"USER",
"_CONTAINERS_ROOTLESS_GID"
] | [] | ["_CONTAINERS_ROOTLESS_UID", "_CONTAINERS_USERNS_CONFIGURED", "USER", "_CONTAINERS_ROOTLESS_GID"] | go | 4 | 0 | |
orders/src/get_order/main.py | """
GetOrdersFunction
"""
import os
from typing import Optional
import boto3
from aws_lambda_powertools.tracing import Tracer # pylint: disable=import-error
from aws_lambda_powertools.logging import logger_setup, logger_inject_lambda_context # pylint: disable=import-error
from ecom.apigateway import iam_user_id, response # pylint: disable=import-error
ENVIRONMENT = os.environ["ENVIRONMENT"]
TABLE_NAME = os.environ["TABLE_NAME"]
dynamodb = boto3.resource("dynamodb") # pylint: disable=invalid-name
table = dynamodb.Table(TABLE_NAME) # pylint: disable=invalid-name,no-member
logger = logger_setup() # pylint: disable=invalid-name
tracer = Tracer() # pylint: disable=invalid-name
@tracer.capture_method
def get_order(order_id: str) -> Optional[dict]:
"""
Returns order from DynamoDB
"""
# Send request to DynamoDB
res = table.get_item(Key={"orderId": order_id}) # pylint: disable=no-member
order = res.get("Item", None)
# Log retrieved informations
if order is None:
logger.warning({
"message": "No order retrieved for the order ID",
"orderId": order_id
})
else:
logger.debug({
"message": "Order retrieved",
"order": order
})
return order
@logger_inject_lambda_context
@tracer.capture_lambda_handler
def handler(event, _):
"""
Lambda function handler for GetOrder
"""
logger.debug({"message": "Event received", "event": event})
# Retrieve the userId
user_id = iam_user_id(event)
if user_id is not None:
logger.info({"message": "Received get order from IAM user", "userArn": user_id})
tracer.put_annotation("userArn", user_id)
tracer.put_annotation("iamUser", True)
iam_user = True
else:
logger.warning({"message": "User ID not found in event"})
return response("Unauthorized", 401)
# Retrieve the orderId
try:
order_id = event["pathParameters"]["orderId"]
except (KeyError, TypeError):
logger.warning({"message": "Order ID not found in event"})
return response("Missing orderId", 400)
# Retrieve the order from DynamoDB
order = get_order(order_id)
# Check that the order can be sent to the user
# This includes both when the item is not found and when the user IDs do
# not match.
if order is None or (not iam_user and user_id != order["userId"]):
return response("Order not found", 404)
# Send the response
return response(order)
| [] | [] | [
"ENVIRONMENT",
"TABLE_NAME"
] | [] | ["ENVIRONMENT", "TABLE_NAME"] | python | 2 | 0 | |
tools/predict.py | # -*- coding: utf-8 -*-
# @Time : 2019/8/24 12:06
# @Author : zhoujun
import os
import sys
import pathlib
import struct
__dir__ = pathlib.Path(os.path.abspath(__file__))
sys.path.append(str(__dir__))
sys.path.append(str(__dir__.parent.parent))
# project = 'DBNet.pytorch' # 工作项目根目录
# sys.path.append(os.getcwd().split(project)[0] + project)
import time
import cv2
import torch
from data_loader import get_transforms
from models import build_model
from post_processing import get_post_processing
def resize_image(img, short_size):
height, width, _ = img.shape
if height < width:
new_height = short_size
new_width = new_height / height * width
else:
new_width = short_size
new_height = new_width / width * height
new_height = int(round(new_height / 32) * 32)
new_width = int(round(new_width / 32) * 32)
resized_img = cv2.resize(img, (new_width, new_height))
return resized_img
class Pytorch_model:
def __init__(self, model_path, post_p_thre=0.7, gpu_id=None, save_wts=False):
'''
初始化pytorch模型
:param model_path: 模型地址(可以是模型的参数或者参数和计算图一起保存的文件)
:param gpu_id: 在哪一块gpu上运行
'''
self.gpu_id = gpu_id
if self.gpu_id is not None and isinstance(self.gpu_id, int) and torch.cuda.is_available():
self.device = torch.device("cuda:%s" % self.gpu_id)
else:
self.device = torch.device("cpu")
print('device:', self.device)
checkpoint = torch.load(model_path, map_location=self.device)
config = checkpoint['config']
config['arch']['backbone']['pretrained'] = False
self.model = build_model(config['arch'])
self.post_process = get_post_processing(config['post_processing'])
self.post_process.box_thresh = post_p_thre
self.img_mode = config['dataset']['train']['dataset']['args']['img_mode']
self.model.load_state_dict(checkpoint['state_dict'])
self.model.to(self.device)
self.model.eval()
# 保存wts
# save wts
if save_wts:
f = open('DBNet.wts', 'w')
f.write('{}\n'.format(len(self.model.state_dict().keys())))
for k, v in self.model.state_dict().items():
vr = v.reshape(-1).cpu().numpy()
f.write('{} {} '.format(k, len(vr)))
for vv in vr:
f.write(' ')
f.write(struct.pack('>f', float(vv)).hex())
f.write('\n')
self.transform = []
for t in config['dataset']['train']['dataset']['args']['transforms']:
if t['type'] in ['ToTensor', 'Normalize']:
self.transform.append(t)
self.transform = get_transforms(self.transform)
def predict(self, img_path: str, is_output_polygon=False, short_size: int = 1024):
'''
对传入的图像进行预测,支持图像地址,opecv 读取图片,偏慢
:param img_path: 图像地址
:param is_numpy:
:return:
'''
assert os.path.exists(img_path), 'file is not exists'
img = cv2.imread(img_path, 1 if self.img_mode != 'GRAY' else 0)
#img = cv2.imread("E:\\Datasets\\ICDAR2015\\test\\img\\img_10.jpg", 1 if self.img_mode != 'GRAY' else 0)
if self.img_mode == 'RGB':
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
h, w = img.shape[:2]
#img = resize_image(img, short_size)
img = cv2.resize(img, (640, 640))
# 将图片由(w,h)变为(1,img_channel,h,w)
tensor = self.transform(img)
tensor = tensor.unsqueeze_(0)
tensor = tensor.to(self.device)
batch = {'shape': [(h, w)]}
with torch.no_grad():
if str(self.device).__contains__('cuda'):
torch.cuda.synchronize(self.device)
start = time.clock()
preds = self.model(tensor)
t = time.clock() - start
print("infer time: (ms) ", t*1000)
if str(self.device).__contains__('cuda'):
torch.cuda.synchronize(self.device)
box_list, score_list = self.post_process(batch, preds, is_output_polygon=is_output_polygon)
box_list, score_list = box_list[0], score_list[0]
if len(box_list) > 0:
if is_output_polygon:
idx = [x.sum() > 0 for x in box_list]
box_list = [box_list[i] for i, v in enumerate(idx) if v]
score_list = [score_list[i] for i, v in enumerate(idx) if v]
else:
idx = box_list.reshape(box_list.shape[0], -1).sum(axis=1) > 0 # 去掉全为0的框
box_list, score_list = box_list[idx], score_list[idx]
else:
box_list, score_list = [], []
t = time.time() - start
return preds[0, 0, :, :].detach().cpu().numpy(), box_list, score_list, t
def export_onnx(self):
img = torch.zeros((1, 3, 640, 640)).cuda() # image size(1,3,320,192) iDetection
# tensor = self.transform(img)
# tensor = tensor.unsqueeze_(0)
#tensor = img.to(self.device)
y = self.model(img)
try:
import onnx
print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
f = ("mode.onnx") # filename
#self.model.fuse() # only for ONNX
torch.onnx.export(self.model, img, f, verbose=False, opset_version=12, input_names=['images'],
output_names=['classes', 'boxes'] if y is None else ['output'])
# Checks
onnx_model = onnx.load(f) # load onnx model
onnx.checker.check_model(onnx_model) # check onnx model
print(onnx.helper.printable_graph(onnx_model.graph)) # print a human readable model
print('ONNX export success, saved as %s' % f)
except Exception as e:
print('ONNX export failure: %s' % e)
def save_depoly(model, input, save_path):
traced_script_model = torch.jit.trace(model, input)
traced_script_model.save(save_path)
def init_args():
import argparse
parser = argparse.ArgumentParser(description='DBNet.pytorch')
parser.add_argument('--model_path', default=r'E:\LearningCodes\DBNET\DBNet.pytorch\model_best.pth', type=str)
parser.add_argument('--input_folder', default=r'E:\Datasets\ICDAR2015\test\img', type=str, help='img path for predict')
parser.add_argument('--output_folder', default='./test/output', type=str, help='img path for output')
parser.add_argument('--thre', default=0.3,type=float, help='the thresh of post_processing')
parser.add_argument('--polygon', action='store_true', help='output polygon or box')
parser.add_argument('--show', action='store_true', help='show result')
parser.add_argument('--save_resut', action='store_true', help='save box and score to txt file')
parser.add_argument('--save_wts', default=False, help='save box and score to txt file')
parser.add_argument('--onnx', default=False, help='save box and score to txt file')
args = parser.parse_args()
return args
if __name__ == '__main__':
import pathlib
from tqdm import tqdm
import matplotlib.pyplot as plt
from utils.util import show_img, draw_bbox, save_result, get_file_list
args = init_args()
print(args)
os.environ['CUDA_VISIBLE_DEVICES'] = str('0')
# 初始化网络
model = Pytorch_model(args.model_path, post_p_thre=args.thre, gpu_id=0, save_wts=args.save_wts)
if(args.onnx):
model.export_onnx()
if(args.save_wts):
exit(0)
img_folder = pathlib.Path(args.input_folder)
for img_path in tqdm(get_file_list(args.input_folder, p_postfix=['.jpg'])):
preds, boxes_list, score_list, t = model.predict(img_path, is_output_polygon=args.polygon)
img = draw_bbox(cv2.imread(img_path)[:, :, ::-1], boxes_list)
if args.show:
show_img(preds)
show_img(img, title=os.path.basename(img_path))
plt.show()
# 保存结果到路径
os.makedirs(args.output_folder, exist_ok=True)
img_path = pathlib.Path(img_path)
output_path = os.path.join(args.output_folder, img_path.stem + '_result.jpg')
pred_path = os.path.join(args.output_folder, img_path.stem + '_pred.jpg')
cv2.imwrite(output_path, img[:, :, ::-1])
cv2.imwrite(pred_path, preds * 255)
save_result(output_path.replace('_result.jpg', '.txt'), boxes_list, score_list, args.polygon)
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
main.go | package main
import (
"fmt"
"log"
"net/http"
"os"
// Import gorm and postgres
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/postgres"
// Import go-twitter modules
"github.com/dghubble/go-twitter/twitter"
"github.com/dghubble/oauth1"
// Import echo and autoload
_ "github.com/joho/godotenv/autoload"
"github.com/labstack/echo"
)
// Credentials stores all of our access/consumer tokens and secret keys needed
// for authentication against the twitter REST API.
type Credentials struct {
ConsumerKey string
ConsumerSecret string
AccessToken string
AccessTokenSecret string
}
// GetClient is a helper function that will return a twitter client
// that we can subsequently use to send tweets, or to stream new tweets
func GetClient(creds *Credentials) (*twitter.Client, error) {
// Pass in the consumer key (API Key) and your Consumer Secret (API Secret)
config := oauth1.NewConfig(creds.ConsumerKey, creds.ConsumerSecret)
// Pass in the Access Token and the Access Token Secret
token := oauth1.NewToken(creds.AccessToken, creds.AccessTokenSecret)
httpClient := config.Client(oauth1.NoContext, token)
client := twitter.NewClient(httpClient)
// Verify Credentials
verifyParams := &twitter.AccountVerifyParams{
SkipStatus: twitter.Bool(true),
IncludeEmail: twitter.Bool(true),
}
user, _, err := client.Accounts.VerifyCredentials(verifyParams)
if err != nil {
return nil, err
}
log.Printf("User's Account:\n%+v\n", user)
return client, nil
}
// GetCreds creates and returns a struct that gets the required environment
// variables for user authentication with the Twitter API
func GetCreds() Credentials {
creds := Credentials{
AccessToken: os.Getenv("ACCESS_TOKEN"),
AccessTokenSecret: os.Getenv("ACCESS_TOKEN_SECRET"),
ConsumerKey: os.Getenv("CONSUMER_KEY"),
ConsumerSecret: os.Getenv("CONSUMER_SECRET"),
}
return creds
}
// SendTweet sends a tweet with the specified text passed in as a string and
// returns a Tweet object.
func SendTweet(client *twitter.Client, tweetText string) *twitter.Tweet {
tweet, _, err := client.Statuses.Update(tweetText, nil)
if err != nil {
log.Println(err)
}
log.Printf("%+v\n", tweet)
return tweet
}
// SearchTweets searches for the given hashtag. It takes hashtag as the query
// argument (type string) as well as the twitter client. It returns a slice of
// Tweet objects.
func SearchTweets(client *twitter.Client, query string) *twitter.Search {
search, _, err := client.Search.Tweets(&twitter.SearchTweetParams{
Query: query,
})
if err != nil {
log.Print(err)
}
return search
}
// SendRetweet retweets the first retruned tweet after searching with the given
// hashtag. The hashtag must be passed in as a string along with the twitter
// client.
func SendRetweet(client *twitter.Client, searchQuery string) *twitter.Tweet {
search := SearchTweets(client, searchQuery)
retweet, _, err := client.Statuses.Retweet(search.Statuses[0].ID, &twitter.StatusRetweetParams{
ID: search.Statuses[0].ID,
})
if err != nil {
log.Print(err)
}
// log.Printf("%+v\n", resp)
log.Printf("%+v\n", retweet)
return retweet
}
// LikeTweet sends a like to the first returned tweet after searching with the
// given hashtag. The hashtag is passed in as a string along with the twitter
// client.
func LikeTweet(client *twitter.Client, searchQuery string) *twitter.Tweet {
search := SearchTweets(client, searchQuery)
like, _, err := client.Favorites.Create(&twitter.FavoriteCreateParams{
ID: search.Statuses[0].ID,
})
if err != nil {
log.Print(err)
}
log.Printf("%+v\n", like)
return like
}
// tweet is a struct that saves tweets for the user. It records the tweet ID,
// the tweet's text and the action that was taken by the user on that tweet
type tweet struct {
gorm.Model
tweetID int64
Text string
Action string // options: like, tweet, retweet
}
func initDB() *gorm.DB {
// Open DB
db, err := gorm.Open("postgres", fmt.Sprintf("host=%s port=%s user=%s dbname=%s sslmode=disable", os.Getenv("host"), os.Getenv("DB_PORT"), os.Getenv("user"), os.Getenv("dbname")))
if err != nil {
log.Print(err)
}
db.AutoMigrate(&tweet{})
return db
}
func saveTweet(db *gorm.DB, tweetID int64, tweetText string, tweetAction string) *gorm.DB {
// Create a new DB entry
return db.Create(&tweet{tweetID: tweetID, Text: tweetText, Action: tweetAction})
}
func createTweet(context echo.Context) string {
newTweet := context.FormValue("tweet")
return newTweet
}
func main() {
// Get auth credentials and the Twitter client
creds := GetCreds()
client, err := GetClient(&creds)
if err != nil {
log.Println("Error getting Twitter Client")
log.Println(err)
}
searchQuery := "Golang"
db := initDB()
server := echo.New()
server.GET("/", func(context echo.Context) error {
return context.HTML(http.StatusOK, `
<a href='/search'> To search and save a tweet about Golang, click here.</a>
<br>
<br>
<a href='/like'> To like a tweet a searched tweet about Golang, click here.</a>
<br>
<br>
<a href='/tweetText'> To send a tweet, click here.</a>
<br>
<br>
<a href='/retweet'> To send a retweet a searched tweet about Golang, click here.</a>
<br>
<br>
<br>
<a href='/archive'> To see all the tweets you've interacted with, click here.</a>
`)
})
// server.GET("/archive", func(context echo.Context) error {
// // var queryList []
// query := db.Find(&tweet{})
// fmt.Println(reflect.TypeOf(query))
// // for i := range query {
// // queryList = append(queryList, query)
// // }
// return context.JSON(http.StatusOK, query.Value)
// })
server.GET("/search", func(context echo.Context) error {
search := SearchTweets(client, searchQuery)
saveTweet(db, search.Statuses[0].ID, search.Statuses[0].Text, "search")
return context.JSON(http.StatusOK, search.Statuses[0].Text)
})
server.GET("/like", func(context echo.Context) error {
search := SearchTweets(client, searchQuery)
LikeTweet(client, searchQuery)
saveTweet(db, search.Statuses[0].ID, search.Statuses[0].Text, "like")
return context.JSON(http.StatusOK, search.Statuses[0].Text)
})
server.GET("/retweet", func(context echo.Context) error {
search := SearchTweets(client, searchQuery)
SendRetweet(client, searchQuery)
saveTweet(db, search.Statuses[0].ID, search.Statuses[0].Text, "retweet")
return context.JSON(http.StatusOK, search.Statuses[0].Text)
})
server.GET("/tweetText", func(context echo.Context) error {
return context.HTML(http.StatusOK, `
<form method=POST action='/tweets'>
<label for='tweetText'>Enter the text for your tweet:</label>
<input id='tweetText' type='text' name='tweet'>
<input type='submit' value='Submit'>
</form>
`)
})
server.POST("/tweets", func(context echo.Context) error {
tweetText := createTweet(context)
SendTweet(client, tweetText)
return context.JSON(http.StatusOK, tweetText)
})
defer db.Close()
server.Logger.Fatal(server.Start(":" + os.Getenv("PORT")))
}
| [
"\"ACCESS_TOKEN\"",
"\"ACCESS_TOKEN_SECRET\"",
"\"CONSUMER_KEY\"",
"\"CONSUMER_SECRET\"",
"\"host\"",
"\"DB_PORT\"",
"\"user\"",
"\"dbname\"",
"\"PORT\""
] | [] | [
"PORT",
"CONSUMER_SECRET",
"DB_PORT",
"CONSUMER_KEY",
"host",
"dbname",
"user",
"ACCESS_TOKEN",
"ACCESS_TOKEN_SECRET"
] | [] | ["PORT", "CONSUMER_SECRET", "DB_PORT", "CONSUMER_KEY", "host", "dbname", "user", "ACCESS_TOKEN", "ACCESS_TOKEN_SECRET"] | go | 9 | 0 | |
internal/db/dbutil/dbutil.go | package dbutil
import (
"context"
"database/sql"
"database/sql/driver"
"encoding/json"
"fmt"
"net/url"
"os"
"strconv"
"strings"
"time"
// Register driver
"github.com/lib/pq"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database/postgres"
bindata "github.com/golang-migrate/migrate/v4/source/go_bindata"
"github.com/hashicorp/go-multierror"
"github.com/inconshreveable/log15"
"github.com/opentracing/opentracing-go/ext"
"github.com/pkg/errors"
"github.com/sourcegraph/sourcegraph/internal/trace/ot"
codeintelMigrations "github.com/sourcegraph/sourcegraph/migrations/codeintel"
frontendMigrations "github.com/sourcegraph/sourcegraph/migrations/frontend"
)
// Transaction calls f within a transaction, rolling back if any error is
// returned by the function.
func Transaction(ctx context.Context, db *sql.DB, f func(tx *sql.Tx) error) (err error) {
finish := func(tx *sql.Tx) {
if err != nil {
if err2 := tx.Rollback(); err2 != nil {
err = multierror.Append(err, err2)
}
return
}
err = tx.Commit()
}
span, ctx := ot.StartSpanFromContext(ctx, "Transaction")
defer func() {
if err != nil {
ext.Error.Set(span, true)
span.SetTag("err", err.Error())
}
span.Finish()
}()
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer finish(tx)
return f(tx)
}
// A DB captures the essential method of a sql.DB: QueryContext.
type DB interface {
QueryContext(ctx context.Context, q string, args ...interface{}) (*sql.Rows, error)
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
}
// A Tx captures the essential methods of a sql.Tx.
type Tx interface {
Rollback() error
Commit() error
}
// A TxBeginner captures BeginTx method of a sql.DB
type TxBeginner interface {
BeginTx(context.Context, *sql.TxOptions) (*sql.Tx, error)
}
// NewDB returns a new *sql.DB from the given dsn (data source name).
func NewDB(dsn, app string) (*sql.DB, error) {
cfg, err := url.Parse(dsn)
if err != nil {
return nil, errors.Wrap(err, "failed to parse dsn")
}
qry := cfg.Query()
// Force PostgreSQL session timezone to UTC.
qry.Set("timezone", "UTC")
// Force application name.
qry.Set("application_name", app)
// Set max open and idle connections
maxOpen, _ := strconv.Atoi(qry.Get("max_conns"))
if maxOpen == 0 {
maxOpen = 30
}
qry.Del("max_conns")
cfg.RawQuery = qry.Encode()
db, err := sql.Open("postgres", cfg.String())
if err != nil {
return nil, errors.Wrap(err, "failed to connect to database")
}
if err := db.Ping(); err != nil {
return nil, errors.Wrap(err, "failed to ping database")
}
db.SetMaxOpenConns(maxOpen)
db.SetMaxIdleConns(maxOpen)
db.SetConnMaxLifetime(time.Minute)
return db, nil
}
// databases configures the migrations we want based on a database name. This
// configuration includes the name of the migration version table as well as
// the raw migration assets to run to migrate the target schema to a new version.
var databases = map[string]struct {
MigrationsTable string
Resource *bindata.AssetSource
}{
"frontend": {
MigrationsTable: "schema_migrations",
Resource: bindata.Resource(frontendMigrations.AssetNames(), frontendMigrations.Asset),
},
"codeintel": {
MigrationsTable: "codeintel_schema_migrations",
Resource: bindata.Resource(codeintelMigrations.AssetNames(), codeintelMigrations.Asset),
},
}
// DatabaseNames returns the list of database names (configured via `dbutil.databases`)..
var DatabaseNames = func() []string {
var names []string
for databaseName := range databases {
names = append(names, databaseName)
}
return names
}()
// MigrationTables returns the list of migration table names (configured via `dbutil.databases`).
var MigrationTables = func() []string {
var migrationTables []string
for _, db := range databases {
migrationTables = append(migrationTables, db.MigrationsTable)
}
return migrationTables
}()
// NewMigrate returns a new configured migration object for the given database name. This database
// name must be present in the `dbutil.databases` map. This migration can be subsequently run by
// invoking `dbutil.DoMigrate`.
func NewMigrate(db *sql.DB, databaseName string) (*migrate.Migrate, error) {
schemaData, ok := databases[databaseName]
if !ok {
return nil, fmt.Errorf("unknown database '%s'", databaseName)
}
driver, err := postgres.WithInstance(db, &postgres.Config{
MigrationsTable: schemaData.MigrationsTable,
})
if err != nil {
return nil, err
}
d, err := bindata.WithInstance(schemaData.Resource)
if err != nil {
return nil, err
}
m, err := migrate.NewWithInstance("go-bindata", d, "postgres", driver)
if err != nil {
return nil, err
}
// In case another process was faster and runs migrations, we will wait
// this long
m.LockTimeout = 5 * time.Minute
if os.Getenv("LOG_MIGRATE_TO_STDOUT") != "" {
m.Log = stdoutLogger{}
}
return m, nil
}
// DoMigrate runs all up migrations.
func DoMigrate(m *migrate.Migrate) (err error) {
err = m.Up()
if err == nil || err == migrate.ErrNoChange {
return nil
}
if os.IsNotExist(err) {
// This should only happen if the DB is ahead of the migrations available
version, dirty, verr := m.Version()
if verr != nil {
return verr
}
if dirty { // this shouldn't happen, but checking anyways
return err
}
log15.Warn("WARNING: Detected an old version of Sourcegraph. The database has migrated to a newer version. If you have applied a rollback, this is expected and you can ignore this warning. If not, please contact [email protected] for further assistance.", "db_version", version)
return nil
}
return err
}
type stdoutLogger struct{}
func (stdoutLogger) Printf(format string, v ...interface{}) {
fmt.Printf(format, v...)
}
func (logger stdoutLogger) Verbose() bool {
return true
}
func IsPostgresError(err error, codename string) bool {
e, ok := errors.Cause(err).(*pq.Error)
return ok && e.Code.Name() == codename
}
// NullTime represents a time.Time that may be null. nullTime implements the
// sql.Scanner interface so it can be used as a scan destination, similar to
// sql.NullString. When the scanned value is null, Time is set to the zero value.
type NullTime struct{ *time.Time }
// Scan implements the Scanner interface.
func (nt *NullTime) Scan(value interface{}) error {
*nt.Time, _ = value.(time.Time)
return nil
}
// Value implements the driver Valuer interface.
func (nt NullTime) Value() (driver.Value, error) {
if nt.Time == nil {
return nil, nil
}
return *nt.Time, nil
}
// NullString represents a string that may be null. NullString implements the
// sql.Scanner interface so it can be used as a scan destination, similar to
// sql.NullString. When the scanned value is null, String is set to the zero value.
type NullString struct{ S *string }
// Scan implements the Scanner interface.
func (nt *NullString) Scan(value interface{}) error {
switch v := value.(type) {
case []byte:
*nt.S = string(v)
case string:
*nt.S = v
}
return nil
}
// Value implements the driver Valuer interface.
func (nt NullString) Value() (driver.Value, error) {
if nt.S == nil {
return nil, nil
}
return *nt.S, nil
}
// NullInt32 represents an int32 that may be null. NullInt32 implements the
// sql.Scanner interface so it can be used as a scan destination, similar to
// sql.NullString. When the scanned value is null, int32 is set to the zero value.
type NullInt32 struct{ N *int32 }
// Scan implements the Scanner interface.
func (n *NullInt32) Scan(value interface{}) error {
switch value := value.(type) {
case int64:
*n.N = int32(value)
case int32:
*n.N = value
case nil:
return nil
default:
return fmt.Errorf("value is not int64: %T", value)
}
return nil
}
// Value implements the driver Valuer interface.
func (n NullInt32) Value() (driver.Value, error) {
if n.N == nil {
return nil, nil
}
return *n.N, nil
}
// NullInt64 represents an int64 that may be null. NullInt64 implements the
// sql.Scanner interface so it can be used as a scan destination, similar to
// sql.NullString. When the scanned value is null, int64 is set to the zero value.
type NullInt64 struct{ N *int64 }
// Scan implements the Scanner interface.
func (n *NullInt64) Scan(value interface{}) error {
switch value := value.(type) {
case int64:
*n.N = value
case int32:
*n.N = int64(value)
case nil:
return nil
default:
return fmt.Errorf("value is not int64: %T", value)
}
return nil
}
// Value implements the driver Valuer interface.
func (n NullInt64) Value() (driver.Value, error) {
if n.N == nil {
return nil, nil
}
return *n.N, nil
}
// JSONInt64Set represents an int64 set as a JSONB object where the keys are
// the ids and the values are null. It implements the sql.Scanner interface so
// it can be used as a scan destination, similar to
// sql.NullString.
type JSONInt64Set struct{ Set *[]int64 }
// Scan implements the Scanner interface.
func (n *JSONInt64Set) Scan(value interface{}) error {
set := make(map[int64]*struct{})
switch value := value.(type) {
case nil:
case []byte:
if err := json.Unmarshal(value, &set); err != nil {
return err
}
default:
return fmt.Errorf("value is not []byte: %T", value)
}
if *n.Set == nil {
*n.Set = make([]int64, 0, len(set))
} else {
*n.Set = (*n.Set)[:0]
}
for id := range set {
*n.Set = append(*n.Set, id)
}
return nil
}
// Value implements the driver Valuer interface.
func (n JSONInt64Set) Value() (driver.Value, error) {
if n.Set == nil {
return nil, nil
}
return *n.Set, nil
}
// NullJSONRawMessage represents a json.RawMessage that may be null. NullJSONRawMessage implements the
// sql.Scanner interface so it can be used as a scan destination, similar to
// sql.NullString. When the scanned value is null, Raw is left as nil.
type NullJSONRawMessage struct {
Raw json.RawMessage
}
// Scan implements the Scanner interface.
func (n *NullJSONRawMessage) Scan(value interface{}) error {
switch value := value.(type) {
case nil:
case []byte:
// We make a copy here because the given value could be reused by
// the SQL driver
n.Raw = make([]byte, len(value))
copy(n.Raw, value)
default:
return fmt.Errorf("value is not []byte: %T", value)
}
return nil
}
// Value implements the driver Valuer interface.
func (n *NullJSONRawMessage) Value() (driver.Value, error) {
return n.Raw, nil
}
func PostgresDSN(prefix, currentUser string, getenv func(string) string) string {
if prefix != "" {
prefix = fmt.Sprintf("%s_", strings.ToUpper(prefix))
}
env := func(name string) string {
return getenv(prefix + name)
}
// PGDATASOURCE is a sourcegraph specific variable for just setting the DSN
if dsn := env("PGDATASOURCE"); dsn != "" {
return dsn
}
// TODO match logic in lib/pq
// https://sourcegraph.com/github.com/lib/pq@d6156e141ac6c06345c7c73f450987a9ed4b751f/-/blob/connector.go#L42
dsn := &url.URL{
Scheme: "postgres",
Host: "127.0.0.1:5432",
}
// Username preference: PGUSER, $USER, postgres
username := "postgres"
if currentUser != "" {
username = currentUser
}
if user := env("PGUSER"); user != "" {
username = user
}
if password := env("PGPASSWORD"); password != "" {
dsn.User = url.UserPassword(username, password)
} else {
dsn.User = url.User(username)
}
if host := env("PGHOST"); host != "" {
dsn.Host = host
}
if port := env("PGPORT"); port != "" {
dsn.Host += ":" + port
}
if db := env("PGDATABASE"); db != "" {
dsn.Path = db
}
if sslmode := env("PGSSLMODE"); sslmode != "" {
qry := dsn.Query()
qry.Set("sslmode", sslmode)
dsn.RawQuery = qry.Encode()
}
return dsn.String()
}
| [
"\"LOG_MIGRATE_TO_STDOUT\""
] | [] | [
"LOG_MIGRATE_TO_STDOUT"
] | [] | ["LOG_MIGRATE_TO_STDOUT"] | go | 1 | 0 | |
venv/Lib/site-packages/fitz/frontend.py | """Forward facing fitz tools with information about ecosystem."""
import os
import re
import sys
import imp
import os.path as op
import numpy as np
import subprocess
from nipype import config, logging
from .tools import make_subject_source
def gather_project_info():
"""Import project information based on environment settings."""
fitz_dir = os.environ["FITZ_DIR"]
proj_file = op.join(fitz_dir, "project.py")
try:
project = sys.modules["project"]
except KeyError:
project = imp.load_source("project", proj_file)
project_dict = dict()
for dir in ["data", "analysis", "working", "crash"]:
path = op.abspath(op.join(fitz_dir, getattr(project, dir + "_dir")))
project_dict[dir + "_dir"] = path
project_dict["default_exp"] = project.default_exp
project_dict["rm_working_dir"] = project.rm_working_dir
if hasattr(project, "ants_normalization"):
use_ants = project.ants_normalization
project_dict["normalization"] = "ants" if use_ants else "fsl"
else:
project_dict["normalization"] = "fsl"
return project_dict
def gather_experiment_info(exp_name=None, model=None):
"""Import an experiment module and add some formatted information."""
fitz_dir = os.environ["FITZ_DIR"]
# Allow easy use of default experiment
if exp_name is None:
project = gather_project_info()
exp_name = project["default_exp"]
# Import the base experiment
try:
exp = sys.modules[exp_name]
except KeyError:
exp_file = op.join(fitz_dir, exp_name + ".py")
exp = imp.load_source(exp_name, exp_file)
exp_dict = {}
def keep(k):
return not re.match("__.*__", k)
exp_dict.update({k: v for k, v in exp.__dict__.items() if keep(k)})
# Possibly import the alternate model details
if model is not None:
check_modelname(model, exp_name)
try:
mod = sys.modules[model]
except KeyError:
model_file = op.join(fitz_dir, "%s-%s.py" % (exp_name, model))
mod = imp.load_source(model, model_file)
mod_dict = {k: v for k, v in mod.__dict__.items() if keep(k)}
# Update the base information with the altmodel info
exp_dict.update(mod_dict)
# Save the __doc__ attribute to the dict
exp_dict["comments"] = "" if exp.__doc__ is None else exp.__doc__
if model is not None:
exp_dict["comments"] += "" if mod.__doc__ is None else mod.__doc__
do_lyman_tweaks(exp_dict)
return exp_dict
def do_lyman_tweaks(exp_dict):
# Check if it looks like this is a partial FOV acquisition
exp_dict["partial_brain"] = bool(exp_dict.get("whole_brain_template"))
# Temporal resolution.
if "TR" in exp_dict.keys():
exp_dict["TR"] = float(exp_dict["TR"])
# Set up the default contrasts
if ("condition_names" in exp_dict.keys() and
exp_dict["condition_names"] is not None):
cs = [(name, [name], [1]) for name in exp_dict["condition_names"]]
exp_dict["contrasts"] = cs + exp_dict["contrasts"]
if "contrast_names" in exp_dict.keys():
# Build contrasts list if neccesary
exp_dict["contrast_names"] = [c[0] for c in exp_dict["contrasts"]]
def determine_subjects(subject_arg=None):
"""Intelligently find a list of subjects in a variety of ways."""
if subject_arg is None:
subject_file = op.join(os.environ["FITZ_DIR"], "subjects.txt")
subjects = np.loadtxt(subject_file, str, ndmin=1).tolist()
elif op.isfile(subject_arg[0]):
subjects = np.loadtxt(subject_arg[0], str).tolist()
else:
try:
subject_file = op.join(os.environ["FITZ_DIR"],
subject_arg[0] + ".txt")
subjects = np.loadtxt(subject_file, str).tolist()
except IOError:
subjects = subject_arg
return subjects
def determine_engine(args):
"""Read command line args and return Workflow.run() args."""
plugin_dict = dict(linear="Linear", multiproc="MultiProc",
ipython="IPython", torque="PBS", sge="SGE",
slurm="SLURM")
plugin = plugin_dict[args.plugin]
plugin_args = dict()
qsub_args = ""
if plugin == "MultiProc":
plugin_args['n_procs'] = args.nprocs
elif plugin in ["SGE", "PBS"]:
qsub_args += "-V -e /dev/null -o /dev/null "
if args.queue is not None:
qsub_args += "-q %s " % args.queue
plugin_args["qsub_args"] = qsub_args
return plugin, plugin_args
def update_params(wf_module, exp):
# print sys.path, dir(wf_module), wf_module.__name__, wf_module.__file__
try:
params = wf_module.default_parameters
except IOError:
print "Workflow must define a default_parameters method!"
raise
# default_names = set(params.keys())
params.update(exp)
# current_names = set(params.keys())
# print default_names, current_names
# new_names = current_names - default_names
# print new_names
# if len(new_names):
# msg = ["You may have an invalid configuration:"]
# for name in new_names:
# msg.append("A value for %s was specified, " % name +
# "but not expected by the workflow")
# raise Exception(msg)
return params
def check_modelname(model, exp_name):
"""Do Some sanity checks on the model"""
err = []
if model.endswith('.py'):
err.append("Don't include '.py' when listing your model. ")
if model.startswith(exp_name):
err.append("Don't include the experiment name (%s) " % exp_name +
"when listing your model.")
if len(err):
err.insert(0, "Problem with the way you specified your model on " +
"the commandline:")
raise IOError('\n- '.join(err))
def run(args):
"""Get and process specific information"""
project = gather_project_info()
exp = gather_experiment_info(args.experiment, args.model)
# Subject is always highest level of parameterization
subject_list = determine_subjects(args.subjects)
subj_source = make_subject_source(subject_list)
# Get the full correct name for the experiment
if args.experiment is None:
exp_name = project["default_exp"]
else:
exp_name = args.experiment
exp['exp_name'] = exp_name
exp['model_name'] = args.model if args.model else ''
# Set roots of output storage
project['analysis_dir'] = op.join(project["analysis_dir"], exp_name)
project['working_dir'] = op.join(project["working_dir"], exp_name,
exp['model_name'])
config.set("execution", "crashdump_dir", project["crash_dir"])
if args.verbose > 0:
config.set("logging", "filemanip_level", 'DEBUG')
config.enable_debug_mode()
logging.update_logging(config)
if not op.exists(project['analysis_dir']):
os.makedirs(project['analysis_dir'])
workflows_dir = os.path.join(os.environ['FITZ_DIR'], exp['pipeline'],
'workflows')
if not op.isdir(workflows_dir):
missing_pipe = 'raise'
if missing_pipe == 'install':
install(args)
else:
raise IOError("Run `fitz install` to set up your pipeline of "
"workflows, %s does not exist." % workflows_dir)
sys.path.insert(0, workflows_dir)
for wf_name in args.workflows:
try:
mod = imp.find_module(wf_name)
wf_module = imp.load_module("wf", *mod)
except (IOError, ImportError):
print "Could not find any workflows matching %s" % wf_name
raise
params = update_params(wf_module, exp)
workflow = wf_module.workflow_manager(
project, params, args, subj_source)
# Run the pipeline
plugin, plugin_args = determine_engine(args)
workflow.write_graph(str(workflow)+'.dot', format='svg')
if not args.dontrun:
workflow.run(plugin, plugin_args)
def install(args):
project = gather_project_info()
exp = gather_experiment_info(project['default_exp'])
cmd = ['git', 'clone', exp['pipeline_src']]
workflow_base = os.path.splitext(os.path.basename(exp['pipeline_src']))[0]
print workflow_base
print ' '.join(cmd)
if not os.path.isdir(workflow_base):
subprocess.check_call(cmd)
else:
print "Workflow %s already exists." % workflow_base
cmd = ['git', 'checkout', exp['pipeline_version']]
print ' '.join(cmd)
try:
subprocess.check_call(cmd, cwd=workflow_base)
except:
print "Error checking out tag %s" % exp['pipeline_version']
# raise
| [] | [] | [
"FITZ_DIR"
] | [] | ["FITZ_DIR"] | python | 1 | 0 | |
cli/create_admin.go | // Copyright 2018 Frédéric Guillot. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package cli // import "miniflux.app/cli"
import (
"fmt"
"os"
"miniflux.app/logger"
"miniflux.app/model"
"miniflux.app/storage"
)
func createAdmin(store *storage.Storage) {
user := model.NewUser()
user.Username = os.Getenv("ADMIN_USERNAME")
user.Password = os.Getenv("ADMIN_PASSWORD")
user.IsAdmin = true
if user.Username == "" || user.Password == "" {
user.Username, user.Password = askCredentials()
}
if err := user.ValidateUserCreation(); err != nil {
fmt.Println(err)
os.Exit(1)
}
if store.UserExists(user.Username) {
logger.Info(`User %q already exists, skipping creation`, user.Username)
return
}
if err := store.CreateUser(user); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
| [
"\"ADMIN_USERNAME\"",
"\"ADMIN_PASSWORD\""
] | [] | [
"ADMIN_USERNAME",
"ADMIN_PASSWORD"
] | [] | ["ADMIN_USERNAME", "ADMIN_PASSWORD"] | go | 2 | 0 | |
server/vcr-server/agent_webhooks/views.py | import json
import logging
import time
import os
import random
import base64
from django.conf import settings
from drf_yasg.utils import swagger_auto_schema
from rest_framework import permissions, status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from api.v2.models.Credential import Credential as CredentialModel
from api.v2.utils import log_timing_method, log_timing_event, call_agent_with_retry
from agent_webhooks.utils.credential import Credential, CredentialManager
from agent_webhooks.utils.issuer import IssuerManager
LOGGER = logging.getLogger(__name__)
TOPIC_CONNECTIONS = "connections"
TOPIC_CONNECTIONS_ACTIVITY = "connections_activity"
TOPIC_CREDENTIALS = "issue_credential"
TOPIC_CREDENTIALS_2_0 = "issue_credential_v2_0"
TOPIC_CREDENTIALS_2_0_INDY = "issue_credential_v2_0_indy"
TOPIC_PRESENTATIONS = "presentations"
TOPIC_PRESENT_PROOF = "present_proof"
TOPIC_GET_ACTIVE_MENU = "get-active-menu"
TOPIC_PERFORM_MENU_ACTION = "perform-menu-action"
TOPIC_ISSUER_REGISTRATION = "issuer_registration"
PROCESS_INBOUND_CREDENTIALS = os.environ.get('PROCESS_INBOUND_CREDENTIALS', 'true')
if PROCESS_INBOUND_CREDENTIALS.upper() == "TRUE":
LOGGER.debug(">>> YES processing inbound credentials")
PROCESS_INBOUND_CREDENTIALS = True
else:
LOGGER.error(">>> NO not processing inbound credentials")
PROCESS_INBOUND_CREDENTIALS = False
RANDOM_ERRORS = os.environ.get('RANDOM_ERRORS', 'false').upper() == "TRUE"
if RANDOM_ERRORS:
LOGGER.error(">>> YES generating random credential processing errors")
@swagger_auto_schema(method="post", auto_schema=None)
@api_view(["POST"])
@permission_classes((permissions.AllowAny,))
def agent_callback(request, topic):
message = request.data
if "state" not in message:
LOGGER.warn(f"Received aca-py webhook without state. message={message}")
state = message["state"] if "state" in message else None
LOGGER.debug(f"Received aca-py webhook. state={state} message={message}")
start_time = time.perf_counter()
method = "agent_callback." + topic + ("." + state if state else "")
log_timing_event(method, message, start_time, None, False)
# dispatch based on the topic type
if topic == TOPIC_CONNECTIONS:
response = Response("")
elif topic == TOPIC_CONNECTIONS_ACTIVITY:
response = Response("")
elif topic == TOPIC_CREDENTIALS:
response = handle_credentials(state, message)
elif topic == TOPIC_CREDENTIALS_2_0:
response = handle_credentials_2_0(state, message)
elif topic == TOPIC_CREDENTIALS_2_0_INDY:
response = handle_credentials_2_0(state, message)
elif topic == TOPIC_PRESENTATIONS or topic == TOPIC_PRESENT_PROOF:
response = handle_presentations(state, message)
elif topic == TOPIC_GET_ACTIVE_MENU:
response = Response("")
elif topic == TOPIC_PERFORM_MENU_ACTION:
response = Response("")
elif topic == TOPIC_ISSUER_REGISTRATION:
response = handle_register_issuer(message)
else:
LOGGER.info("Callback: topic=" + topic + ", message=" + json.dumps(message))
end_time = time.perf_counter()
log_timing_method(method, start_time, end_time, False)
log_timing_event(method, message, start_time, end_time, False)
return Response("Invalid topic: " + topic, status=status.HTTP_400_BAD_REQUEST)
end_time = time.perf_counter()
log_timing_method(method, start_time, end_time, True)
log_timing_event(method, message, start_time, end_time, True)
return response
# create one global manager instance
credential_manager = CredentialManager()
def handle_credentials(state, message):
"""
Receives notification of a credential processing event.
For example, for a greenlight registration credential:
message = {
"connection_id": "12345",
"credential_definition_id": "6qnvgJtqwK44D8LFYnV5Yf:3:CL:25:tag",
"credential_exchange_id": "666",
"credential_id": "67890",
"credential_offer": {},
"credential_request": {},
"credential_request_metadata": {},
"credential": {
"referent": "67892",
"values":
{
"address_line_1": "2230 Holdom Avenue",
"address_line_2": "",
"addressee": "Ms. Brenda J Strachan",
"city": "Surrey",
"corp_num": "FM0243624",
"country": "CA",
"entity_name_effective": "2007-08-30",
"entity_status": "Active",
"entity_status_effective": "2007-08-30",
"entity_type": "BC Company",
"legal_name": "LOEFFLER PIZZA PLACE LIMITED",
"postal_code": "V3T 4Y5",
"province": "BC",
"reason_description": "Filing:REGST",
"registration_date": "2007-08-30"
},
"schema_id": "6qnvgJtqwK44D8LFYnV5Yf:2:Registered Corporation:1.0.3",
"cred_def_id": "6qnvgJtqwK44D8LFYnV5Yf:3:CL:25:tag",
"rev_reg_id": null,
"rev_reg": null,
"witness": "Ian",
"cred_rev_id": null,
"signature": "ian costanzo, honest",
"signature_correctness_proof": "honest"
},
"initiator": "...",
"schema_id": "...",
"state": "stored",
"thread_id": "..."
}
"""
credential_exchange_id = message["credential_exchange_id"]
LOGGER.debug(
f'Credential: state="{state}" credential_exchange_id="{credential_exchange_id}"'
)
response_data = {}
try:
if state == "offer_received":
LOGGER.debug("After receiving credential offer, send credential request")
# no need to perform a task, we run the agent with the --auto-respond-credential-offer flag set
response_data = {
"success": True,
"details": f"Received offer on credential exchange {credential_exchange_id}",
}
elif state == "credential_received":
raw_credential = message["raw_credential"]
# You can include this exception to test error reporting
if RANDOM_ERRORS:
raise_random_exception(credential_exchange_id, 'credential_recieved')
credential_data = {
"thread_id": message["thread_id"],
"schema_id": raw_credential["schema_id"],
"cred_def_id": raw_credential["cred_def_id"],
"rev_reg_id": raw_credential["rev_reg_id"] if "rev_reg_id" in raw_credential else None,
"attrs": {},
}
for attr in raw_credential["values"]:
credential_data["attrs"][attr] = raw_credential["values"][attr]["raw"]
return receive_credential(credential_exchange_id, credential_data)
elif state == "stored":
LOGGER.debug("Credential Stored")
response_data = {"success": True, "details": "Credential Stored"}
except Exception as e:
LOGGER.error(e)
LOGGER.error(f"Send problem report for {credential_exchange_id}")
# Send a problem report for the error
resp = call_agent_with_retry(
f"{settings.AGENT_ADMIN_URL}/issue-credential/records/{credential_exchange_id}/problem-report",
post_method=True,
payload={"description": str(e)},
headers=settings.ADMIN_REQUEST_HEADERS,
)
resp.raise_for_status()
return Response({"success": False, "error": str(e)})
return Response(response_data)
def handle_presentations(state, message):
LOGGER.debug(f" >>>> handle_presentations({state})")
if state == "request_received":
presentation_request = message["presentation_request"]
presentation_exchange_id = message["presentation_exchange_id"]
# Pull referents out of presentation request
requested_attribute_referents = list(
presentation_request["requested_attributes"].keys()
)
requested_predicates_referents = list(
presentation_request["requested_predicates"].keys()
)
# Comma delimit all referents for agent API request
referents = ",".join(
requested_attribute_referents + requested_predicates_referents
)
credentials = []
if presentation_request["name"].startswith("cred_id::"):
cred_id = presentation_request["name"][9:]
resp = call_agent_with_retry(
f"{settings.AGENT_ADMIN_URL}/credential/"
+ f"{cred_id}",
post_method=False,
headers=settings.ADMIN_REQUEST_HEADERS,
)
resp.raise_for_status()
wallet_credential = resp.json()
wallet_credentials = {
"cred_info": wallet_credential,
"interval": None,
"presentation_referents": requested_attribute_referents + requested_predicates_referents
}
credentials = [wallet_credentials, ]
credential_query = presentation_request["name"]
if 0 == len(credentials):
resp = call_agent_with_retry(
f"{settings.AGENT_ADMIN_URL}/present-proof/records/"
+ f"{message['presentation_exchange_id']}/credentials/"
+ f"{referents}",
post_method=False,
headers=settings.ADMIN_REQUEST_HEADERS,
)
resp.raise_for_status()
# All credentials from wallet that satisfy presentation request
credentials = resp.json()
credential_query = f"/present-proof/records/{message['presentation_exchange_id']}/credentials/{referents}"
# Prep the payload we need to send to the agent API
credentials_for_presentation = {
"self_attested_attributes": {},
"requested_attributes": {},
"requested_predicates": {},
}
for referent in requested_attribute_referents:
credentials_for_presentation["requested_attributes"][referent] = {}
for referent in requested_predicates_referents:
credentials_for_presentation["requested_predicates"][referent] = {}
# we should have a single credential at this point
results_length = len(credentials)
if results_length != 1:
raise Exception(
"Number of credentials returned by query "
+ f"{credential_query} was not 1, it was {results_length}"
)
credential = credentials[0]
credential_id = credential["cred_info"]["referent"]
# For all "presentation_referents" on this `credential` returned
# from the wallet, we can apply this credential_id as the selected
# credential for the presentation
for related_referent in credential["presentation_referents"]:
if (
related_referent
in credentials_for_presentation["requested_attributes"]
):
credentials_for_presentation["requested_attributes"][
related_referent
]["cred_id"] = credential_id
credentials_for_presentation["requested_attributes"][
related_referent
]["revealed"] = True
elif (
related_referent
in credentials_for_presentation["requested_predicates"]
):
credentials_for_presentation["requested_predicates"][
related_referent
]["cred_id"] = credential_id
credentials_for_presentation["requested_predicates"][
related_referent
]["revealed"] = True
else:
raise Exception(
f"Referent {related_referent} returned from wallet "
+ "was not expected in proof request."
)
# We should have a fully constructed presentation now.
# Let's check to make sure:
for referent in credentials_for_presentation["requested_attributes"]:
if credentials_for_presentation["requested_attributes"][referent] == {}:
raise Exception(
f"requested_attributes contains unfulfilled referent {referent}"
)
for referent in credentials_for_presentation["requested_predicates"]:
if credentials_for_presentation["requested_predicates"][referent] == {}:
raise Exception(
f"requested_predicates contains unfulfilled referent {referent}"
)
# Finally, we should be able to send this payload to the agent for it
# to finish the process and send the presentation back to the verifier
# (to be verified)
resp = call_agent_with_retry(
f"{settings.AGENT_ADMIN_URL}/present-proof/records/"
+ f"{presentation_exchange_id}/send-presentation",
post_method=True,
payload=credentials_for_presentation,
headers=settings.ADMIN_REQUEST_HEADERS,
)
resp.raise_for_status()
return Response()
def handle_register_issuer(message):
"""Handles the registration of a new issuing agent in the credential registry.
The agent registration credential will be in the following format:
{
"issuer_registration_id": "string",
"connection_id": "string",
"issuer_registration": {
"credential_types": [
{
"category_labels": {"category": "label"},
"claim_descriptions": {"claim": "description"},
"claim_labels": {"claim": "label"},
"credential_def_id": "string",
"schema": "string",
"version": "string",
"name": "string",
"credential": {
"effective_date": {"input": "topic_id", "from": "claim"}
},
"topic": [
{
"source_id": {"input": "topic_id", "from": "claim"}
}
],
"endpoint": "string",
"cardinality_fields": ["string"],
"mapping": {},
"visible_fields": ["string"],
"logo_b64": "string",
}
],
"issuer": {
"name": "string",
"did": "string",
"abbreviation": "string",
"email": "string",
"url": "string",
"endpoint": "string",
"logo_b64": "string"
}
}
}
"""
issuer_manager = IssuerManager()
updated = issuer_manager.register_issuer(message)
# reset the global CredentialManager instance (to clear the CredentialType cache)
global credential_manager
credential_manager = CredentialManager()
# update tagging policy
tag_policy_updates = {}
cred_types = updated.credential_types
for ctype in cred_types:
tag_attrs = ctype.get_tagged_attributes()
if tag_attrs:
tag_policy_updates[ctype.credential_def_id] = tag_attrs
return Response(
content_type="application/json", data={"result": updated.serialize()}
)
def handle_credentials_2_0(state, message):
"""
Receives notification of a credential 2.0 processing event.
For example, for a registration credential:
message = {
"cred_proposal": { ... },
"role": "issuer",
"initiator": "self",
"created_at": "2021-04-30 02:54:32.925351Z",
"conn_id": "ae5f0b97-746e-4062-bdf2-27b9d6809cc9",
"auto_issue": true,
"cred_preview": { ... },
"cred_ex_id": "e2f41814-d625-4218-9f53-879111398372",
"cred_request": { ... },
"auto_offer": false,
"state": "credential-issued",
"updated_at": "2021-04-30 02:54:33.138119Z",
"cred_issue": {
"@type": "did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/issue-credential/2.0/issue-credential",
"@id": "0f0104e6-43ca-47e1-85e0-4fd41b10688f",
"formats": [
{
"attach_id": "0",
"format": "hlindy-zkp-v1.0"
}
],
"credentials~attach": [
{
"@id": "0",
"mime-type": "application/json",
"data": {
"base64": "..."
}
}
]
},
"cred_offer": { ... },
"thread_id": "dd56313f-1787-47f7-8838-d6931284ae30"
}
"""
cred_ex_id = message["cred_ex_id"]
LOGGER.debug(f'Credential: state="{state}" cred_ex_id="{cred_ex_id}"')
response_data = {}
try:
if state == "offer-received":
LOGGER.debug("After receiving credential offer, send credential request")
# no need to perform a task, we run the agent with the --auto-respond-credential-offer flag set
response_data = {
"success": True,
"details": f"Received offer on credential exchange {cred_ex_id}",
}
elif state == "credential-received":
cred_issue = message["cred_issue"]
if cred_issue is None:
LOGGER.error(" >>> Error cred_issue missing for " + cred_ex_id)
return Response("Error cred_issue missing for credential", status=status.HTTP_400_BAD_REQUEST)
# You can include this exception to test error reporting
if RANDOM_ERRORS:
raise_random_exception(cred_ex_id, 'credential-recieved')
cred_data = {}
for cred_fmt in cred_issue["formats"]:
att_id = cred_fmt["attach_id"]
cred_att = [att for att in cred_issue["credentials~attach"] if att["@id"] == att_id][0]
if cred_att is None:
LOGGER.error(
" >>> Error data cred_att could not be parsed for " + cred_ex_id)
return Response("Error credential attachment could not be parsed from cred_issue", status=status.HTTP_400_BAD_REQUEST)
cred_raw_base64 = cred_att["data"]["base64"]
cred_raw = json.loads(base64.b64decode(cred_raw_base64))
if cred_raw is None:
LOGGER.error(
" >>> Error data cred_issue could not be parsed for " + cred_ex_id)
return Response("Error credential data could not be parsed from cred_issue", status=status.HTTP_400_BAD_REQUEST)
cred_data = {
"thread_id": cred_issue["~thread"]["thid"],
"schema_id": cred_raw["schema_id"],
"cred_def_id": cred_raw["cred_def_id"],
"rev_reg_id": cred_raw["rev_reg_id"] if "rev_reg_id" in cred_raw else None,
"attrs": {k: v["raw"] for k, v in cred_raw["values"].items()},
}
return receive_credential(cred_ex_id, cred_data, "2.0")
else:
LOGGER.warn(f"Handler for state: {state} not implemented")
response_data = {"success": True, "details": f"State received {state}"}
except Exception as e:
LOGGER.error(e)
LOGGER.error(f"Send problem report for {cred_ex_id}")
# Send a problem report for the error
resp = call_agent_with_retry(
f"{settings.AGENT_ADMIN_URL}/issue-credential-2.0/records/{cred_ex_id}/problem-report",
post_method=True,
payload={
"description": str(e)
},
headers=settings.ADMIN_REQUEST_HEADERS,
)
resp.raise_for_status()
return Response({"success": False, "error": str(e)})
return Response(response_data)
def receive_credential(cred_ex_id, cred_data, v=None):
try:
existing = False
if PROCESS_INBOUND_CREDENTIALS:
credential = Credential(cred_data)
# sanity check that we haven't received this credential yet
cred_id = credential.thread_id
existing_credential = CredentialModel.objects.filter(credential_id=cred_id)
if 0 < len(existing_credential):
# TODO - credential already exists in the database, what to do?
LOGGER.error(" >>> Received duplicate for credential_id: "
+ cred_id + ", exch id: " + cred_ex_id)
existing = True
ret_cred_id = cred_id
else:
# new credential, populate database
credential = credential_manager.process(credential)
ret_cred_id = credential.credential_id
else:
ret_cred_id = cred_data["thread_id"]
# check if the credential is in the wallet already
if existing:
resp = call_agent_with_retry(
f"{settings.AGENT_ADMIN_URL}/credential/{ret_cred_id}",
post_method=False,
headers=settings.ADMIN_REQUEST_HEADERS,
)
if resp.status_code == 404:
existing = False
# Instruct the agent to store the credential in wallet
if not existing:
# post with retry - if returned status is 503 unavailable retry a few times
resp = call_agent_with_retry(
f"{settings.AGENT_ADMIN_URL}/issue-credential{'-' + v if v else ''}/records/{cred_ex_id}/store",
post_method=True,
payload={"credential_id": ret_cred_id},
headers=settings.ADMIN_REQUEST_HEADERS,
)
if resp.status_code == 404:
# TODO assume the credential exchange has completed?
resp = call_agent_with_retry(
f"{settings.AGENT_ADMIN_URL}/credential/{ret_cred_id}",
post_method=False,
headers=settings.ADMIN_REQUEST_HEADERS,
)
if resp.status_code == 404:
LOGGER.error(
" >>> Error cred exchange id is missing but credential is not available for " + cred_ex_id + ", " + ret_cred_id)
return Response("Error cred exchange id is missing but credential is not available", status=status.HTTP_400_BAD_REQUEST)
pass
else:
resp.raise_for_status()
response_data = {
"success": True,
"details": f"Received credential with id {ret_cred_id}",
}
return Response(response_data)
except Exception as e:
raise e
def raise_random_exception(cred_ex_id, method=""):
if 1 == random.randint(1, 50):
print(f"Raise random exception for {cred_ex_id} from method: {method}")
raise Exception("Deliberate error to test problem reporting")
| [] | [] | [
"PROCESS_INBOUND_CREDENTIALS",
"RANDOM_ERRORS"
] | [] | ["PROCESS_INBOUND_CREDENTIALS", "RANDOM_ERRORS"] | python | 2 | 0 | |
cmd/root.go | package cmd
import (
"fmt"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"os"
"os/signal"
"runtime"
)
const (
// ExitSetupFailed defines exit code
ExitSetupFailed = 1
)
var (
configPath string
defaultConfigPath string
logLevel string
rootCmd = &cobra.Command{
Use: "wiretrustee",
Short: "",
Long: "",
}
)
// Execute executes the root command.
func Execute() error {
return rootCmd.Execute()
}
func init() {
defaultConfigPath = "/etc/wiretrustee/config.json"
if runtime.GOOS == "windows" {
defaultConfigPath = os.Getenv("PROGRAMDATA") + "\\Wiretrustee\\" + "config.json"
}
rootCmd.PersistentFlags().StringVar(&configPath, "config", defaultConfigPath, "Wiretrustee config file location to write new config to")
rootCmd.PersistentFlags().StringVar(&logLevel, "log-level", "info", "")
rootCmd.AddCommand(initCmd)
rootCmd.AddCommand(addPeerCmd)
rootCmd.AddCommand(upCmd)
rootCmd.AddCommand(signalCmd)
rootCmd.AddCommand(serviceCmd)
serviceCmd.AddCommand(runCmd, startCmd, stopCmd, restartCmd) // service control commands are subcommands of service
serviceCmd.AddCommand(installCmd, uninstallCmd) // service installer commands are subcommands of service
}
// SetupCloseHandler handles SIGTERM signal and exits with success
func SetupCloseHandler() {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
for range c {
fmt.Println("\r- Ctrl+C pressed in Terminal")
stopUP <- 0
}
}()
}
// InitLog parses and sets log-level input
func InitLog(logLevel string) {
level, err := log.ParseLevel(logLevel)
if err != nil {
log.Errorf("Failed parsing log-level %s: %s", logLevel, err)
os.Exit(ExitSetupFailed)
}
log.SetLevel(level)
}
| [
"\"PROGRAMDATA\""
] | [] | [
"PROGRAMDATA"
] | [] | ["PROGRAMDATA"] | go | 1 | 0 | |
agents/eval/evaluate_vision_dagger.py | import copy
import numpy as np
import torch
import os
import sys
sys.path.insert(0, os.environ['ALFRED_ROOT'])
from agents.utils.misc import extract_admissible_commands
def evaluate_vision_dagger(env, agent, num_games, debug=False):
env.seed(42)
agent.eval()
episode_no = 0
res_points, res_steps, res_gcs = [], [], []
res_info = []
with torch.no_grad():
while(True):
if episode_no >= num_games:
break
obs, infos = env.reset()
game_names = infos["extra.gamefile"]
batch_size = len(obs)
agent.init(batch_size)
previous_dynamics = None
execute_actions = []
prev_step_dones, prev_rewards = [], []
for _ in range(batch_size):
execute_actions.append("restart")
prev_step_dones.append(0.0)
prev_rewards.append(0.0)
observation_strings = list(obs)
observation_strings = agent.preprocess_observation(observation_strings)
task_desc_strings, observation_strings = agent.get_task_and_obs(observation_strings)
first_sight_strings = copy.deepcopy(observation_strings)
agent.observation_pool.push_first_sight(first_sight_strings)
if agent.action_space == "exhaustive":
action_candidate_list = [extract_admissible_commands(intro, obs) for intro, obs in zip(first_sight_strings, observation_strings)]
else:
action_candidate_list = list(infos["admissible_commands"])
action_candidate_list = agent.preprocess_action_candidates(action_candidate_list)
# extract exploration frame features
if agent.use_exploration_frame_feats:
exploration_frames = env.get_exploration_frames()
exploration_frame_feats = agent.extract_exploration_frame_feats(exploration_frames)
if debug:
print(first_sight_strings[0])
print(task_desc_strings[0])
still_running_mask = []
sequence_game_points = []
goal_condition_points = []
print_actions = []
report = agent.report_frequency > 0 and (episode_no % agent.report_frequency <= (episode_no - batch_size) % agent.report_frequency)
for step_no in range(agent.max_nb_steps_per_episode):
# get visual features
current_frames = env.get_frames()
observation_feats = agent.extract_visual_features(current_frames)
# add exploration features if specified
if agent.use_exploration_frame_feats:
observation_feats = [torch.cat([ef, obs], dim=0) for ef, obs in zip(exploration_frame_feats, observation_feats)]
# predict actions
if agent.action_space == "generation":
execute_actions, current_dynamics = agent.command_generation_greedy_generation(observation_feats, task_desc_strings, previous_dynamics)
else:
raise NotImplementedError()
obs, _, dones, infos = env.step(execute_actions)
scores = [float(item) for item in infos["won"]]
gcs =[float(item) for item in infos["goal_condition_success_rate"]] if "goal_condition_success_rate" in infos else [0.0]*batch_size
dones = [float(item) for item in dones]
if debug:
print(execute_actions[0])
print(obs[0])
if agent.action_space == "exhaustive":
action_candidate_list = [extract_admissible_commands(intro, obs) for intro, obs in zip(first_sight_strings, observation_strings)]
else:
action_candidate_list = list(infos["admissible_commands"])
action_candidate_list = agent.preprocess_action_candidates(action_candidate_list)
previous_dynamics = current_dynamics
if step_no == agent.max_nb_steps_per_episode - 1:
# terminate the game because DQN requires one extra step
dones = [1.0 for _ in dones]
still_running = [1.0 - float(item) for item in prev_step_dones] # list of float
prev_step_dones = dones
step_rewards = [float(curr) - float(prev) for curr, prev in zip(scores, prev_rewards)] # list of float
prev_rewards = scores
sequence_game_points.append(step_rewards)
goal_condition_points.append(gcs)
still_running_mask.append(still_running)
print_actions.append(execute_actions[0] if still_running[0] else "--")
# if all ended, break
if np.sum(still_running) == 0:
break
game_steps = np.sum(np.array(still_running_mask), 0).tolist() # batch
game_points = np.max(np.array(sequence_game_points), 0).tolist() # batch
game_gcs = np.max(np.array(goal_condition_points), 0).tolist() # batch
for i in range(batch_size):
if len(res_points) >= num_games:
break
res_points.append(game_points[i])
res_gcs.append(game_gcs[i])
res_steps.append(game_steps[i])
res_info.append("/".join(game_names[i].split("/")[-3:-1]) + ", score: " + str(game_points[i]) + ", step: " + str(game_steps[i]))
# finish game
agent.finish_of_episode(episode_no, batch_size)
episode_no += batch_size
if not report:
continue
print("Model: {:s} | Episode: {:3d} | {:s} | game points: {:2.3f} | game goal-condition points: {:2.3f} | game steps: {:2.3f}".format(agent.experiment_tag, episode_no, game_names[0], np.mean(res_points), np.mean(res_gcs), np.mean(res_steps)))
# print(game_id + ": " + " | ".join(print_actions))
print(" | ".join(print_actions))
average_points, average_gc_points, average_steps = np.mean(res_points), np.mean(res_gcs), np.mean(res_steps)
print("================================================")
print("eval game points: " + str(average_points) + ", eval game goal-condition points : " + str(average_gc_points) + ", eval game steps: " + str(average_steps))
for item in res_info:
print(item)
return {
'average_points': average_points,
'average_goal_condition_points': average_gc_points,
'average_steps': average_steps,
'res_points': res_points,
'res_gcs': res_gcs,
'res_steps': res_steps,
'res_info': res_info
}
| [] | [] | [
"ALFRED_ROOT"
] | [] | ["ALFRED_ROOT"] | python | 1 | 0 | |
fuzzers/045-hclk-cmt-pips/generate.py | #!/usr/bin/env python3
from prjxray.segmaker import Segmaker
import os
import os.path
def bitfilter(frame, word):
if frame < 26:
return False
return True
IOCLK_MAP = {
'HCLK_IOI_I2IOCLK_TOP0': 'HCLK_CMT_CCIO0',
'HCLK_IOI_I2IOCLK_TOP1': 'HCLK_CMT_CCIO1',
'HCLK_IOI_I2IOCLK_BOT0': 'HCLK_CMT_CCIO2',
'HCLK_IOI_I2IOCLK_BOT1': 'HCLK_CMT_CCIO3',
}
IOCLK_SRCS = set(IOCLK_MAP.values())
def main():
    """Correlate HCLK_CMT PIP usage with bitstream bits via segmaker.

    Reads the candidate PIP lists for both HCLK_CMT tile flavours, maps
    sites/tiles to their CMT clock regions, then walks the ``design.txt``
    pip dump to tag, per HCLK_CMT tile, which PIPs and ports are in use.
    Finally hands the tags to segmaker to solve for configuration bits.

    Requires the ``FUZDIR`` environment variable to point at the fuzzer
    directory (os.getenv returns None otherwise and os.path.join fails).
    """
    segmk = Segmaker("design.bits")

    tiledata = {}
    pipdata = {}    # tile_type -> list of (src, dst) candidate PIPs
    ignpip = set()
    tile_ports = {}  # tile_type -> set of all src/dst port names

    # Load candidate PIPs for the HCLK_CMT tile type.
    with open(os.path.join(os.getenv('FUZDIR'), '..', 'piplist', 'build',
                           'hclk_cmt', 'hclk_cmt.txt')) as f:
        for l in f:
            tile_type, dst, src = l.strip().split('.')
            if tile_type not in pipdata:
                pipdata[tile_type] = []
                tile_ports[tile_type] = set()
            pipdata[tile_type].append((src, dst))
            tile_ports[tile_type].add(src)
            tile_ports[tile_type].add(dst)

    # Same for the left-hand (HCLK_CMT_L) tile type.
    with open(os.path.join(os.getenv('FUZDIR'), '..', 'piplist', 'build',
                           'hclk_cmt', 'hclk_cmt_l.txt')) as f:
        for l in f:
            tile_type, dst, src = l.strip().split('.')
            if tile_type not in pipdata:
                pipdata[tile_type] = []
                tile_ports[tile_type] = set()
            pipdata[tile_type].append((src, dst))
            tile_ports[tile_type].add(src)
            tile_ports[tile_type].add(dst)

    # Map every tile to its CMT region, and each region to its HCLK_CMT tile.
    tile_to_cmt = {}
    cmt_to_hclk_cmt = {}
    with open(os.path.join(os.getenv('FUZDIR'), 'build',
                           'cmt_regions.csv')) as f:
        for l in f:
            site, cmt, tile = l.strip().split(',')
            tile_to_cmt[tile] = cmt
            if tile.startswith('HCLK_CMT'):
                cmt_to_hclk_cmt[cmt] = tile

    active_ioclks = set()

    print("Loading tags from design.txt.")
    with open("design.txt", "r") as f:
        for line in f:
            # Fields: tile, pip name, src wire, dst wire, pseudo-pip flag,
            # direction flag (both parsed as ints below).
            tile, pip, src, dst, pnum, pdir = line.split()
            pip_prefix, _ = pip.split(".")
            tile_from_pip, tile_type = pip_prefix.split('/')
            assert tile == tile_from_pip
            _, src = src.split("/")
            _, dst = dst.split("/")
            pnum = int(pnum)
            pdir = int(pdir)

            # An I2IOCLK source in any tile marks the corresponding CCIO
            # input of the region's HCLK_CMT tile as active.
            if src in IOCLK_MAP:
                active_ioclks.add(
                    (cmt_to_hclk_cmt[tile_to_cmt[tile]], IOCLK_MAP[src]))

            if not tile.startswith('HCLK_CMT'):
                continue

            if tile not in tiledata:
                tiledata[tile] = {
                    "type": tile_type,
                    "pips": set(),
                    "srcs": set(),
                    "dsts": set()
                }

            tiledata[tile]["pips"].add((src, dst))
            tiledata[tile]["srcs"].add(src)
            tiledata[tile]["dsts"].add(dst)

            # Bidirectional pips (pdir == 0) count both wires as src and dst.
            if pdir == 0:
                tiledata[tile]["srcs"].add(dst)
                tiledata[tile]["dsts"].add(src)

            # Pseudo-pips and bidirectional pips are not solved for.
            if pnum == 1 or pdir == 0:
                ignpip.add((src, dst))

    for tile, pips_srcs_dsts in tiledata.items():
        tile_type = pips_srcs_dsts["type"]
        pips = pips_srcs_dsts["pips"]

        # Tag each candidate PIP as used (1) or provably unused (0); a PIP
        # whose destination is driven by some other pip is left untagged.
        for src, dst in pipdata[tile_type]:
            if (src, dst) in ignpip:
                pass
            elif (src, dst) in pips:
                segmk.add_tile_tag(tile, "%s.%s" % (dst, src), 1)
            elif dst not in tiledata[tile]["dsts"]:
                segmk.add_tile_tag(tile, "%s.%s" % (dst, src), 0)

        # Emit *_USED tags for ports, with documented exclusions.
        for port in tile_ports[tile_type]:
            # These ones do not have any outgoing connections from the tile.
            if "FREQ_REF" in port:
                continue

            # There seems to be no special bits related to use of
            # HCLK_CMT_MUX_CLKINT_n wires.
            if "HCLK_CMT_MUX_CLKINT" in port:
                continue

            # It seems that CCIOn_USED is not enabled when a net goes through
            # FREQ_REFn. Do not emit this tag if this happens.
            if "CCIO" in port:
                n = int(port[-1])
                dst = "HCLK_CMT_MUX_OUT_FREQ_REF{}".format(n)
                if dst in tiledata[tile]["dsts"]:
                    continue

            if port in tiledata[tile]["dsts"] or port in tiledata[tile]["srcs"]:
                segmk.add_tile_tag(tile, "{}_USED".format(port), 1)
            else:
                segmk.add_tile_tag(tile, "{}_USED".format(port), 0)

        # Emit *_ACTIVE tags for CCIO clock inputs, either used directly in
        # this tile or fed from an I2IOCLK wire elsewhere in the region.
        for ioclk in IOCLK_SRCS:
            if ioclk in tiledata[tile]["srcs"] or (tile,
                                                   ioclk) in active_ioclks:
                segmk.add_tile_tag(tile, "{}_ACTIVE".format(ioclk), 1)
            else:
                segmk.add_tile_tag(tile, "{}_ACTIVE".format(ioclk), 0)

    segmk.compile(bitfilter=bitfilter)
    segmk.write()
# Allow the fuzzer to be run directly as a script.
if __name__ == "__main__":
    main()
| [] | [] | [
"FUZDIR"
] | [] | ["FUZDIR"] | python | 1 | 0 | |
app/public/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks.

    Sets the default Django settings module and hands control to Django's
    command-line dispatcher. Raises a descriptive ImportError when Django
    itself cannot be imported.
    """
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'public.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
# Standard Django entry point when invoked as `python manage.py ...`.
if __name__ == '__main__':
    main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
pkg/server/server.go | package server
import (
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"io/ioutil"
net2 "net"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
corev1 "k8s.io/api/core/v1"
"github.com/k3s-io/helm-controller/pkg/helm"
"github.com/pkg/errors"
"github.com/rancher/k3s/pkg/apiaddresses"
"github.com/rancher/k3s/pkg/clientaccess"
"github.com/rancher/k3s/pkg/daemons/config"
"github.com/rancher/k3s/pkg/daemons/control"
"github.com/rancher/k3s/pkg/datadir"
"github.com/rancher/k3s/pkg/deploy"
"github.com/rancher/k3s/pkg/node"
"github.com/rancher/k3s/pkg/nodepassword"
"github.com/rancher/k3s/pkg/rootlessports"
"github.com/rancher/k3s/pkg/servicelb"
"github.com/rancher/k3s/pkg/static"
"github.com/rancher/k3s/pkg/util"
"github.com/rancher/k3s/pkg/version"
v1 "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
"github.com/rancher/wrangler/pkg/leader"
"github.com/rancher/wrangler/pkg/resolvehome"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/net"
)
const (
MasterRoleLabelKey = "node-role.kubernetes.io/master"
ControlPlaneRoleLabelKey = "node-role.kubernetes.io/control-plane"
ETCDRoleLabelKey = "node-role.kubernetes.io/etcd"
)
// ResolveDataDir expands and normalizes dataDir via the datadir package and
// returns the "server" subdirectory within it.
func ResolveDataDir(dataDir string) (string, error) {
	resolved, err := datadir.Resolve(dataDir)
	return filepath.Join(resolved, "server"), err
}
// StartServer boots the k3s server: it prepares the data directory, extends
// NO_PROXY for in-cluster traffic, starts the embedded Kubernetes control
// plane, installs the HTTP router, and launches the controller goroutines.
// It then runs configured startup hooks, prints the join token, and writes
// the admin kubeconfig.
func StartServer(ctx context.Context, config *Config) error {
	if err := setupDataDirAndChdir(&config.ControlConfig); err != nil {
		return err
	}

	if err := setNoProxyEnv(&config.ControlConfig); err != nil {
		return err
	}

	if err := control.Server(ctx, &config.ControlConfig); err != nil {
		return errors.Wrap(err, "starting kubernetes")
	}

	config.ControlConfig.Runtime.Handler = router(ctx, config)

	// With the local apiserver disabled only the etcd node labels/annotations
	// are managed; otherwise controllers start once the apiserver is ready.
	if config.ControlConfig.DisableAPIServer {
		go setETCDLabelsAndAnnotations(ctx, config)
	} else {
		go startOnAPIServerReady(ctx, config)
	}

	for _, hook := range config.StartupHooks {
		if err := hook(ctx, config.ControlConfig.Runtime.APIServerReady, config.ControlConfig.Runtime.KubeConfigAdmin); err != nil {
			return errors.Wrap(err, "startup hook")
		}
	}

	// Pick the IP advertised in the printed join command: the explicit bind
	// address if set, else a detected host interface, else loopback.
	ip := net2.ParseIP(config.ControlConfig.BindAddress)
	if ip == nil {
		hostIP, err := net.ChooseHostInterface()
		if err == nil {
			ip = hostIP
		} else {
			ip = net2.ParseIP("127.0.0.1")
		}
	}

	if err := printTokens(ip.String(), &config.ControlConfig); err != nil {
		return err
	}

	return writeKubeConfig(config.ControlConfig.Runtime.ServerCA, config)
}
// startOnAPIServerReady blocks until the context is cancelled or the
// apiserver signals readiness, then starts the server controllers.
// Controller startup failure is fatal to the process.
func startOnAPIServerReady(ctx context.Context, config *Config) {
	select {
	case <-ctx.Done():
		return
	case <-config.ControlConfig.Runtime.APIServerReady:
		if err := runControllers(ctx, config); err != nil {
			logrus.Fatalf("failed to start controllers: %v", err)
		}
	}
}
// runControllers builds the shared controller context from the admin
// kubeconfig, stages static assets and manifests, migrates the legacy
// node-password file into secrets, then starts cluster, user-supplied, and
// leader-only controllers. Leader election is used unless NoLeaderElect is
// set, in which case the leader controllers run unconditionally.
func runControllers(ctx context.Context, config *Config) error {
	controlConfig := &config.ControlConfig

	sc, err := newContext(ctx, controlConfig.Runtime.KubeConfigAdmin)
	if err != nil {
		return err
	}

	if err := stageFiles(ctx, sc, controlConfig); err != nil {
		return err
	}

	// run migration before we set controlConfig.Runtime.Core
	if err := nodepassword.MigrateFile(
		sc.Core.Core().V1().Secret(),
		sc.Core.Core().V1().Node(),
		controlConfig.Runtime.NodePasswdFile); err != nil {
		logrus.Warn(errors.Wrapf(err, "error migrating node-password file"))
	}

	controlConfig.Runtime.Core = sc.Core

	if controlConfig.Runtime.ClusterControllerStart != nil {
		if err := controlConfig.Runtime.ClusterControllerStart(ctx); err != nil {
			return errors.Wrapf(err, "starting cluster controllers")
		}
	}

	for _, controller := range config.Controllers {
		if err := controller(ctx, sc); err != nil {
			return errors.Wrap(err, "controller")
		}
	}

	if err := sc.Start(ctx); err != nil {
		return err
	}

	// start runs the leader-only controllers; invoked either directly (no
	// leader election) or by the elector once leadership is acquired.
	// Failures here panic because there is no caller left to return to.
	start := func(ctx context.Context) {
		if err := coreControllers(ctx, sc, config); err != nil {
			panic(err)
		}
		for _, controller := range config.LeaderControllers {
			if err := controller(ctx, sc); err != nil {
				panic(errors.Wrap(err, "leader controller"))
			}
		}
		if err := sc.Start(ctx); err != nil {
			panic(err)
		}
	}

	go setControlPlaneRoleLabel(ctx, sc.Core.Core().V1().Node(), config)

	go setClusterDNSConfig(ctx, config, sc.Core.Core().V1().ConfigMap())

	if controlConfig.NoLeaderElect {
		go func() {
			start(ctx)
			<-ctx.Done()
			logrus.Fatal("controllers exited")
		}()
	} else {
		go leader.RunOrDie(ctx, "", version.Program, sc.K8s, start)
	}

	return nil
}
// coreControllers registers the built-in k3s controllers: node management,
// Helm chart deployment, the service load balancer, apiserver address
// tracking, and (when rootless) the rootless port forwarder.
func coreControllers(ctx context.Context, sc *Context, config *Config) error {
	if err := node.Register(ctx,
		!config.ControlConfig.Skips["coredns"],
		sc.Core.Core().V1().Secret(),
		sc.Core.Core().V1().ConfigMap(),
		sc.Core.Core().V1().Node()); err != nil {
		return err
	}

	helm.Register(ctx, sc.Apply,
		sc.Helm.Helm().V1().HelmChart(),
		sc.Helm.Helm().V1().HelmChartConfig(),
		sc.Batch.Batch().V1().Job(),
		sc.Auth.Rbac().V1().ClusterRoleBinding(),
		sc.Core.Core().V1().ServiceAccount(),
		sc.Core.Core().V1().ConfigMap())

	if err := servicelb.Register(ctx,
		sc.K8s,
		sc.Apply,
		sc.Apps.Apps().V1().DaemonSet(),
		sc.Apps.Apps().V1().Deployment(),
		sc.Core.Core().V1().Node(),
		sc.Core.Core().V1().Pod(),
		sc.Core.Core().V1().Service(),
		sc.Core.Core().V1().Endpoints(),
		!config.DisableServiceLB, config.Rootless); err != nil {
		return err
	}

	if err := apiaddresses.Register(ctx, config.ControlConfig.Runtime, sc.Core.Core().V1().Endpoints()); err != nil {
		return err
	}

	if config.Rootless {
		return rootlessports.Register(ctx, sc.Core.Core().V1().Service(), !config.DisableServiceLB, config.ControlConfig.HTTPSPort)
	}

	return nil
}
// stageFiles writes the embedded static assets and deploy manifests into the
// data directory (substituting the cluster template variables) and then
// starts the manifest file watcher. Traefik v2 deployment is skipped when a
// legacy Traefik v1 HelmChart is already installed.
func stageFiles(ctx context.Context, sc *Context, controlConfig *config.Control) error {
	dataDir := filepath.Join(controlConfig.DataDir, "static")
	if err := static.Stage(dataDir); err != nil {
		return err
	}

	dataDir = filepath.Join(controlConfig.DataDir, "manifests")
	// Placeholders replaced inside the staged manifest templates.
	templateVars := map[string]string{
		"%{CLUSTER_DNS}%":                controlConfig.ClusterDNS.String(),
		"%{CLUSTER_DOMAIN}%":             controlConfig.ClusterDomain,
		"%{DEFAULT_LOCAL_STORAGE_PATH}%": controlConfig.DefaultLocalStoragePath,
	}

	skip := controlConfig.Skips
	if !skip["traefik"] && isHelmChartTraefikV1(sc) {
		logrus.Warn("Skipping Traefik v2 deployment due to existing Traefik v1 installation")
		skip["traefik"] = true
	}

	if err := deploy.Stage(dataDir, templateVars, skip); err != nil {
		return err
	}

	return deploy.WatchFiles(ctx, sc.Apply, sc.K3s.K3s().V1().Addon(), controlConfig.Disables, dataDir)
}
// isHelmChartTraefikV1 checks for an existing HelmChart resource with spec.chart containing traefik-1,
// as deployed by the legacy chart (https://%{KUBERNETES_API}%/static/charts/traefik-1.81.0.tgz).
// Any lookup error (including "not found") is treated as "no v1 chart present".
func isHelmChartTraefikV1(sc *Context) bool {
	prefix := "traefik-1."
	helmChart, err := sc.Helm.Helm().V1().HelmChart().Get(metav1.NamespaceSystem, "traefik", metav1.GetOptions{})
	if err != nil {
		logrus.WithError(err).Info("Failed to get existing traefik HelmChart")
		return false
	}

	// Only the final path element of spec.chart is compared, so both plain
	// chart names and chart URLs are handled.
	chart := path.Base(helmChart.Spec.Chart)
	if strings.HasPrefix(chart, prefix) {
		logrus.WithField("chart", chart).Info("Found existing traefik v1 HelmChart")
		return true
	}

	return false
}
// HomeKubeConfig returns the kubeconfig path to use. For writing, root
// (non-rootless) gets the global path and everyone else the per-user path.
// For reading, the global path is used only if it already exists, otherwise
// the per-user path is resolved.
func HomeKubeConfig(write, rootless bool) (string, error) {
	switch {
	case write && os.Getuid() == 0 && !rootless:
		return datadir.GlobalConfig, nil
	case write:
		return resolvehome.Resolve(datadir.HomeConfig)
	}

	_, err := os.Stat(datadir.GlobalConfig)
	if err == nil {
		return datadir.GlobalConfig, nil
	}
	return resolvehome.Resolve(datadir.HomeConfig)
}
// printTokens writes the server token to <dataDir>/token, maintains the
// legacy node-token symlink for backwards compatibility, and logs the agent
// join command using the given advertise address.
func printTokens(advertiseIP string, config *config.Control) error {
	var (
		nodeFile string
	)

	if advertiseIP == "" {
		advertiseIP = "127.0.0.1"
	}

	if len(config.Runtime.ServerToken) > 0 {
		p := filepath.Join(config.DataDir, "token")
		// A write failure is tolerated silently; the join command is only
		// printed when the token file was written.
		if err := writeToken(config.Runtime.ServerToken, p, config.Runtime.ServerCA); err == nil {
			logrus.Infof("Node token is available at %s", p)
			nodeFile = p
		}

		// backwards compatibility
		np := filepath.Join(config.DataDir, "node-token")
		if !isSymlink(np) {
			if err := os.RemoveAll(np); err != nil {
				return err
			}
			if err := os.Symlink(p, np); err != nil {
				return err
			}
		}
	}

	if len(nodeFile) > 0 {
		printToken(config.SupervisorPort, advertiseIP, "To join node to cluster:", "agent")
	}

	return nil
}
// writeKubeConfig writes the admin kubeconfig pointing at this server's
// HTTPS port, applies the configured (or default 0600) file mode, and
// maintains a symlink at the default location when KubeConfigOutput
// redirects the real file elsewhere.
func writeKubeConfig(certs string, config *Config) error {
	ip := config.ControlConfig.BindAddress
	if ip == "" {
		ip = "127.0.0.1"
	}
	url := fmt.Sprintf("https://%s:%d", ip, config.ControlConfig.HTTPSPort)
	kubeConfig, err := HomeKubeConfig(true, config.Rootless)
	// def records whether the default (home) location is used; the kubectl
	// hint below is only printed in that case.
	def := true
	if err != nil {
		kubeConfig = filepath.Join(config.ControlConfig.DataDir, "kubeconfig-"+version.Program+".yaml")
		def = false
	}
	kubeConfigSymlink := kubeConfig
	if config.ControlConfig.KubeConfigOutput != "" {
		kubeConfig = config.ControlConfig.KubeConfigOutput
	}

	if isSymlink(kubeConfigSymlink) {
		if err := os.Remove(kubeConfigSymlink); err != nil {
			logrus.Errorf("Failed to remove kubeconfig symlink")
		}
	}

	if err = clientaccess.WriteClientKubeConfig(kubeConfig, url, config.ControlConfig.Runtime.ServerCA, config.ControlConfig.Runtime.ClientAdminCert,
		config.ControlConfig.Runtime.ClientAdminKey); err == nil {
		logrus.Infof("Wrote kubeconfig %s", kubeConfig)
	} else {
		logrus.Errorf("Failed to generate kubeconfig: %v", err)
		return err
	}

	if config.ControlConfig.KubeConfigMode != "" {
		// KubeConfigMode is an octal string, e.g. "0644".
		mode, err := strconv.ParseInt(config.ControlConfig.KubeConfigMode, 8, 0)
		if err == nil {
			util.SetFileModeForPath(kubeConfig, os.FileMode(mode))
		} else {
			logrus.Errorf("Failed to set %s to mode %s: %v", kubeConfig, os.FileMode(mode), err)
		}
	} else {
		util.SetFileModeForPath(kubeConfig, os.FileMode(0600))
	}

	if kubeConfigSymlink != kubeConfig {
		if err := writeConfigSymlink(kubeConfig, kubeConfigSymlink); err != nil {
			logrus.Errorf("Failed to write kubeconfig symlink: %v", err)
		}
	}

	if def {
		logrus.Infof("Run: %s kubectl", filepath.Base(os.Args[0]))
	}

	return nil
}
// setupDataDirAndChdir resolves the configured data directory, creates it
// with 0700 permissions, and makes it the process working directory. The
// resolved path is written back into config.DataDir.
func setupDataDirAndChdir(config *config.Control) error {
	resolved, err := ResolveDataDir(config.DataDir)
	if err != nil {
		return err
	}
	config.DataDir = resolved

	if err := os.MkdirAll(resolved, 0700); err != nil {
		return errors.Wrapf(err, "can not mkdir %s", resolved)
	}
	if err := os.Chdir(resolved); err != nil {
		return errors.Wrapf(err, "can not chdir %s", resolved)
	}
	return nil
}
// printToken logs the join command for the given subcommand (e.g. "agent"),
// falling back to a detected host interface when advertiseIP is empty.
// The literal ${NODE_TOKEN} placeholder is intentional — the user fills it in.
func printToken(httpsPort int, advertiseIP, prefix, cmd string) {
	ip := advertiseIP
	if ip == "" {
		hostIP, err := net.ChooseHostInterface()
		if err != nil {
			logrus.Errorf("Failed to choose interface: %v", err)
		}
		// NOTE(review): on error hostIP may be nil; net.IP.String() then
		// yields "<nil>" rather than panicking.
		ip = hostIP.String()
	}
	logrus.Infof("%s %s %s -s https://%s:%d -t ${NODE_TOKEN}", prefix, version.Program, cmd, ip, httpsPort)
}
// FormatToken prefixes token with the "K10" scheme marker and, when certFile
// is given, the hex-encoded SHA-256 digest of the certificate file followed
// by "::" so joining nodes can pin the server CA. An empty token is returned
// unchanged.
func FormatToken(token string, certFile string) (string, error) {
	if len(token) == 0 {
		return token, nil
	}

	prefix := "K10"
	if len(certFile) > 0 {
		bytes, err := ioutil.ReadFile(certFile)
		if err != nil {
			// Bug fix: previously returned ("", nil), silently producing an
			// unusable empty token when the cert could not be read.
			return "", err
		}
		digest := sha256.Sum256(bytes)
		prefix = "K10" + hex.EncodeToString(digest[:]) + "::"
	}

	return prefix + token, nil
}
// writeToken formats token (adding the CA digest prefix derived from certs)
// and writes it, newline terminated, to file with mode 0600. A zero-length
// token is a no-op.
func writeToken(token, file, certs string) error {
	if token == "" {
		return nil
	}

	formatted, err := FormatToken(token, certs)
	if err != nil {
		return err
	}
	data := []byte(formatted + "\n")
	return ioutil.WriteFile(file, data, 0600)
}
// setNoProxyEnv extends NO_PROXY with the cluster domain and the cluster and
// service CIDRs so in-cluster traffic bypasses any configured HTTP proxy.
// Entries from both NO_PROXY and the lowercase no_proxy are preserved; the
// lowercase variable is then unset so only canonical NO_PROXY remains.
func setNoProxyEnv(config *config.Control) error {
	isComma := func(r rune) bool { return r == ',' }

	var entries []string
	entries = append(entries, strings.FieldsFunc(os.Getenv("NO_PROXY"), isComma)...)
	entries = append(entries, strings.FieldsFunc(os.Getenv("no_proxy"), isComma)...)
	entries = append(entries,
		".svc",
		"."+config.ClusterDomain,
		config.ClusterIPRange.String(),
		config.ServiceIPRange.String(),
	)

	os.Unsetenv("no_proxy")
	return os.Setenv("NO_PROXY", strings.Join(entries, ","))
}
// writeConfigSymlink replaces kubeconfigSymlink with a fresh symbolic link
// pointing at kubeconfig, creating any missing parent directories first.
func writeConfigSymlink(kubeconfig, kubeconfigSymlink string) error {
	// Remove any stale link; a missing file is fine.
	err := os.Remove(kubeconfigSymlink)
	if err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("failed to remove %s file: %v", kubeconfigSymlink, err)
	}
	if err = os.MkdirAll(filepath.Dir(kubeconfigSymlink), 0755); err != nil {
		return fmt.Errorf("failed to create path for symlink: %v", err)
	}
	if err = os.Symlink(kubeconfig, kubeconfigSymlink); err != nil {
		return fmt.Errorf("failed to create symlink: %v", err)
	}
	return nil
}
// isSymlink reports whether config exists and is a symbolic link. Lstat is
// used so the link itself is examined rather than its target.
func isSymlink(config string) bool {
	fi, err := os.Lstat(config)
	return err == nil && fi.Mode()&os.ModeSymlink != 0
}
// setControlPlaneRoleLabel waits for the local agent's node object to appear
// and then labels it as control-plane/master, retrying once per second until
// the update succeeds or the context is cancelled. When etcd is disabled it
// also strips a stale etcd role label. No-op if the agent or apiserver is
// disabled.
func setControlPlaneRoleLabel(ctx context.Context, nodes v1.NodeClient, config *Config) error {
	if config.DisableAgent || config.ControlConfig.DisableAPIServer {
		return nil
	}
	for {
		nodeName := os.Getenv("NODE_NAME")
		if nodeName == "" {
			logrus.Info("Waiting for control-plane node agent startup")
			time.Sleep(1 * time.Second)
			continue
		}
		node, err := nodes.Get(nodeName, metav1.GetOptions{})
		if err != nil {
			logrus.Infof("Waiting for control-plane node %s startup: %v", nodeName, err)
			time.Sleep(1 * time.Second)
			continue
		}

		// remove etcd label if etcd is disabled
		var etcdRoleLabelExists bool
		if config.ControlConfig.DisableETCD {
			if _, ok := node.Labels[ETCDRoleLabelKey]; ok {
				delete(node.Labels, ETCDRoleLabelKey)
				etcdRoleLabelExists = true
			}
		}
		// Already labelled and no etcd label to clear — nothing to update.
		if v, ok := node.Labels[ControlPlaneRoleLabelKey]; ok && v == "true" && !etcdRoleLabelExists {
			break
		}

		if node.Labels == nil {
			node.Labels = make(map[string]string)
		}
		node.Labels[ControlPlaneRoleLabelKey] = "true"
		node.Labels[MasterRoleLabelKey] = "true"

		_, err = nodes.Update(node)
		if err == nil {
			logrus.Infof("Control-plane role label has been set successfully on node: %s", nodeName)
			break
		}
		// Update conflicted or failed — back off a second and retry,
		// bailing out early on context cancellation.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Second):
		}
	}
	return nil
}
// setClusterDNSConfig ensures a kube-system/cluster-dns ConfigMap exists
// carrying the cluster DNS address and domain, retrying once per second
// until creation succeeds or the context is cancelled.
func setClusterDNSConfig(ctx context.Context, controlConfig *Config, configMap v1.ConfigMapClient) error {
	// check if configmap already exists
	_, err := configMap.Get("kube-system", "cluster-dns", metav1.GetOptions{})
	if err == nil {
		logrus.Infof("Cluster dns configmap already exists")
		return nil
	}
	clusterDNS := controlConfig.ControlConfig.ClusterDNS
	clusterDomain := controlConfig.ControlConfig.ClusterDomain
	c := &corev1.ConfigMap{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ConfigMap",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "cluster-dns",
			Namespace: "kube-system",
		},
		Data: map[string]string{
			"clusterDNS":    clusterDNS.String(),
			"clusterDomain": clusterDomain,
		},
	}
	for {
		_, err = configMap.Create(c)
		if err == nil {
			logrus.Infof("Cluster dns configmap has been set successfully")
			break
		}
		logrus.Infof("Waiting for control-plane dns startup: %v", err)

		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Second):
		}
	}
	return nil
}
| [
"\"NO_PROXY\"",
"\"no_proxy\"",
"\"NODE_NAME\""
] | [] | [
"NO_PROXY",
"NODE_NAME",
"no_proxy"
] | [] | ["NO_PROXY", "NODE_NAME", "no_proxy"] | go | 3 | 0 | |
internal/search/run/repository_test.go | package run
import (
"context"
"os"
"strconv"
"testing"
"github.com/google/zoekt"
"github.com/sourcegraph/sourcegraph/internal/search"
searchbackend "github.com/sourcegraph/sourcegraph/internal/search/backend"
"github.com/sourcegraph/sourcegraph/internal/search/result"
"github.com/sourcegraph/sourcegraph/internal/search/streaming"
"github.com/sourcegraph/sourcegraph/internal/search/unindexed"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/hexops/autogold"
)
// TestRepoShouldBeAdded exercises repoShouldBeAdded against the four
// combinations of repohasfile / -repohasfile filters and matching /
// non-matching repositories, using a mocked unindexed search to control
// whether the repo appears to contain the required file.
func TestRepoShouldBeAdded(t *testing.T) {
	if os.Getenv("CI") != "" {
		// #25936: Some unit tests rely on external services that break
		// in CI but not locally. They should be removed or improved.
		t.Skip("TestRepoShouldBeAdded only works in local dev and is not reliable in CI")
	}

	zoekt := &searchbackend.FakeSearcher{}

	t.Run("repo should be included in results, query has repoHasFile filter", func(t *testing.T) {
		repo := &search.RepositoryRevisions{Repo: types.MinimalRepo{ID: 123, Name: "foo/one"}, Revs: []search.RevisionSpecifier{{RevSpec: ""}}}
		// Mock reports a file match, so the repo satisfies repohasfile.
		unindexed.MockSearchFilesInRepos = func() ([]result.Match, *streaming.Stats, error) {
			rev := "1a2b3c"
			return []result.Match{&result.FileMatch{
				File: result.File{
					Repo:     types.MinimalRepo{ID: 123, Name: repo.Repo.Name},
					InputRev: &rev,
					Path:     "foo.go",
				},
			}}, &streaming.Stats{}, nil
		}
		pat := &search.TextPatternInfo{
			Pattern:                      "",
			FilePatternsReposMustInclude: []string{"foo"},
			IsRegExp:                     true,
			FileMatchLimit:               1,
			PathPatternsAreCaseSensitive: false,
			PatternMatchesContent:        true,
			PatternMatchesPath:           true,
		}
		shouldBeAdded, err := repoShouldBeAdded(context.Background(), zoekt, repo, pat)
		if err != nil {
			t.Fatal(err)
		}
		if !shouldBeAdded {
			t.Errorf("Expected shouldBeAdded for repo %v to be true, but got false", repo)
		}
	})

	t.Run("repo shouldn't be included in results, query has repoHasFile filter ", func(t *testing.T) {
		repo := &search.RepositoryRevisions{Repo: types.MinimalRepo{Name: "foo/no-match"}, Revs: []search.RevisionSpecifier{{RevSpec: ""}}}
		// Mock reports no matches, so repohasfile excludes the repo.
		unindexed.MockSearchFilesInRepos = func() ([]result.Match, *streaming.Stats, error) {
			return nil, &streaming.Stats{}, nil
		}
		pat := &search.TextPatternInfo{
			Pattern:                      "",
			FilePatternsReposMustInclude: []string{"foo"},
			IsRegExp:                     true,
			FileMatchLimit:               1,
			PathPatternsAreCaseSensitive: false,
			PatternMatchesContent:        true,
			PatternMatchesPath:           true,
		}
		shouldBeAdded, err := repoShouldBeAdded(context.Background(), zoekt, repo, pat)
		if err != nil {
			t.Fatal(err)
		}
		if shouldBeAdded {
			t.Errorf("Expected shouldBeAdded for repo %v to be false, but got true", repo)
		}
	})

	t.Run("repo shouldn't be included in results, query has -repoHasFile filter", func(t *testing.T) {
		repo := &search.RepositoryRevisions{Repo: types.MinimalRepo{ID: 123, Name: "foo/one"}, Revs: []search.RevisionSpecifier{{RevSpec: ""}}}
		// Mock reports a file match; with the negated filter the repo is excluded.
		unindexed.MockSearchFilesInRepos = func() ([]result.Match, *streaming.Stats, error) {
			rev := "1a2b3c"
			return []result.Match{&result.FileMatch{
				File: result.File{
					Repo:     types.MinimalRepo{ID: 123, Name: repo.Repo.Name},
					InputRev: &rev,
					Path:     "foo.go",
				},
			}}, &streaming.Stats{}, nil
		}
		pat := &search.TextPatternInfo{
			Pattern:                      "",
			FilePatternsReposMustExclude: []string{"foo"},
			IsRegExp:                     true,
			FileMatchLimit:               1,
			PathPatternsAreCaseSensitive: false,
			PatternMatchesContent:        true,
			PatternMatchesPath:           true,
		}
		shouldBeAdded, err := repoShouldBeAdded(context.Background(), zoekt, repo, pat)
		if err != nil {
			t.Fatal(err)
		}
		if shouldBeAdded {
			t.Errorf("Expected shouldBeAdded for repo %v to be false, but got true", repo)
		}
	})

	t.Run("repo should be included in results, query has -repoHasFile filter", func(t *testing.T) {
		repo := &search.RepositoryRevisions{Repo: types.MinimalRepo{Name: "foo/no-match"}, Revs: []search.RevisionSpecifier{{RevSpec: ""}}}
		// Mock reports no matches; with the negated filter the repo is kept.
		unindexed.MockSearchFilesInRepos = func() ([]result.Match, *streaming.Stats, error) {
			return nil, &streaming.Stats{}, nil
		}
		pat := &search.TextPatternInfo{
			Pattern:                      "",
			FilePatternsReposMustExclude: []string{"foo"},
			IsRegExp:                     true,
			FileMatchLimit:               1,
			PathPatternsAreCaseSensitive: false,
			PatternMatchesContent:        true,
			PatternMatchesPath:           true,
		}
		shouldBeAdded, err := repoShouldBeAdded(context.Background(), zoekt, repo, pat)
		if err != nil {
			t.Fatal(err)
		}
		if !shouldBeAdded {
			t.Errorf("Expected shouldBeAdded for repo %v to be true, but got false", repo)
		}
	})
}
// repoShouldBeAdded determines whether a repository should be included in the
// result set, based on whether the repository fits in the subset of
// repositories specified by the query's `repohasfile` and `-repohasfile`
// fields, if they exist. It wraps reposToAdd for a single repository.
func repoShouldBeAdded(ctx context.Context, zoekt zoekt.Streamer, repo *search.RepositoryRevisions, pattern *search.TextPatternInfo) (bool, error) {
	repos := []*search.RepositoryRevisions{repo}
	args := search.TextParameters{
		PatternInfo: pattern,
		Zoekt:       zoekt,
	}
	rsta, err := reposToAdd(ctx, &args, repos)
	if err != nil {
		return false, err
	}
	// The single input repo survived filtering iff exactly one repo remains.
	return len(rsta) == 1, nil
}
// Test_validRepoPattern checks which repo filter inputs validRepoPattern
// accepts as plain (non-regexp) patterns.
func Test_validRepoPattern(t *testing.T) {
	test := func(input string) string {
		_, ok := validRepoPattern(input)
		return strconv.FormatBool(ok)
	}
	autogold.Want("normal pattern", "true").Equal(t, test("ok ok"))
	autogold.Want("normal pattern with space", "true").Equal(t, test("ok @thing"))
	autogold.Want("unsupported prefix", "false").Equal(t, test("@nope"))
	autogold.Want("unsupported regexp", "false").Equal(t, test("(nope).*?(@(thing))"))
}
| [
"\"CI\""
] | [] | [
"CI"
] | [] | ["CI"] | go | 1 | 0 | |
pkg/runner/run_context.go | package runner
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"github.com/google/shlex"
"github.com/spf13/pflag"
"github.com/mitchellh/go-homedir"
log "github.com/sirupsen/logrus"
selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/container"
"github.com/nektos/act/pkg/model"
)
// ActPath is the path inside the job container where act stages its own
// runtime files (workflow event payload, envs.txt, paths.txt).
const ActPath string = "/var/run/act"

// RunContext contains info about current job
type RunContext struct {
	Name             string
	Config           *Config
	Matrix           map[string]interface{}
	Run              *model.Run
	EventJSON        string // raw JSON of the triggering event, copied into the container
	Env              map[string]string
	ExtraPath        []string
	CurrentStep      string // ID of the step currently executing
	StepResults      map[string]*model.StepResult
	ExprEval         ExpressionEvaluator
	JobContainer     container.Container
	OutputMappings   map[MappableOutput]MappableOutput
	JobName          string
	ActionPath       string
	ActionRef        string
	ActionRepository string
	Composite        *model.Action // composite action being run, if any
	Inputs           map[string]interface{}
	Parent           *RunContext // context this one was cloned from (see Clone)
}
// Clone returns a shallow copy of rc with per-step state reset: no current
// step, no composite action or inputs, a fresh empty step-result map, and
// Parent pointing back at rc. All other fields are shared with the original.
func (rc *RunContext) Clone() *RunContext {
	clone := *rc
	clone.CurrentStep = ""
	clone.Composite = nil
	clone.Inputs = nil
	clone.StepResults = make(map[string]*model.StepResult)
	clone.Parent = rc
	return &clone
}
// MappableOutput identifies a single step output (step ID plus output name),
// used as both key and value in RunContext.OutputMappings.
type MappableOutput struct {
	StepID     string
	OutputName string
}
// String returns "<workflow name>/<run context name>", used for logging and
// container naming.
func (rc *RunContext) String() string {
	return fmt.Sprintf("%s/%s", rc.Run.Workflow.Name, rc.Name)
}
// GetEnv returns the env for the context, lazily merging the runner config,
// workflow, and job environments on first call, and always forcing ACT=true
// so workflows can detect they are running under act.
func (rc *RunContext) GetEnv() map[string]string {
	if rc.Env == nil {
		rc.Env = mergeMaps(rc.Config.Env, rc.Run.Workflow.Env, rc.Run.Job().Environment())
	}
	rc.Env["ACT"] = "true"
	return rc.Env
}
// jobContainerName derives the docker container name for this job from the
// workflow/job identity (rc.String()).
func (rc *RunContext) jobContainerName() string {
	return createContainerName("act", rc.String())
}
// GetBindsAndMounts returns the binds and mounts for the container, resolving
// paths as appropriate. The docker daemon socket is always bound; the workdir
// is either bind-mounted from the host (Config.BindWorkdir) or backed by a
// named volume that is populated by copy later.
func (rc *RunContext) GetBindsAndMounts() ([]string, map[string]string) {
	name := rc.jobContainerName()

	if rc.Config.ContainerDaemonSocket == "" {
		rc.Config.ContainerDaemonSocket = "/var/run/docker.sock"
	}

	binds := []string{
		fmt.Sprintf("%s:%s", rc.Config.ContainerDaemonSocket, "/var/run/docker.sock"),
	}

	mounts := map[string]string{
		"act-toolcache": "/toolcache",
		name + "-env":   ActPath,
	}

	if rc.Config.BindWorkdir {
		bindModifiers := ""
		if runtime.GOOS == "darwin" {
			// delegated consistency improves bind-mount performance on macOS
			bindModifiers = ":delegated"
		}
		if selinux.GetEnabled() {
			// relabel the mount so the container may access it under SELinux
			bindModifiers = ":z"
		}
		binds = append(binds, fmt.Sprintf("%s:%s%s", rc.Config.Workdir, rc.Config.ContainerWorkdir(), bindModifiers))
	} else {
		mounts[name] = rc.Config.ContainerWorkdir()
	}

	return binds, mounts
}
// startJobContainer returns an executor that creates and starts the job
// container: it pulls the image, removes any stale container, creates and
// starts a new one kept alive by `tail -f /dev/null`, imports env from the
// image and /etc/environment, prepares ActPath, optionally copies the
// workspace in (when not bind-mounted), and stages act's workflow files.
func (rc *RunContext) startJobContainer() common.Executor {
	image := rc.platformImage()
	hostname := rc.hostname()

	return func(ctx context.Context) error {
		rawLogger := common.Logger(ctx).WithField("raw_output", true)
		logWriter := common.NewLineWriter(rc.commandHandler(ctx), func(s string) bool {
			if rc.Config.LogOutput {
				rawLogger.Infof("%s", s)
			} else {
				rawLogger.Debugf("%s", s)
			}
			return true
		})

		username, password, err := rc.handleCredentials()
		if err != nil {
			return fmt.Errorf("failed to handle credentials: %s", err)
		}

		common.Logger(ctx).Infof("\U0001f680 Start image=%s", image)
		name := rc.jobContainerName()

		// Baseline runner environment expected by many actions.
		envList := make([]string, 0)
		envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_TOOL_CACHE", "/opt/hostedtoolcache"))
		envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_OS", "Linux"))
		envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_TEMP", "/tmp"))

		binds, mounts := rc.GetBindsAndMounts()

		rc.JobContainer = container.NewContainer(&container.NewContainerInput{
			Cmd:         nil,
			Entrypoint:  []string{"/usr/bin/tail", "-f", "/dev/null"},
			WorkingDir:  rc.Config.ContainerWorkdir(),
			Image:       image,
			Username:    username,
			Password:    password,
			Name:        name,
			Env:         envList,
			Mounts:      mounts,
			NetworkMode: "host",
			Binds:       binds,
			Stdout:      logWriter,
			Stderr:      logWriter,
			Privileged:  rc.Config.Privileged,
			UsernsMode:  rc.Config.UsernsMode,
			Platform:    rc.Config.ContainerArchitecture,
			Hostname:    hostname,
		})

		// When the workdir is not bind-mounted, copy the local checkout into
		// the container volume instead.
		var copyWorkspace bool
		var copyToPath string
		if !rc.Config.BindWorkdir {
			copyToPath, copyWorkspace = rc.localCheckoutPath()
			copyToPath = filepath.Join(rc.Config.ContainerWorkdir(), copyToPath)
		}

		return common.NewPipelineExecutor(
			rc.JobContainer.Pull(rc.Config.ForcePull),
			rc.stopJobContainer(),
			rc.JobContainer.Create(rc.Config.ContainerCapAdd, rc.Config.ContainerCapDrop),
			rc.JobContainer.Start(false),
			rc.JobContainer.UpdateFromImageEnv(&rc.Env),
			rc.JobContainer.UpdateFromEnv("/etc/environment", &rc.Env),
			rc.JobContainer.Exec([]string{"mkdir", "-m", "0777", "-p", ActPath}, rc.Env, "root", ""),
			rc.JobContainer.CopyDir(copyToPath, rc.Config.Workdir+string(filepath.Separator)+".", rc.Config.UseGitIgnore).IfBool(copyWorkspace),
			rc.JobContainer.Copy(ActPath+"/", &container.FileEntry{
				Name: "workflow/event.json",
				Mode: 0644,
				Body: rc.EventJSON,
			}, &container.FileEntry{
				Name: "workflow/envs.txt",
				Mode: 0666,
				Body: "",
			}, &container.FileEntry{
				Name: "workflow/paths.txt",
				Mode: 0666,
				Body: "",
			}),
		)(ctx)
	}
}
// execJobContainer returns an executor that runs cmd inside the job container
// with the given environment, user, and working directory.
func (rc *RunContext) execJobContainer(cmd []string, env map[string]string, user, workdir string) common.Executor {
	return func(ctx context.Context) error {
		return rc.JobContainer.Exec(cmd, env, user, workdir)(ctx)
	}
}
// stopJobContainer removes the job container (if it exists) and its volume
// (if it exists), unless rc.Config.ReuseContainers requests that containers
// be kept between runs.
func (rc *RunContext) stopJobContainer() common.Executor {
	return func(ctx context.Context) error {
		if rc.JobContainer != nil && !rc.Config.ReuseContainers {
			return rc.JobContainer.Remove().
				Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName(), false))(ctx)
		}
		return nil
	}
}
// ActionCacheDir returns the directory used to cache checked-out remote
// actions: $XDG_CACHE_HOME/act when set, otherwise ~/.cache/act, falling
// back to ./act (made absolute) when the home directory cannot be resolved.
func (rc *RunContext) ActionCacheDir() string {
	xdgCache, ok := os.LookupEnv("XDG_CACHE_HOME")
	if !ok || xdgCache == "" {
		home, err := homedir.Dir()
		if err == nil {
			xdgCache = filepath.Join(home, ".cache")
		} else {
			xdgCache, err = filepath.Abs(".")
			if err != nil {
				log.Fatal(err)
			}
		}
	}
	return filepath.Join(xdgCache, "act")
}
// interpolateOutputs returns an executor that expands any expressions in the
// job's declared outputs after the job has run, writing interpolated values
// back into the job definition. Values without expressions are left alone.
func (rc *RunContext) interpolateOutputs() common.Executor {
	return func(ctx context.Context) error {
		ee := rc.NewExpressionEvaluator()
		for k, v := range rc.Run.Job().Outputs {
			interpolated := ee.Interpolate(v)
			if v != interpolated {
				rc.Run.Job().Outputs[k] = interpolated
			}
		}
		return nil
	}
}
// startContainer starts the job container (alias of startJobContainer).
func (rc *RunContext) startContainer() common.Executor {
	return rc.startJobContainer()
}

// stopContainer removes the job container (alias of stopJobContainer).
func (rc *RunContext) stopContainer() common.Executor {
	return rc.stopJobContainer()
}

// closeContainer closes the connection to the job container, if any.
func (rc *RunContext) closeContainer() common.Executor {
	return func(ctx context.Context) error {
		if rc.JobContainer != nil {
			return rc.JobContainer.Close()(ctx)
		}
		return nil
	}
}

// matrix returns the matrix values this run context was created for.
func (rc *RunContext) matrix() map[string]interface{} {
	return rc.Matrix
}

// result records the job's final result string on the job model.
func (rc *RunContext) result(result string) {
	rc.Run.Job().Result = result
}

// steps returns the list of steps of the job being run.
func (rc *RunContext) steps() []*model.Step {
	return rc.Run.Job().Steps
}
// Executor returns a pipeline executor for all the steps in the job. The
// whole job is skipped (returning nil) when its `if` condition evaluates
// to false.
func (rc *RunContext) Executor() common.Executor {
	return func(ctx context.Context) error {
		isEnabled, err := rc.isEnabled(ctx)
		if err != nil {
			return err
		}

		if isEnabled {
			return newJobExecutor(rc)(ctx)
		}

		return nil
	}
}
// CompositeExecutor returns a pipeline executor for all the steps of a
// composite action. Individual step errors are recorded into the job-error
// container (so remaining steps still run) and surfaced by the trailing
// common.JobError executor.
func (rc *RunContext) CompositeExecutor() common.Executor {
	steps := make([]common.Executor, 0)

	for i, step := range rc.Composite.Runs.Steps {
		// Steps without an explicit ID get their index as ID.
		if step.ID == "" {
			step.ID = fmt.Sprintf("%d", i)
		}

		// Copy the loop variable so each executor closes over its own step.
		stepcopy := step

		stepExec := rc.newStepExecutor(&stepcopy)
		steps = append(steps, func(ctx context.Context) error {
			err := stepExec(ctx)
			if err != nil {
				common.Logger(ctx).Errorf("%v", err)
				common.SetJobError(ctx, err)
			} else if ctx.Err() != nil {
				common.Logger(ctx).Errorf("%v", ctx.Err())
				common.SetJobError(ctx, ctx.Err())
			}
			return nil
		})
	}

	steps = append(steps, common.JobError)
	return func(ctx context.Context) error {
		return common.NewPipelineExecutor(steps...)(common.WithJobErrorContainer(ctx))
	}
}
// newStepExecutor returns an executor for a single step. It initializes the
// step's result to success, evaluates the step's `if:` condition, sets up the
// step environment, runs the step, and records outcome/conclusion. A failing
// step with continue-on-error keeps a success conclusion and a nil error.
func (rc *RunContext) newStepExecutor(step *model.Step) common.Executor {
	sc := &StepContext{
		RunContext: rc,
		Step:       step,
	}
	return func(ctx context.Context) error {
		rc.CurrentStep = sc.Step.ID
		// Assume success up front; overwritten below on skip or failure.
		rc.StepResults[rc.CurrentStep] = &model.StepResult{
			Outcome:    model.StepStatusSuccess,
			Conclusion: model.StepStatusSuccess,
			Outputs:    make(map[string]string),
		}
		runStep, err := sc.isEnabled(ctx)
		if err != nil {
			rc.StepResults[rc.CurrentStep].Conclusion = model.StepStatusFailure
			rc.StepResults[rc.CurrentStep].Outcome = model.StepStatusFailure
			return err
		}
		if !runStep {
			log.Debugf("Skipping step '%s' due to '%s'", sc.Step.String(), sc.Step.If.Value)
			rc.StepResults[rc.CurrentStep].Conclusion = model.StepStatusSkipped
			rc.StepResults[rc.CurrentStep].Outcome = model.StepStatusSkipped
			return nil
		}
		exprEval, err := sc.setupEnv(ctx)
		if err != nil {
			return err
		}
		rc.ExprEval = exprEval
		common.Logger(ctx).Infof("\u2B50  Run %s", sc.Step)
		err = sc.Executor(ctx)(ctx)
		if err == nil {
			common.Logger(ctx).Infof("  \u2705  Success - %s", sc.Step)
		} else {
			common.Logger(ctx).Errorf("  \u274C  Failure - %s", sc.Step)
			rc.StepResults[rc.CurrentStep].Outcome = model.StepStatusFailure
			if sc.Step.ContinueOnError {
				common.Logger(ctx).Infof("Failed but continue next step")
				// Swallow the error so the pipeline proceeds; outcome still
				// records the failure while conclusion reports success.
				err = nil
				rc.StepResults[rc.CurrentStep].Conclusion = model.StepStatusSuccess
			} else {
				rc.StepResults[rc.CurrentStep].Conclusion = model.StepStatusFailure
			}
		}
		return err
	}
}
// platformImage resolves the container image for this job: the job's explicit
// container image if one is configured, otherwise the first configured
// platform image matching one of the job's runs-on labels. Returns "" when
// nothing matches.
func (rc *RunContext) platformImage() string {
	job := rc.Run.Job()
	c := job.Container()
	if c != nil {
		return rc.ExprEval.Interpolate(c.Image)
	}
	if job.RunsOn() == nil {
		log.Errorf("'runs-on' key not defined in %s", rc.String())
	}
	for _, runnerLabel := range job.RunsOn() {
		platformName := rc.ExprEval.Interpolate(runnerLabel)
		// Platform lookup is case-insensitive on the label name.
		image := rc.Config.Platforms[strings.ToLower(platformName)]
		if image != "" {
			return image
		}
	}
	return ""
}
// hostname extracts the -h/--hostname flag from the job container's options
// string, if any. Returns "" when there is no container or the options cannot
// be parsed.
func (rc *RunContext) hostname() string {
	job := rc.Run.Job()
	c := job.Container()
	if c == nil {
		return ""
	}
	// Parse the free-form options string like a docker CLI invocation,
	// looking only for the hostname flag.
	optionsFlags := pflag.NewFlagSet("container_options", pflag.ContinueOnError)
	hostname := optionsFlags.StringP("hostname", "h", "", "")
	optionsArgs, err := shlex.Split(c.Options)
	if err != nil {
		log.Warnf("Cannot parse container options: %s", c.Options)
		return ""
	}
	err = optionsFlags.Parse(optionsArgs)
	if err != nil {
		log.Warnf("Cannot parse container options: %s", c.Options)
		return ""
	}
	return *hostname
}
// isEnabled reports whether the job should run: its `if:` expression must
// evaluate to true and a platform image must be resolvable for one of its
// runs-on labels. Unsupported platforms are logged and treated as disabled,
// not as errors.
func (rc *RunContext) isEnabled(ctx context.Context) (bool, error) {
	job := rc.Run.Job()
	l := common.Logger(ctx)
	runJob, err := EvalBool(rc.ExprEval, job.If.Value)
	if err != nil {
		return false, fmt.Errorf("  \u274C  Error in if-expression: \"if: %s\" (%s)", job.If.Value, err)
	}
	if !runJob {
		l.Debugf("Skipping job '%s' due to '%s'", job.Name, job.If.Value)
		return false, nil
	}
	img := rc.platformImage()
	if img == "" {
		if job.RunsOn() == nil {
			log.Errorf("'runs-on' key not defined in %s", rc.String())
		}
		for _, runnerLabel := range job.RunsOn() {
			platformName := rc.ExprEval.Interpolate(runnerLabel)
			l.Infof("\U0001F6A7  Skipping unsupported platform -- Try running with `-P %+v=...`", platformName)
		}
		return false, nil
	}
	return true, nil
}
// mergeMaps combines the given maps into a single new map. Keys from later
// maps override those from earlier ones; the input maps are not modified.
func mergeMaps(maps ...map[string]string) map[string]string {
	merged := make(map[string]string)
	for _, src := range maps {
		for key, value := range src {
			merged[key] = value
		}
	}
	return merged
}
// createContainerName builds a container name from the given parts. Each part
// is sanitized to alphanumerics-and-dashes; all but the last part are trimmed
// to fit a shared length budget, preserving any trailing '-<number>' suffix
// (likely a matrix index) to avoid name clashes.
func createContainerName(parts ...string) string {
	name := make([]string, 0)
	pattern := regexp.MustCompile("[^a-zA-Z0-9]")
	// Budget per part so the joined name stays roughly bounded.
	partLen := (30 / len(parts)) - 1
	for i, part := range parts {
		if i == len(parts)-1 {
			name = append(name, pattern.ReplaceAllString(part, "-"))
		} else {
			// If any part has a '-<number>' on the end it is likely part of a matrix job.
			// Let's preserve the number to prevent clashes in container names.
			re := regexp.MustCompile("-[0-9]+$")
			num := re.FindStringSubmatch(part)
			if len(num) > 0 {
				name = append(name, trimToLen(pattern.ReplaceAllString(part, "-"), partLen-len(num[0])))
				name = append(name, num[0])
			} else {
				name = append(name, trimToLen(pattern.ReplaceAllString(part, "-"), partLen))
			}
		}
	}
	return strings.ReplaceAll(strings.Trim(strings.Join(name, "-"), "-"), "--", "-")
}
// trimToLen returns s truncated to at most l bytes. A negative l is treated
// as zero, yielding the empty string.
func trimToLen(s string, l int) string {
	if l < 0 {
		l = 0
	}
	if len(s) <= l {
		return s
	}
	return s[:l]
}
// getJobContext derives the `job` context: status is "failure" if any step
// concluded with failure, otherwise "success".
func (rc *RunContext) getJobContext() *model.JobContext {
	jobStatus := "success"
	for _, stepStatus := range rc.StepResults {
		if stepStatus.Conclusion == model.StepStatusFailure {
			jobStatus = "failure"
			break
		}
	}
	return &model.JobContext{
		Status: jobStatus,
	}
}
// getStepsContext returns the accumulated per-step results, keyed by step ID.
func (rc *RunContext) getStepsContext() map[string]*model.StepResult {
	return rc.StepResults
}
// getGithubContext builds the `github` expression context from the run
// configuration, environment overrides, the local git repository, and the
// event payload JSON, filling in defaults for values that are unset.
func (rc *RunContext) getGithubContext() *model.GithubContext {
	ghc := &model.GithubContext{
		Event:            make(map[string]interface{}),
		EventPath:        ActPath + "/workflow/event.json",
		Workflow:         rc.Run.Workflow.Name,
		RunID:            rc.Config.Env["GITHUB_RUN_ID"],
		RunNumber:        rc.Config.Env["GITHUB_RUN_NUMBER"],
		Actor:            rc.Config.Actor,
		EventName:        rc.Config.EventName,
		Workspace:        rc.Config.ContainerWorkdir(),
		Action:           rc.CurrentStep,
		Token:            rc.Config.Secrets["GITHUB_TOKEN"],
		ActionPath:       rc.ActionPath,
		ActionRef:        rc.ActionRef,
		ActionRepository: rc.ActionRepository,
		RepositoryOwner:  rc.Config.Env["GITHUB_REPOSITORY_OWNER"],
		RetentionDays:    rc.Config.Env["GITHUB_RETENTION_DAYS"],
		RunnerPerflog:    rc.Config.Env["RUNNER_PERFLOG"],
		RunnerTrackingID: rc.Config.Env["RUNNER_TRACKING_ID"],
	}
	// Defaults for values not supplied via configuration.
	if ghc.RunID == "" {
		ghc.RunID = "1"
	}
	if ghc.RunNumber == "" {
		ghc.RunNumber = "1"
	}
	if ghc.RetentionDays == "" {
		ghc.RetentionDays = "0"
	}
	if ghc.RunnerPerflog == "" {
		ghc.RunnerPerflog = "/dev/null"
	}
	// Backwards compatibility for configs that require
	// a default rather than being run as a cmd
	if ghc.Actor == "" {
		ghc.Actor = "nektos/act"
	}
	repoPath := rc.Config.Workdir
	repo, err := common.FindGithubRepo(repoPath, rc.Config.GitHubInstance)
	if err != nil {
		log.Warningf("unable to get git repo: %v", err)
	} else {
		ghc.Repository = repo
		if ghc.RepositoryOwner == "" {
			// Owner is the part of "owner/repo" before the slash.
			ghc.RepositoryOwner = strings.Split(repo, "/")[0]
		}
	}
	if rc.EventJSON != "" {
		err = json.Unmarshal([]byte(rc.EventJSON), &ghc.Event)
		if err != nil {
			log.Errorf("Unable to Unmarshal event '%s': %v", rc.EventJSON, err)
		}
	}
	// Pull request events expose base/head refs from the event payload.
	if ghc.EventName == "pull_request" {
		ghc.BaseRef = asString(nestedMapLookup(ghc.Event, "pull_request", "base", "ref"))
		ghc.HeadRef = asString(nestedMapLookup(ghc.Event, "pull_request", "head", "ref"))
	}
	ghc.SetRefAndSha(rc.Config.DefaultBranch, repoPath)
	return ghc
}
// isLocalCheckout reports whether step is a remote checkout action that
// checks out the current repository at the current ref, i.e. a checkout that
// can be satisfied from the local working copy.
func isLocalCheckout(ghc *model.GithubContext, step *model.Step) bool {
	if step.Type() == model.StepTypeInvalid {
		// This will be errored out by the executor later, we need this here to avoid a null panic though
		return false
	}
	if step.Type() != model.StepTypeUsesActionRemote {
		return false
	}
	remoteAction := newRemoteAction(step.Uses)
	if remoteAction == nil {
		// IsCheckout() will nil panic if we dont bail out early
		return false
	}
	if !remoteAction.IsCheckout() {
		return false
	}
	// A checkout of a different repository or ref is not "local".
	if repository, ok := step.With["repository"]; ok && repository != ghc.Repository {
		return false
	}
	if repository, ok := step.With["ref"]; ok && repository != ghc.Ref {
		return false
	}
	return true
}
// asString converts v to a string, returning "" when v is nil or not a
// string.
func asString(v interface{}) string {
	s, _ := v.(string)
	return s
}
// nestedMapLookup walks m along the key path ks and returns the value at the
// end of the path. It returns nil when ks is empty, a key is missing, or an
// intermediate value is not a map[string]interface{}.
func nestedMapLookup(m map[string]interface{}, ks ...string) interface{} {
	if len(ks) == 0 { // degenerate input
		return nil
	}
	var cur interface{} = m
	for _, k := range ks {
		mm, ok := cur.(map[string]interface{})
		if !ok {
			return nil
		}
		if cur, ok = mm[k]; !ok {
			return nil
		}
	}
	return cur
}
// withGithubEnv populates env with the CI/GITHUB_*/runner environment
// variables derived from the github context and configuration, then returns
// the same (mutated) map. GitHub Enterprise instances get instance-specific
// server/API/GraphQL URLs; ImageOS is derived from the runs-on labels.
//
// Note: the duplicate GITHUB_ACTION_REF / GITHUB_ACTION_REPOSITORY
// assignments present in an earlier revision have been removed; each key is
// now set exactly once.
func (rc *RunContext) withGithubEnv(env map[string]string) map[string]string {
	github := rc.getGithubContext()
	env["CI"] = "true"
	env["GITHUB_ENV"] = ActPath + "/workflow/envs.txt"
	env["GITHUB_PATH"] = ActPath + "/workflow/paths.txt"
	env["GITHUB_WORKFLOW"] = github.Workflow
	env["GITHUB_RUN_ID"] = github.RunID
	env["GITHUB_RUN_NUMBER"] = github.RunNumber
	env["GITHUB_ACTION"] = github.Action
	env["GITHUB_ACTION_PATH"] = github.ActionPath
	env["GITHUB_ACTION_REPOSITORY"] = github.ActionRepository
	env["GITHUB_ACTION_REF"] = github.ActionRef
	env["GITHUB_ACTIONS"] = "true"
	env["GITHUB_ACTOR"] = github.Actor
	env["GITHUB_REPOSITORY"] = github.Repository
	env["GITHUB_EVENT_NAME"] = github.EventName
	env["GITHUB_EVENT_PATH"] = github.EventPath
	env["GITHUB_WORKSPACE"] = github.Workspace
	env["GITHUB_SHA"] = github.Sha
	env["GITHUB_REF"] = github.Ref
	env["GITHUB_TOKEN"] = github.Token
	env["GITHUB_SERVER_URL"] = "https://github.com"
	env["GITHUB_API_URL"] = "https://api.github.com"
	env["GITHUB_GRAPHQL_URL"] = "https://api.github.com/graphql"
	env["GITHUB_BASE_REF"] = github.BaseRef
	env["GITHUB_HEAD_REF"] = github.HeadRef
	env["GITHUB_JOB"] = rc.JobName
	env["GITHUB_REPOSITORY_OWNER"] = github.RepositoryOwner
	env["GITHUB_RETENTION_DAYS"] = github.RetentionDays
	env["RUNNER_PERFLOG"] = github.RunnerPerflog
	env["RUNNER_TRACKING_ID"] = github.RunnerTrackingID
	// Override the public github.com URLs for GitHub Enterprise instances.
	if rc.Config.GitHubInstance != "github.com" {
		env["GITHUB_SERVER_URL"] = fmt.Sprintf("https://%s", rc.Config.GitHubInstance)
		env["GITHUB_API_URL"] = fmt.Sprintf("https://%s/api/v3", rc.Config.GitHubInstance)
		env["GITHUB_GRAPHQL_URL"] = fmt.Sprintf("https://%s/api/graphql", rc.Config.GitHubInstance)
	}
	if rc.Config.ArtifactServerPath != "" {
		setActionRuntimeVars(rc, env)
	}
	job := rc.Run.Job()
	if job.RunsOn() != nil {
		for _, runnerLabel := range job.RunsOn() {
			platformName := rc.ExprEval.Interpolate(runnerLabel)
			if platformName != "" {
				if platformName == "ubuntu-latest" {
					// hardcode current ubuntu-latest since we have no way to check that 'on the fly'
					env["ImageOS"] = "ubuntu20"
				} else {
					platformName = strings.SplitN(strings.Replace(platformName, `-`, ``, 1), `.`, 2)[0]
					env["ImageOS"] = platformName
				}
			}
		}
	}
	return env
}
// setActionRuntimeVars sets ACTIONS_RUNTIME_URL and ACTIONS_RUNTIME_TOKEN in
// env, preferring values from the process environment and falling back to the
// local artifact server address and a placeholder token.
func setActionRuntimeVars(rc *RunContext, env map[string]string) {
	actionsRuntimeURL := os.Getenv("ACTIONS_RUNTIME_URL")
	if actionsRuntimeURL == "" {
		actionsRuntimeURL = fmt.Sprintf("http://%s:%s/", common.GetOutboundIP().String(), rc.Config.ArtifactServerPort)
	}
	env["ACTIONS_RUNTIME_URL"] = actionsRuntimeURL

	actionsRuntimeToken := os.Getenv("ACTIONS_RUNTIME_TOKEN")
	if actionsRuntimeToken == "" {
		actionsRuntimeToken = "token"
	}
	env["ACTIONS_RUNTIME_TOKEN"] = actionsRuntimeToken
}
// localCheckoutPath returns the `path` input of the first step that checks
// out the current repository locally, and whether such a step exists.
func (rc *RunContext) localCheckoutPath() (string, bool) {
	ghContext := rc.getGithubContext()
	for _, step := range rc.Run.Job().Steps {
		if isLocalCheckout(ghContext, step) {
			return step.With["path"], true
		}
	}
	return "", false
}
// handleCredentials resolves the registry credentials for the job container.
// It falls back to the DOCKER_USERNAME/DOCKER_PASSWORD secrets, and otherwise
// requires the container's `credentials:` map to hold exactly a username and
// a password, both interpolated to non-empty values.
func (rc *RunContext) handleCredentials() (username, password string, err error) {
	// TODO: remove below 2 lines when we can release act with breaking changes
	username = rc.Config.Secrets["DOCKER_USERNAME"]
	password = rc.Config.Secrets["DOCKER_PASSWORD"]

	container := rc.Run.Job().Container()
	if container == nil || container.Credentials == nil {
		return
	}

	if container.Credentials != nil && len(container.Credentials) != 2 {
		err = fmt.Errorf("invalid property count for key 'credentials:'")
		return
	}

	ee := rc.NewExpressionEvaluator()
	if username = ee.Interpolate(container.Credentials["username"]); username == "" {
		err = fmt.Errorf("failed to interpolate container.credentials.username")
		return
	}

	if password = ee.Interpolate(container.Credentials["password"]); password == "" {
		err = fmt.Errorf("failed to interpolate container.credentials.password")
		return
	}

	// NOTE(review): this raw-value check appears unreachable — an empty raw
	// credential would interpolate to "" and be caught above. Confirm before
	// removing.
	if container.Credentials["username"] == "" || container.Credentials["password"] == "" {
		err = fmt.Errorf("container.credentials cannot be empty")
		return
	}

	return username, password, err
}
| [
"\"ACTIONS_RUNTIME_URL\"",
"\"ACTIONS_RUNTIME_TOKEN\""
] | [] | [
"ACTIONS_RUNTIME_URL",
"ACTIONS_RUNTIME_TOKEN"
] | [] | ["ACTIONS_RUNTIME_URL", "ACTIONS_RUNTIME_TOKEN"] | go | 2 | 0 | |
src/main/java/com/programmer74/GradientDescentApplication.java | package com.programmer74;
import com.programmer74.GradientDescent.BasicGradientDescentCalculator;
import com.programmer74.GradientDescent.GradientDescentCalculator;
import com.programmer74.GradientDescent.SparkGradientDescentCalculator;
import com.programmer74.GradientDescent.LinearHypothesis;
import com.programmer74.util.DummyDataLoader;
import com.programmer74.util.Pair;
import java.util.Arrays;
import java.util.List;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.SparkConf;
public class GradientDescentApplication {
private static final int triesCount = 3;
    /**
     * Runs the calculator {@code triesCount} times from the given starting
     * thetas, printing each result and wall-clock duration, and returns the
     * average duration in milliseconds.
     */
    public static double benchmark(GradientDescentCalculator calculator, double initialTheta0, double initialTheta1 ) {
        double avgTime = 0;
        for (int i = 0; i < triesCount; i++) {
            long startTime = System.currentTimeMillis();
            Pair<Double> finalTheta = calculator.calculate(initialTheta0, initialTheta1);
            System.out.printf("theta0 = %f, theta1 = %f\n", finalTheta.getFirst(), finalTheta.getSecond());
            long endTime = System.currentTimeMillis();
            long totalTime = endTime - startTime;
            System.out.println(totalTime + " ms");
            avgTime += totalTime;
        }
        avgTime = avgTime / triesCount;
        return avgTime;
    }
    /** Prints the command-line usage summary to stdout. */
    public static void usage() {
        System.out.println("Usage:\n$ java -jar GradientDescent.jar (--no-spark | <master url>) <csv filename>\n" +
                "  [--set-alpha <alpha>] [--set-epsilon <epsilon>] [--set-iterations <max iterations count>]\n" +
                "  [--set-theta-0 <initial-theta-0>] [--set-theta-1 <initial-theta-1>]\n" +
                "  [--set-worker-memory <worker memory>]");
    }
    /**
     * Entry point. Parses the execution mode (no-spark, local Spark cluster,
     * or remote Spark master URL), the CSV data file, and optional tuning
     * flags; loads the data; runs gradient descent; and optionally benchmarks
     * the Spark implementation against the basic one.
     */
    public static void main(String[] args) {
        boolean withoutSpark = false;
        boolean localMode = false;
        int localThreadCount = 0;
        String masterLocation = "";
        String fileName;
        // Defaults, overridable via --set-* flags below.
        double alpha = 0.01;
        double epsilon = 0.0001;
        int maxIterations = 10_000;
        double initialTheta0 = 0.1;
        double initialTheta1 = 0.1;
        String workingMemory = "8g";
        boolean benchmarkMode = false;
        if (args.length < 2) {
            usage();
            return;
        }
        // First positional argument selects the execution mode.
        if (args[0].equals("--no-spark")) {
            withoutSpark = true;
            System.out.println("Set mode to NO_SPARK");
        } else {
            if (args[0].matches("^local[\\[][0-9]+[]]$")) {
                System.out.println("Set mode to SPARK_LOCAL_CLUSTER");
                localMode = true;
                // Extract the thread count from e.g. "local[4]".
                localThreadCount = Integer.valueOf(args[0].replaceAll("[^0-9]", ""));
                System.out.println("Local mode with thread count : " + localThreadCount);
            } else if (args[0].matches("^spark://.+")) {
                System.out.println("Set mode to SPARK_CLUSTER at " + args[0]);
                localMode = false;
                masterLocation = args[0];
            } else {
                System.out.println(args[0]);
                usage();
                return;
            }
        }
        fileName = args[1];
        // Parse the optional flags; each --set-* flag consumes the next arg.
        try {
            for (int i = 2; i < args.length; i++) {
                switch (args[i]) {
                    case "--set-alpha":
                        i++;
                        alpha = Double.parseDouble(args[i]);
                        System.out.println("Set alpha to " + alpha);
                        break;
                    case "--set-iterations":
                        i++;
                        maxIterations = Integer.parseInt(args[i]);
                        System.out.println("Set Maximum iterations to " + maxIterations);
                        break;
                    case "--set-epsilon":
                        i++;
                        epsilon = Double.parseDouble(args[i]);
                        System.out.println("Set epsilon to " + epsilon);
                        break;
                    case "--set-theta-0":
                        i++;
                        initialTheta0 = Double.parseDouble(args[i]);
                        System.out.println("Set Initial Theta0 to " + initialTheta0);
                        break;
                    case "--set-theta-1":
                        i++;
                        initialTheta1 = Double.parseDouble(args[i]);
                        System.out.println("Set Initial Theta1 to " + initialTheta1);
                        break;
                    case "--set-worker-memory":
                        i++;
                        workingMemory = args[i];
                        System.out.println("Set worker memory to " + workingMemory);
                        break;
                    case "--benchmark":
                        benchmarkMode = true;
                        System.out.println("Benchmark Mode enabled");
                        break;
                    default:
                        usage();
                        return;
                }
            }
        } catch (Exception ex) {
            System.err.println("Incorrect arguments.");
            usage();
            System.exit(-2);
        }
        GradientDescentCalculator calculator = null;
        if (withoutSpark) {
            System.out.println("Importing file " + fileName);
            long startTime = System.currentTimeMillis();
            List<Pair<Double>> data = DummyDataLoader.importCSV(fileName);
            long endTime = System.currentTimeMillis();
            long totalTime = endTime - startTime;
            System.out.println("File import time: " + totalTime + " ms");
            System.out.println("Dataset size: " + data.size());
            calculator = new BasicGradientDescentCalculator(data, new LinearHypothesis(), alpha, maxIterations, epsilon);
        } else {
            System.out.println("Configuring Spark...");
            Logger.getLogger("org").setLevel(Level.WARN);
            Logger.getLogger("akka").setLevel(Level.WARN);
            SparkConf conf = new SparkConf().setAppName("GradientDescentApplication");
            if (localMode) {
                conf.setMaster("local[" + localThreadCount + "]")
                        .set("spark.driver.host", "localhost");
            } else {
                // Remote cluster mode requires our jar so workers can
                // deserialize the custom classes.
                String jarPath = System.getenv("GDA_JAR_FILE");
                if ((jarPath == null) || (jarPath.equals(""))) {
                    System.err.println("GDA_JAR_FILE not set.");
                    System.err.println("Please, run \"mvn package -DskipTests\" and \"export GDA_JAR_FILE=<file.java>\"");
                    System.err.println("Otherwise, the Spark Cluster won't be able to (de)serialize our custom classes.");
                    System.exit(-1);
                }
                conf.setMaster(masterLocation);
                conf.set("spark.executor.memory", workingMemory);
                conf.setJars(new String[]{jarPath});
            }
            JavaSparkContext sc = new JavaSparkContext(conf);
            System.out.println("Importing file " + fileName);
            long startTime = System.currentTimeMillis();
            JavaRDD<Pair<Double>> data = DummyDataLoader.importCSV(sc, fileName);
            long endTime = System.currentTimeMillis();
            long totalTime = endTime - startTime;
            System.out.println("File import time: " + totalTime + " ms");
            System.out.println("Dataset size: " + data.count());
            calculator = new SparkGradientDescentCalculator(sc, data, new LinearHypothesis(), alpha, maxIterations,
                    epsilon);
        }
        long startTime = System.currentTimeMillis();
        Pair<Double> finalTheta = calculator.calculate(initialTheta0, initialTheta1);
        System.out.printf("theta0 = %f, theta1 = %f\n", finalTheta.getFirst(), finalTheta.getSecond());
        System.out.printf("y = %f * x + %f\n", finalTheta.getSecond(), finalTheta.getFirst());
        long endTime = System.currentTimeMillis();
        long totalTime = endTime - startTime;
        System.out.println("Took " + totalTime + " ms to calculate.");
        if (benchmarkMode) {
            System.out.println("Doing additional benchmarking...");
            System.out.println("Average: " + benchmark(calculator, initialTheta0, initialTheta1));
            System.out.println("Compared to single basic method...");
            calculator = new BasicGradientDescentCalculator(DummyDataLoader.importCSV(fileName), new LinearHypothesis(),
                    alpha, maxIterations, epsilon);
            System.out.println("Average: " + benchmark(calculator, initialTheta0, initialTheta1));
        }
    }
} | [
"\"GDA_JAR_FILE\""
] | [] | [
"GDA_JAR_FILE"
] | [] | ["GDA_JAR_FILE"] | java | 1 | 0 | |
Bot1/instrumented_file/twitterRetweet.py | import boto3
import tweepy
import os
import json
from aws_xray_sdk.core import xray_recorder
from aws_xray_sdk.core import patch_all
# Module-level SSM client used for fetching encrypted Twitter credentials.
# Region is hardcoded to us-west-2.
ssm = boto3.client('ssm', region_name='us-west-2')
def authenticate_twitter():
    """Build an authenticated tweepy API client.

    Fetches the four OAuth credentials from AWS SSM Parameter Store
    (decrypted), wrapping the SSM calls and the whole authentication in
    X-Ray subsegments.

    Returns:
        tweepy.API: an authenticated Twitter API client.
    """
    xray_recorder.begin_subsegment('twitter-authentication')
    xray_recorder.begin_subsegment('ssm-get_parameter')
    consumer_key = (ssm.get_parameter(Name='twitter-consumer-key', WithDecryption=True))['Parameter']['Value']
    consumer_secret = (ssm.get_parameter(Name='twitter-consumer-secret', WithDecryption=True))['Parameter']['Value']
    access_token = (ssm.get_parameter(Name='twitter-access-token', WithDecryption=True))['Parameter']['Value']
    access_token_secret = (ssm.get_parameter(Name='twitter-access-token-secret', WithDecryption=True))['Parameter']['Value']
    xray_recorder.end_subsegment()
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    xray_recorder.end_subsegment()
    return api
def retweet(api, search, numberOfTweets):
    """Search recent tweets and retweet English-language matches.

    Args:
        api: authenticated tweepy API client.
        search: search query string.
        numberOfTweets: maximum number of tweets to examine.

    Tweepy errors (e.g. already-retweeted) are printed and skipped so one
    failure does not stop the loop.
    """
    xray_recorder.begin_subsegment('retweet')
    for tweet in tweepy.Cursor(api.search, search).items(numberOfTweets):
        try:
            tweet_id = tweet.id
            print(tweet_id)
            # Only retweet tweets Twitter classified as English.
            if tweet.lang == "en":
                tweet.retweet()
                print('Retweeted the tweet')
            else:
                print('no retweet')
        except tweepy.TweepError as e:
            print(e.reason)
        except StopIteration:
            break
    xray_recorder.end_subsegment()
def lambda_handler(event, context):
    """AWS Lambda entry point.

    Patches libraries for X-Ray tracing, authenticates with Twitter, reads
    the search query from the TweetSearchString environment variable, and
    retweets up to 5 matching tweets. Exceptions are printed and swallowed
    so the handler always returns a 200 response.
    """
    try:
        patch_all()
        apiuse = authenticate_twitter()
        searchString = os.environ['TweetSearchString']
        numberOfTweets = 5
        print('String to look for: {0} and nb of tweets to retrieve: {1}'.format(searchString,numberOfTweets))
        retweet(apiuse, searchString, numberOfTweets)
    except Exception as e:
        print(e)
    return {
        "statusCode": 200,
        "body": json.dumps(
            {"message": "TwitterPoller"}
        ),
    }
"TweetSearchString"
] | [] | ["TweetSearchString"] | python | 1 | 0 | |
cmd/config.go | package cmd
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"os/user"
"reflect"
"regexp"
"runtime"
"runtime/pprof"
"sort"
"strings"
"text/template"
"time"
"unicode"
"github.com/Masterminds/sprig/v3"
"github.com/coreos/go-semver/semver"
"github.com/go-git/go-git/v5/plumbing/format/diff"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/twpayne/go-shell"
"github.com/twpayne/go-vfs/v2"
vfsafero "github.com/twpayne/go-vfsafero/v2"
"github.com/twpayne/go-xdg/v4"
"golang.org/x/term"
"github.com/twpayne/chezmoi/assets/templates"
"github.com/twpayne/chezmoi/internal/chezmoi"
"github.com/twpayne/chezmoi/internal/git"
)
// defaultFormat is the default serialization format for commands that can
// emit structured output.
var defaultFormat = "json"

// purgeOptions are the options for purge operations.
type purgeOptions struct {
	// binary controls whether the chezmoi binary itself is also removed.
	binary bool
}

// templateConfig is the `template` section of the config file.
type templateConfig struct {
	Options []string `mapstructure:"options"`
}
// A Config represents a configuration. It aggregates everything a chezmoi
// invocation needs: config-file settings (mapstructure-tagged fields),
// command-line-only flags, per-command configuration, and state computed at
// startup (systems, paths, encryption, I/O streams).
type Config struct {
	// Version information.
	version     *semver.Version
	versionInfo VersionInfo
	versionStr  string

	bds *xdg.BaseDirectorySpecification

	fs              vfs.FS
	configFile      string
	baseSystem      chezmoi.System
	sourceSystem    chezmoi.System
	destSystem      chezmoi.System
	persistentState chezmoi.PersistentState
	color           bool

	// Global configuration, settable in the config file.
	SourceDir     string                 `mapstructure:"sourceDir"`
	DestDir       string                 `mapstructure:"destDir"`
	Umask         os.FileMode            `mapstructure:"umask"`
	Remove        bool                   `mapstructure:"remove"`
	Color         string                 `mapstructure:"color"`
	Data          map[string]interface{} `mapstructure:"data"`
	Template      templateConfig         `mapstructure:"template"`
	UseBuiltinGit string                 `mapstructure:"useBuiltinGit"`

	// Global configuration, not settable in the config file.
	cpuProfile    string
	debug         bool
	dryRun        bool
	exclude       *chezmoi.EntryTypeSet
	force         bool
	homeDir       string
	keepGoing     bool
	noPager       bool
	noTTY         bool
	outputStr     string
	sourcePath    bool
	verbose       bool
	templateFuncs template.FuncMap

	// Password manager configurations, settable in the config file.
	Bitwarden   bitwardenConfig   `mapstructure:"bitwarden"`
	Gopass      gopassConfig      `mapstructure:"gopass"`
	Keepassxc   keepassxcConfig   `mapstructure:"keepassxc"`
	Lastpass    lastpassConfig    `mapstructure:"lastpass"`
	Onepassword onepasswordConfig `mapstructure:"onepassword"`
	Pass        passConfig        `mapstructure:"pass"`
	Secret      secretConfig      `mapstructure:"secret"`
	Vault       vaultConfig       `mapstructure:"vault"`

	// Encryption configurations, settable in the config file.
	Encryption string                `mapstructure:"encryption"`
	AGE        chezmoi.AGEEncryption `mapstructure:"age"`
	GPG        chezmoi.GPGEncryption `mapstructure:"gpg"`

	// Password manager data.
	gitHub  gitHubData
	keyring keyringData

	// Command configurations, settable in the config file.
	CD    cdCmdConfig    `mapstructure:"cd"`
	Diff  diffCmdConfig  `mapstructure:"diff"`
	Edit  editCmdConfig  `mapstructure:"edit"`
	Git   gitCmdConfig   `mapstructure:"git"`
	Merge mergeCmdConfig `mapstructure:"merge"`

	// Command configurations, not settable in the config file.
	add             addCmdConfig
	apply           applyCmdConfig
	archive         archiveCmdConfig
	data            dataCmdConfig
	dump            dumpCmdConfig
	executeTemplate executeTemplateCmdConfig
	_import         importCmdConfig
	init            initCmdConfig
	managed         managedCmdConfig
	purge           purgeCmdConfig
	secretKeyring   secretKeyringCmdConfig
	state           stateCmdConfig
	status          statusCmdConfig
	update          updateCmdConfig
	upgrade         upgradeCmdConfig
	verify          verifyCmdConfig

	// Computed configuration.
	configFileAbsPath chezmoi.AbsPath
	homeDirAbsPath    chezmoi.AbsPath
	sourceDirAbsPath  chezmoi.AbsPath
	destDirAbsPath    chezmoi.AbsPath
	encryption        chezmoi.Encryption

	// Standard I/O streams, overridable for testing.
	stdin  io.Reader
	stdout io.Writer
	stderr io.Writer

	ioregData ioregData
}
// A configOption sets an option on a Config.
type configOption func(*Config) error

// configState is the state stored to detect config file template changes.
type configState struct {
	ConfigTemplateContentsSHA256 chezmoi.HexBytes `json:"configTemplateContentsSHA256" yaml:"configTemplateContentsSHA256"`
}

var (
	persistentStateFilename = chezmoi.RelPath("chezmoistate.boltdb")
	configStateKey          = []byte("configState")

	defaultAGEEncryptionConfig = chezmoi.AGEEncryption{
		Command: "age",
		Suffix:  ".age",
	}
	defaultGPGEncryptionConfig = chezmoi.GPGEncryption{
		Command: "gpg",
		Suffix:  ".asc",
	}

	// identifierRx matches a valid template-data identifier.
	identifierRx = regexp.MustCompile(`\A[\pL_][\pL\p{Nd}_]*\z`)
	whitespaceRx = regexp.MustCompile(`\s+`)
)
// newConfig creates a new Config with the given options. It establishes all
// defaults (home directory, commands for password managers and encryption,
// per-command include sets), registers the template functions, applies the
// options in order, and finally computes the default config file and source
// directory locations.
func newConfig(options ...configOption) (*Config, error) {
	homeDir, err := os.UserHomeDir()
	if err != nil {
		return nil, err
	}
	normalizedHomeDir, err := chezmoi.NormalizePath(homeDir)
	if err != nil {
		return nil, err
	}

	bds, err := xdg.NewBaseDirectorySpecification()
	if err != nil {
		return nil, err
	}

	c := &Config{
		bds:     bds,
		fs:      vfs.OSFS,
		homeDir: homeDir,
		DestDir: homeDir,
		Umask:   chezmoi.Umask,
		Color:   "auto",
		Diff: diffCmdConfig{
			Pager:   os.Getenv("PAGER"),
			include: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesAll),
		},
		Edit: editCmdConfig{
			include: chezmoi.NewEntryTypeSet(chezmoi.EntryTypeDirs | chezmoi.EntryTypeFiles | chezmoi.EntryTypeSymlinks | chezmoi.EntryTypeEncrypted),
		},
		Git: gitCmdConfig{
			Command: "git",
		},
		Merge: mergeCmdConfig{
			Command: "vimdiff",
		},
		Template: templateConfig{
			Options: chezmoi.DefaultTemplateOptions,
		},
		templateFuncs: sprig.TxtFuncMap(),
		exclude:       chezmoi.NewEntryTypeSet(chezmoi.EntryTypesNone),
		// Default commands for each supported password manager.
		Bitwarden: bitwardenConfig{
			Command: "bw",
		},
		Gopass: gopassConfig{
			Command: "gopass",
		},
		Keepassxc: keepassxcConfig{
			Command: "keepassxc-cli",
		},
		Lastpass: lastpassConfig{
			Command: "lpass",
		},
		Onepassword: onepasswordConfig{
			Command: "op",
		},
		Pass: passConfig{
			Command: "pass",
		},
		Vault: vaultConfig{
			Command: "vault",
		},
		AGE: defaultAGEEncryptionConfig,
		GPG: defaultGPGEncryptionConfig,
		// Per-command defaults for entry-type filtering and recursion.
		add: addCmdConfig{
			include:   chezmoi.NewEntryTypeSet(chezmoi.EntryTypesAll),
			recursive: true,
		},
		apply: applyCmdConfig{
			include:   chezmoi.NewEntryTypeSet(chezmoi.EntryTypesAll),
			recursive: true,
		},
		archive: archiveCmdConfig{
			include:   chezmoi.NewEntryTypeSet(chezmoi.EntryTypesAll),
			recursive: true,
		},
		data: dataCmdConfig{
			format: defaultFormat,
		},
		dump: dumpCmdConfig{
			format:    defaultFormat,
			include:   chezmoi.NewEntryTypeSet(chezmoi.EntryTypesAll),
			recursive: true,
		},
		_import: importCmdConfig{
			include: chezmoi.NewEntryTypeSet(chezmoi.EntryTypesAll),
		},
		managed: managedCmdConfig{
			include: chezmoi.NewEntryTypeSet(chezmoi.EntryTypeDirs | chezmoi.EntryTypeFiles | chezmoi.EntryTypeSymlinks | chezmoi.EntryTypeEncrypted),
		},
		state: stateCmdConfig{
			dump: stateDumpCmdConfig{
				format: defaultFormat,
			},
		},
		status: statusCmdConfig{
			include:   chezmoi.NewEntryTypeSet(chezmoi.EntryTypesAll),
			recursive: true,
		},
		update: updateCmdConfig{
			apply:     true,
			include:   chezmoi.NewEntryTypeSet(chezmoi.EntryTypesAll),
			recursive: true,
		},
		verify: verifyCmdConfig{
			include:   chezmoi.NewEntryTypeSet(chezmoi.EntryTypesAll &^ chezmoi.EntryTypeScripts),
			recursive: true,
		},

		stdin:  os.Stdin,
		stdout: os.Stdout,
		stderr: os.Stderr,

		homeDirAbsPath: normalizedHomeDir,
	}

	// Register chezmoi's own template functions; addTemplateFunc panics on
	// any collision with the sprig functions registered above.
	for key, value := range map[string]interface{}{
		"bitwarden":                c.bitwardenTemplateFunc,
		"bitwardenAttachment":      c.bitwardenAttachmentTemplateFunc,
		"bitwardenFields":          c.bitwardenFieldsTemplateFunc,
		"gitHubKeys":               c.gitHubKeysTemplateFunc,
		"gopass":                   c.gopassTemplateFunc,
		"include":                  c.includeTemplateFunc,
		"ioreg":                    c.ioregTemplateFunc,
		"joinPath":                 c.joinPathTemplateFunc,
		"keepassxc":                c.keepassxcTemplateFunc,
		"keepassxcAttribute":       c.keepassxcAttributeTemplateFunc,
		"keyring":                  c.keyringTemplateFunc,
		"lastpass":                 c.lastpassTemplateFunc,
		"lastpassRaw":              c.lastpassRawTemplateFunc,
		"lookPath":                 c.lookPathTemplateFunc,
		"onepassword":              c.onepasswordTemplateFunc,
		"onepasswordDetailsFields": c.onepasswordDetailsFieldsTemplateFunc,
		"onepasswordDocument":      c.onepasswordDocumentTemplateFunc,
		"output":                   c.outputTemplateFunc,
		"pass":                     c.passTemplateFunc,
		"secret":                   c.secretTemplateFunc,
		"secretJSON":               c.secretJSONTemplateFunc,
		"stat":                     c.statTemplateFunc,
		"vault":                    c.vaultTemplateFunc,
	} {
		c.addTemplateFunc(key, value)
	}

	for _, option := range options {
		if err := option(c); err != nil {
			return nil, err
		}
	}

	c.configFile = string(defaultConfigFile(c.fs, c.bds))
	c.SourceDir = string(defaultSourceDir(c.fs, c.bds))

	c.homeDirAbsPath, err = chezmoi.NormalizePath(c.homeDir)
	if err != nil {
		return nil, err
	}
	c._import.destination = string(c.homeDirAbsPath)

	return c, nil
}
// addTemplateFunc registers a template function under key, panicking if the
// key is already defined. The panic deliberately surfaces duplicate
// registrations as a programming error at startup.
func (c *Config) addTemplateFunc(key string, value interface{}) {
	if _, ok := c.templateFuncs[key]; ok {
		panic(fmt.Sprintf("%s: already defined", key))
	}
	c.templateFuncs[key] = value
}
// applyArgsOptions are the options to Config.applyArgs.
type applyArgsOptions struct {
	include      *chezmoi.EntryTypeSet // entry types to apply
	recursive    bool                  // recurse into directories named in args
	umask        os.FileMode
	preApplyFunc chezmoi.PreApplyFunc // called before each entry is applied
}
// applyArgs applies the source state to the targets named by args (or to all
// targets when args is empty) on targetSystem under targetDirAbsPath. It
// also compares the current config file template's SHA256 against the one
// recorded in persistent state and warns (or, with --force, records the new
// hash) when the template has changed. With --keep-going, per-target errors
// are reported and execution continues, finishing with a non-zero exit code.
func (c *Config) applyArgs(targetSystem chezmoi.System, targetDirAbsPath chezmoi.AbsPath, args []string, options applyArgsOptions) error {
	sourceState, err := c.sourceState()
	if err != nil {
		return err
	}

	var currentConfigTemplateContentsSHA256 []byte
	configTemplateRelPath, _, configTemplateContents, err := c.findConfigTemplate()
	if err != nil {
		return err
	}
	if configTemplateRelPath != "" {
		currentConfigTemplateContentsSHA256 = chezmoi.SHA256Sum(configTemplateContents)
	}
	var previousConfigTemplateContentsSHA256 []byte
	if configStateData, err := c.persistentState.Get(chezmoi.ConfigStateBucket, configStateKey); err != nil {
		return err
	} else if configStateData != nil {
		var configState configState
		if err := json.Unmarshal(configStateData, &configState); err != nil {
			return err
		}
		previousConfigTemplateContentsSHA256 = []byte(configState.ConfigTemplateContentsSHA256)
	}
	// Unchanged means both absent, or both present with equal hashes.
	configTemplateContentsUnchanged := (currentConfigTemplateContentsSHA256 == nil && previousConfigTemplateContentsSHA256 == nil) ||
		bytes.Equal(currentConfigTemplateContentsSHA256, previousConfigTemplateContentsSHA256)
	if !configTemplateContentsUnchanged {
		if c.force {
			// --force: update the recorded hash (or delete the record when
			// the template no longer exists) instead of warning.
			if configTemplateRelPath == "" {
				if err := c.persistentState.Delete(chezmoi.ConfigStateBucket, configStateKey); err != nil {
					return err
				}
			} else {
				configStateValue, err := json.Marshal(configState{
					ConfigTemplateContentsSHA256: chezmoi.HexBytes(currentConfigTemplateContentsSHA256),
				})
				if err != nil {
					return err
				}
				if err := c.persistentState.Set(chezmoi.ConfigStateBucket, configStateKey, configStateValue); err != nil {
					return err
				}
			}
		} else {
			c.errorf("warning: config file template has changed, run chezmoi init to regenerate config file\n")
		}
	}

	applyOptions := chezmoi.ApplyOptions{
		Include:      options.include.Sub(c.exclude),
		PreApplyFunc: options.preApplyFunc,
		Umask:        options.umask,
	}

	// Resolve the target paths: everything, by source path, or by target path.
	var targetRelPaths chezmoi.RelPaths
	switch {
	case len(args) == 0:
		targetRelPaths = sourceState.TargetRelPaths()
	case c.sourcePath:
		targetRelPaths, err = c.targetRelPathsBySourcePath(sourceState, args)
		if err != nil {
			return err
		}
	default:
		targetRelPaths, err = c.targetRelPaths(sourceState, args, targetRelPathsOptions{
			mustBeInSourceState: true,
			recursive:           options.recursive,
		})
		if err != nil {
			return err
		}
	}

	keptGoingAfterErr := false
	for _, targetRelPath := range targetRelPaths {
		switch err := sourceState.Apply(targetSystem, c.destSystem, c.persistentState, targetDirAbsPath, targetRelPath, applyOptions); {
		case errors.Is(err, chezmoi.Skip):
			continue
		case err != nil && c.keepGoing:
			c.errorf("%v", err)
			keptGoingAfterErr = true
		case err != nil:
			return err
		}
	}
	if keptGoingAfterErr {
		return ErrExitCode(1)
	}

	return nil
}
// cmdOutput runs the command name with args in dirAbsPath (or the current
// directory when dirAbsPath is empty) and returns its output via the base
// system's idempotent command runner.
func (c *Config) cmdOutput(dirAbsPath chezmoi.AbsPath, name string, args []string) ([]byte, error) {
	cmd := exec.Command(name, args...)
	if dirAbsPath != "" {
		dirRawAbsPath, err := c.baseSystem.RawPath(dirAbsPath)
		if err != nil {
			return nil, err
		}
		cmd.Dir = string(dirRawAbsPath)
	}
	return c.baseSystem.IdempotentCmdOutput(cmd)
}
// defaultPreApplyFunc is the default PreApplyFunc. When a target has changed
// on disk since chezmoi last wrote it, it prompts the user to diff, overwrite
// (once or for the rest of the run), skip, or quit. Scripts, forced runs,
// never-written targets, and unchanged targets are applied without prompting.
func (c *Config) defaultPreApplyFunc(targetRelPath chezmoi.RelPath, targetEntryState, lastWrittenEntryState, actualEntryState *chezmoi.EntryState) error {
	switch {
	case targetEntryState.Type == chezmoi.EntryStateTypeScript:
		return nil
	case c.force:
		return nil
	case lastWrittenEntryState == nil:
		return nil
	case lastWrittenEntryState.Equivalent(actualEntryState):
		return nil
	}
	// LATER add merge option
	var choices []string
	actualContents := actualEntryState.Contents()
	targetContents := targetEntryState.Contents()
	// "diff" is only offered when there is something to diff.
	if actualContents != nil || targetContents != nil {
		choices = append(choices, "diff")
	}
	choices = append(choices, "overwrite", "all-overwrite", "skip", "quit")
	for {
		switch choice, err := c.promptChoice(fmt.Sprintf("%s has changed since chezmoi last wrote it", targetRelPath), choices); {
		case err != nil:
			return err
		case choice == "diff":
			// Show the diff and re-prompt.
			if err := c.diffFile(targetRelPath, actualContents, actualEntryState.Mode, targetContents, targetEntryState.Mode); err != nil {
				return err
			}
		case choice == "overwrite":
			return nil
		case choice == "all-overwrite":
			// Persists for the rest of this run: later targets skip the prompt.
			c.force = true
			return nil
		case choice == "skip":
			return chezmoi.Skip
		case choice == "quit":
			return ErrExitCode(1)
		default:
			return nil
		}
	}
}
// defaultTemplateData returns the default template data under the top-level
// "chezmoi" key: architecture, OS, home and source directories, version info,
// and — where they can be determined — username, group, hostname, FQDN
// hostname, kernel info, and OS release info.
func (c *Config) defaultTemplateData() map[string]interface{} {
	data := map[string]interface{}{
		"arch":      runtime.GOARCH,
		"homeDir":   c.homeDir,
		"homedir":   c.homeDir, // TODO Remove in version 2.1.
		"os":        runtime.GOOS,
		"sourceDir": c.sourceDirAbsPath,
		"version": map[string]interface{}{
			"builtBy": c.versionInfo.BuiltBy,
			"commit":  c.versionInfo.Commit,
			"date":    c.versionInfo.Date,
			"version": c.versionInfo.Version,
		},
	}
	// Determine the user's username and group, if possible.
	//
	// user.Current and user.LookupGroupId in Go's standard library are
	// generally unreliable, so work around errors if possible, or ignore them.
	//
	// If CGO is disabled, then the Go standard library falls back to parsing
	// /etc/passwd and /etc/group, which will return incorrect results without
	// error if the system uses an alternative password database such as NIS or
	// LDAP.
	//
	// If CGO is enabled then user.Current and user.LookupGroupId will use the
	// underlying libc functions, namely getpwuid_r and getgrnam_r. If linked
	// with glibc this will return the correct result. If linked with musl then
	// they will use musl's implementation which, like Go's non-CGO
	// implementation, also only parses /etc/passwd and /etc/group and so also
	// returns incorrect results without error if NIS or LDAP are being used.
	//
	// On Windows, the user's group ID returned by user.Current() is an SID and
	// no further useful lookup is possible with Go's standard library.
	//
	// Since neither the username nor the group are likely widely used in
	// templates, leave these variables unset if their values cannot be
	// determined. Unset variables will trigger template errors if used,
	// alerting the user to the problem and allowing them to find alternative
	// solutions.
	if currentUser, err := user.Current(); err == nil {
		data["username"] = currentUser.Username
		if runtime.GOOS != "windows" {
			if group, err := user.LookupGroupId(currentUser.Gid); err == nil {
				data["group"] = group.Name
			} else {
				log.Debug().
					Str("gid", currentUser.Gid).
					Err(err).
					Msg("user.LookupGroupId")
			}
		}
	} else {
		log.Debug().
			Err(err).
			Msg("user.Current")
		// Fall back to $USER when user.Current fails.
		user, ok := os.LookupEnv("USER")
		if ok {
			data["username"] = user
		} else {
			log.Debug().
				Str("key", "USER").
				Bool("ok", ok).
				Msg("os.LookupEnv")
		}
	}
	if fqdnHostname, err := chezmoi.FQDNHostname(c.fs); err == nil && fqdnHostname != "" {
		data["fqdnHostname"] = fqdnHostname
	} else {
		log.Debug().
			Err(err).
			Msg("chezmoi.EtcHostsFQDNHostname")
	}
	// hostname is the short hostname: everything before the first dot.
	if hostname, err := os.Hostname(); err == nil {
		data["hostname"] = strings.SplitN(hostname, ".", 2)[0]
	} else {
		log.Debug().
			Err(err).
			Msg("os.Hostname")
	}
	if kernelInfo, err := chezmoi.KernelInfo(c.fs); err == nil {
		data["kernel"] = kernelInfo
	} else {
		log.Debug().
			Err(err).
			Msg("chezmoi.KernelInfo")
	}
	if osRelease, err := chezmoi.OSRelease(c.fs); err == nil {
		data["osRelease"] = upperSnakeCaseToCamelCaseMap(osRelease)
	} else {
		log.Debug().
			Err(err).
			Msg("chezmoi.OSRelease")
	}
	return map[string]interface{}{
		"chezmoi": data,
	}
}
// destAbsPathInfos resolves args to absolute destination paths (which must be
// inside the destination directory) and returns a map from each path to its
// FileInfo, suitable for adding to the source state. With recursive, each arg
// is walked; with follow, symlinks are resolved with Stat instead of Lstat.
func (c *Config) destAbsPathInfos(sourceState *chezmoi.SourceState, args []string, recursive, follow bool) (map[chezmoi.AbsPath]os.FileInfo, error) {
	destAbsPathInfos := make(map[chezmoi.AbsPath]os.FileInfo)
	for _, arg := range args {
		destAbsPath, err := chezmoi.NewAbsPathFromExtPath(arg, c.homeDirAbsPath)
		if err != nil {
			return nil, err
		}
		// Reject args outside the destination directory.
		if _, err := destAbsPath.TrimDirPrefix(c.destDirAbsPath); err != nil {
			return nil, err
		}
		if recursive {
			if err := chezmoi.Walk(c.destSystem, destAbsPath, func(destAbsPath chezmoi.AbsPath, info os.FileInfo, err error) error {
				if err != nil {
					return err
				}
				// With follow, replace a symlink's info with its target's.
				if follow && info.Mode()&os.ModeType == os.ModeSymlink {
					info, err = c.destSystem.Stat(destAbsPath)
					if err != nil {
						return err
					}
				}
				return sourceState.AddDestAbsPathInfos(destAbsPathInfos, c.destSystem, destAbsPath, info)
			}); err != nil {
				return nil, err
			}
		} else {
			var info os.FileInfo
			if follow {
				info, err = c.destSystem.Stat(destAbsPath)
			} else {
				info, err = c.destSystem.Lstat(destAbsPath)
			}
			if err != nil {
				return nil, err
			}
			if err := sourceState.AddDestAbsPathInfos(destAbsPathInfos, c.destSystem, destAbsPath, info); err != nil {
				return nil, err
			}
		}
	}
	return destAbsPathInfos, nil
}
// diffFile renders a unified diff between the from and to contents/modes of
// path and sends it to the diff pager, colorized when color is enabled.
func (c *Config) diffFile(path chezmoi.RelPath, fromData []byte, fromMode os.FileMode, toData []byte, toMode os.FileMode) error {
	diffPatch, err := chezmoi.DiffPatch(path, fromData, fromMode, toData, toMode)
	if err != nil {
		return err
	}
	var builder strings.Builder
	encoder := diff.NewUnifiedEncoder(&builder, diff.DefaultContextLines)
	if c.color {
		encoder.SetColor(diff.NewColorConfig())
	}
	if err := encoder.Encode(diffPatch); err != nil {
		return err
	}
	return c.diffPager(builder.String())
}
// diffPager writes output through the configured pager, or directly to the
// configured output when paging is disabled or no pager is set.
func (c *Config) diffPager(output string) error {
	if c.noPager || c.Diff.Pager == "" {
		return c.writeOutputString(output)
	}
	// A pager containing whitespace is treated as a full shell command line
	// and run via the user's shell; otherwise it is executed directly.
	var cmd *exec.Cmd
	if strings.IndexFunc(c.Diff.Pager, unicode.IsSpace) == -1 {
		//nolint:gosec
		cmd = exec.Command(c.Diff.Pager)
	} else {
		userShell, _ := shell.CurrentUserShell()
		cmd = exec.Command(userShell, "-c", c.Diff.Pager)
	}
	cmd.Stdin = strings.NewReader(output)
	cmd.Stdout = c.stdout
	cmd.Stderr = c.stderr
	return cmd.Run()
}
// doPurge removes chezmoi's own files: the config directory, config file,
// persistent state file, source directory, and (optionally) the running
// binary. Unless forced, the user is prompted per path; permission errors are
// skipped rather than fatal.
func (c *Config) doPurge(purgeOptions *purgeOptions) error {
	// Close the persistent state before removing its backing file.
	if c.persistentState != nil {
		if err := c.persistentState.Close(); err != nil {
			return err
		}
	}
	absSlashPersistentStateFile := c.persistentStateFile()
	absPaths := chezmoi.AbsPaths{
		c.configFileAbsPath.Dir(),
		c.configFileAbsPath,
		absSlashPersistentStateFile,
		c.sourceDirAbsPath,
	}
	if purgeOptions != nil && purgeOptions.binary {
		executable, err := os.Executable()
		// If the binary's path cannot be determined, silently skip it.
		if err == nil {
			absPaths = append(absPaths, chezmoi.AbsPath(executable))
		}
	}
	// Remove all paths that exist.
	for _, absPath := range absPaths {
		switch _, err := c.destSystem.Stat(absPath); {
		case os.IsNotExist(err):
			continue
		case err != nil:
			return err
		}
		if !c.force {
			switch choice, err := c.promptChoice(fmt.Sprintf("Remove %s", absPath), yesNoAllQuit); {
			case err != nil:
				return err
			case choice == "yes":
			case choice == "no":
				continue
			case choice == "all":
				// Stop prompting for the remaining paths.
				c.force = true
			case choice == "quit":
				return nil
			}
		}
		// Permission errors are tolerated so a partial purge still succeeds.
		switch err := c.destSystem.RemoveAll(absPath); {
		case os.IsPermission(err):
			continue
		case err != nil:
			return err
		}
	}
	return nil
}
// editor returns the path to the user's editor and any extra arguments.
func (c *Config) editor() (string, []string) {
	// If the user has set an edit command then use it.
	if c.Edit.Command != "" {
		return c.Edit.Command, c.Edit.Args
	}
	// Prefer $VISUAL over $EDITOR and fallback to the OS's default editor.
	editor := firstNonEmptyString(
		os.Getenv("VISUAL"),
		os.Getenv("EDITOR"),
		defaultEditor,
	)
	// If editor is found on the PATH, return it.
	if path, err := exec.LookPath(editor); err == nil {
		return path, nil
	}
	// Otherwise, if editor contains spaces, then assume that the first word is
	// the editor and the rest are arguments.
	components := whitespaceRx.Split(editor, -1)
	if len(components) > 1 {
		if path, err := exec.LookPath(components[0]); err == nil {
			return path, components[1:]
		}
	}
	// Fallback to editor only.
	return editor, nil
}
// errorf writes a formatted message to stderr, prefixed with "chezmoi: ".
func (c *Config) errorf(format string, args ...interface{}) {
	message := fmt.Sprintf(format, args...)
	fmt.Fprint(c.stderr, "chezmoi: "+message)
}
// execute builds the root command and runs it with args.
func (c *Config) execute(args []string) error {
	root, err := c.newRootCmd()
	if err != nil {
		return err
	}
	root.SetArgs(args)
	return root.Execute()
}
// findConfigTemplate looks in the source directory for a config file template
// (.chezmoi.<ext>.tmpl for each viper-supported extension) and returns the
// generated config filename, the extension, and the template contents. All
// return values are zero when no template exists.
func (c *Config) findConfigTemplate() (chezmoi.RelPath, string, []byte, error) {
	for _, ext := range viper.SupportedExts {
		sourceRelPath := chezmoi.RelPath(chezmoi.Prefix + "." + ext + chezmoi.TemplateSuffix)
		contents, err := c.baseSystem.ReadFile(c.sourceDirAbsPath.Join(sourceRelPath))
		if os.IsNotExist(err) {
			continue
		}
		if err != nil {
			return "", "", nil, err
		}
		return chezmoi.RelPath("chezmoi." + ext), ext, contents, nil
	}
	return "", "", nil, nil
}
// gitAutoAdd stages all changes in the source directory and returns the
// resulting parsed git status.
func (c *Config) gitAutoAdd() (*git.Status, error) {
	if err := c.run(c.sourceDirAbsPath, c.Git.Command, []string{"add", "."}); err != nil {
		return nil, err
	}
	statusOutput, err := c.cmdOutput(c.sourceDirAbsPath, c.Git.Command, []string{"status", "--porcelain=v2"})
	if err != nil {
		return nil, err
	}
	return git.ParseStatusPorcelainV2(statusOutput)
}
// gitAutoCommit commits the staged changes in the source directory with a
// message generated from the embedded commit message template. A clean status
// is a no-op.
func (c *Config) gitAutoCommit(status *git.Status) error {
	if status.Empty() {
		return nil
	}
	templateSource, err := templates.FS.ReadFile("COMMIT_MESSAGE.tmpl")
	if err != nil {
		return err
	}
	tmpl, err := template.New("commit_message").Funcs(c.templateFuncs).Parse(string(templateSource))
	if err != nil {
		return err
	}
	var sb strings.Builder
	if err := tmpl.Execute(&sb, status); err != nil {
		return err
	}
	return c.run(c.sourceDirAbsPath, c.Git.Command, []string{"commit", "--message", sb.String()})
}
// gitAutoPush pushes the source directory's branch to its remote. A clean
// status is a no-op.
func (c *Config) gitAutoPush(status *git.Status) error {
	if !status.Empty() {
		return c.run(c.sourceDirAbsPath, c.Git.Command, []string{"push"})
	}
	return nil
}
// makeRunEWithSourceState adapts runE, which requires a source state, into a
// standard cobra RunE function by reading the source state on each invocation.
func (c *Config) makeRunEWithSourceState(runE func(*cobra.Command, []string, *chezmoi.SourceState) error) func(*cobra.Command, []string) error {
	return func(cmd *cobra.Command, args []string) error {
		s, err := c.sourceState()
		if err != nil {
			return err
		}
		return runE(cmd, args, s)
	}
}
// marshal serializes data in the format named by formatStr ("json" or "yaml")
// and writes it to the configured output.
func (c *Config) marshal(formatStr string, data interface{}) error {
	var format chezmoi.Format
	switch formatStr {
	case "json":
		format = chezmoi.JSONFormat
	case "yaml":
		format = chezmoi.YAMLFormat
	default:
		return fmt.Errorf("%s: unknown format", formatStr)
	}
	marshaled, marshalErr := format.Marshal(data)
	if marshalErr != nil {
		return marshalErr
	}
	return c.writeOutput(marshaled)
}
// newRootCmd builds the root cobra command: it registers all persistent
// flags (binding a subset to viper so they can also be set via the config
// file), marks filename/dirname flags for shell completion, and attaches all
// subcommands.
func (c *Config) newRootCmd() (*cobra.Command, error) {
	rootCmd := &cobra.Command{
		Use:                "chezmoi",
		Short:              "Manage your dotfiles across multiple diverse machines, securely",
		Version:            c.versionStr,
		PersistentPreRunE:  c.persistentPreRunRootE,
		PersistentPostRunE: c.persistentPostRunRootE,
		SilenceErrors:      true,
		SilenceUsage:       true,
	}
	persistentFlags := rootCmd.PersistentFlags()
	persistentFlags.StringVar(&c.Color, "color", c.Color, "colorize diffs")
	persistentFlags.StringVarP(&c.DestDir, "destination", "D", c.DestDir, "destination directory")
	persistentFlags.BoolVar(&c.Remove, "remove", c.Remove, "remove targets")
	persistentFlags.StringVarP(&c.SourceDir, "source", "S", c.SourceDir, "source directory")
	persistentFlags.StringVar(&c.UseBuiltinGit, "use-builtin-git", c.UseBuiltinGit, "use builtin git")
	// These flags are also settable via the config file.
	// NOTE(review): "use-builtin-git" is registered above but not bound to
	// viper here, unlike the other config-backed flags — confirm intentional.
	for _, key := range []string{
		"color",
		"destination",
		"remove",
		"source",
	} {
		if err := viper.BindPFlag(key, persistentFlags.Lookup(key)); err != nil {
			return nil, err
		}
	}
	persistentFlags.StringVarP(&c.configFile, "config", "c", c.configFile, "config file")
	persistentFlags.StringVar(&c.cpuProfile, "cpu-profile", c.cpuProfile, "write CPU profile to file")
	persistentFlags.BoolVarP(&c.dryRun, "dry-run", "n", c.dryRun, "dry run")
	persistentFlags.VarP(c.exclude, "exclude", "x", "exclude entry types")
	persistentFlags.BoolVar(&c.force, "force", c.force, "force")
	persistentFlags.BoolVarP(&c.keepGoing, "keep-going", "k", c.keepGoing, "keep going as far as possible after an error")
	persistentFlags.BoolVar(&c.noPager, "no-pager", c.noPager, "do not use the pager")
	persistentFlags.BoolVar(&c.noTTY, "no-tty", c.noTTY, "don't attempt to get a TTY for reading passwords")
	persistentFlags.BoolVar(&c.sourcePath, "source-path", c.sourcePath, "specify targets by source path")
	persistentFlags.BoolVarP(&c.verbose, "verbose", "v", c.verbose, "verbose")
	persistentFlags.StringVarP(&c.outputStr, "output", "o", c.outputStr, "output file")
	persistentFlags.BoolVar(&c.debug, "debug", c.debug, "write debug logs")
	// Mark path-valued flags so shell completion offers files/directories.
	for _, err := range []error{
		rootCmd.MarkPersistentFlagFilename("config"),
		rootCmd.MarkPersistentFlagFilename("cpu-profile"),
		rootCmd.MarkPersistentFlagDirname("destination"),
		rootCmd.MarkPersistentFlagFilename("output"),
		rootCmd.MarkPersistentFlagDirname("source"),
	} {
		if err != nil {
			return nil, err
		}
	}
	rootCmd.SetHelpCommand(c.newHelpCmd())
	rootCmd.AddCommand(
		c.newAddCmd(),
		c.newApplyCmd(),
		c.newArchiveCmd(),
		c.newCatCmd(),
		c.newCDCmd(),
		c.newChattrCmd(),
		c.newCompletionCmd(),
		c.newDataCmd(),
		c.newDiffCmd(),
		c.newDocsCmd(),
		c.newDoctorCmd(),
		c.newDumpCmd(),
		c.newEditCmd(),
		c.newEditConfigCmd(),
		c.newExecuteTemplateCmd(),
		c.newForgetCmd(),
		c.newGitCmd(),
		c.newImportCmd(),
		c.newInitCmd(),
		c.newManagedCmd(),
		c.newMergeCmd(),
		c.newPurgeCmd(),
		c.newRemoveCmd(),
		c.newSecretCmd(),
		c.newSourcePathCmd(),
		c.newStateCmd(),
		c.newStatusCmd(),
		c.newUnmanagedCmd(),
		c.newUpdateCmd(),
		c.newUpgradeCmd(),
		c.newVerifyCmd(),
	)
	return rootCmd, nil
}
// persistentPostRunRootE runs after every command. It stops CPU profiling,
// closes the persistent state, warns if a command that modifies the config
// file left it unreadable, and runs the configured git auto add/commit/push
// steps when the command modified the source directory.
func (c *Config) persistentPostRunRootE(cmd *cobra.Command, args []string) error {
	defer pprof.StopCPUProfile()
	if c.persistentState != nil {
		if err := c.persistentState.Close(); err != nil {
			return err
		}
	}
	if boolAnnotation(cmd, modifiesConfigFile) {
		// Warn the user of any errors reading the config file.
		v := viper.New()
		v.SetFs(vfsafero.NewAferoFS(c.fs))
		v.SetConfigFile(string(c.configFileAbsPath))
		err := v.ReadInConfig()
		if err == nil {
			err = v.Unmarshal(&Config{})
		}
		if err != nil {
			cmd.Printf("warning: %s: %v\n", c.configFileAbsPath, err)
		}
	}
	if boolAnnotation(cmd, modifiesSourceDirectory) {
		var status *git.Status
		// AutoCommit and AutoPush both imply AutoAdd.
		if c.Git.AutoAdd || c.Git.AutoCommit || c.Git.AutoPush {
			var err error
			status, err = c.gitAutoAdd()
			if err != nil {
				return err
			}
		}
		if c.Git.AutoCommit || c.Git.AutoPush {
			if err := c.gitAutoCommit(status); err != nil {
				return err
			}
		}
		if c.Git.AutoPush {
			if err := c.gitAutoPush(status); err != nil {
				return err
			}
		}
	}
	return nil
}
// persistentPreRunRootE initializes the Config before any command runs:
// CPU profiling, config file reading, color detection, logging, the
// persistent state (mode chosen by the command's annotations), the source and
// destination systems (read-only / dry-run / verbose wrappers), and
// encryption. The steps are order-dependent: later steps rely on flags and
// config values established by earlier ones.
func (c *Config) persistentPreRunRootE(cmd *cobra.Command, args []string) error {
	if c.cpuProfile != "" {
		f, err := os.Create(c.cpuProfile)
		if err != nil {
			return err
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			return err
		}
	}
	var err error
	c.configFileAbsPath, err = chezmoi.NewAbsPathFromExtPath(c.configFile, c.homeDirAbsPath)
	if err != nil {
		return err
	}
	// An invalid config is fatal only for commands that require one.
	if err := c.readConfig(); err != nil {
		if !boolAnnotation(cmd, doesNotRequireValidConfig) {
			return fmt.Errorf("invalid config: %s: %w", c.configFile, err)
		}
		cmd.Printf("warning: %s: %v\n", c.configFile, err)
	}
	// Resolve the effective color setting: "auto" (or empty) enables color
	// only when stdout is a terminal and $NO_COLOR is unset.
	if c.Color == "" || strings.ToLower(c.Color) == "auto" {
		if _, ok := os.LookupEnv("NO_COLOR"); ok {
			c.color = false
		} else if stdout, ok := c.stdout.(*os.File); ok {
			c.color = term.IsTerminal(int(stdout.Fd()))
		} else {
			c.color = false
		}
	} else if color, err := parseBool(c.Color); err == nil {
		c.color = color
	} else if !boolAnnotation(cmd, doesNotRequireValidConfig) {
		return fmt.Errorf("%s: invalid color value", c.Color)
	}
	if c.color {
		if err := enableVirtualTerminalProcessing(c.stdout); err != nil {
			return err
		}
	}
	if c.sourceDirAbsPath, err = chezmoi.NewAbsPathFromExtPath(c.SourceDir, c.homeDirAbsPath); err != nil {
		return err
	}
	if c.destDirAbsPath, err = chezmoi.NewAbsPathFromExtPath(c.DestDir, c.homeDirAbsPath); err != nil {
		return err
	}
	log.Logger = log.Output(zerolog.NewConsoleWriter(
		func(w *zerolog.ConsoleWriter) {
			w.Out = c.stderr
			w.NoColor = !c.color
			w.TimeFormat = time.RFC3339
		},
	))
	if c.debug {
		zerolog.SetGlobalLevel(zerolog.DebugLevel)
	} else {
		zerolog.SetGlobalLevel(zerolog.Disabled)
	}
	c.baseSystem = chezmoi.NewRealSystem(c.fs)
	if c.debug {
		c.baseSystem = chezmoi.NewDebugSystem(c.baseSystem)
	}
	// Open the persistent state in the mode requested by the command's
	// annotations. Dry runs get a mock copy so writes are discarded.
	switch {
	case cmd.Annotations[persistentStateMode] == persistentStateModeEmpty:
		c.persistentState = chezmoi.NewMockPersistentState()
	case cmd.Annotations[persistentStateMode] == persistentStateModeReadOnly:
		persistentStateFile := c.persistentStateFile()
		c.persistentState, err = chezmoi.NewBoltPersistentState(c.baseSystem, persistentStateFile, chezmoi.BoltPersistentStateReadOnly)
		if err != nil {
			return err
		}
	case cmd.Annotations[persistentStateMode] == persistentStateModeReadMockWrite:
		fallthrough
	case cmd.Annotations[persistentStateMode] == persistentStateModeReadWrite && c.dryRun:
		persistentStateFile := c.persistentStateFile()
		persistentState, err := chezmoi.NewBoltPersistentState(c.baseSystem, persistentStateFile, chezmoi.BoltPersistentStateReadOnly)
		if err != nil {
			return err
		}
		dryRunPeristentState := chezmoi.NewMockPersistentState()
		if err := persistentState.CopyTo(dryRunPeristentState); err != nil {
			return err
		}
		if err := persistentState.Close(); err != nil {
			return err
		}
		c.persistentState = dryRunPeristentState
	case cmd.Annotations[persistentStateMode] == persistentStateModeReadWrite:
		persistentStateFile := c.persistentStateFile()
		c.persistentState, err = chezmoi.NewBoltPersistentState(c.baseSystem, persistentStateFile, chezmoi.BoltPersistentStateReadWrite)
		if err != nil {
			return err
		}
	default:
		c.persistentState = nil
	}
	if c.debug && c.persistentState != nil {
		c.persistentState = chezmoi.NewDebugPersistentState(c.persistentState)
	}
	// Wrap the base system for the source and destination sides according to
	// the command's annotations and the dry-run/verbose flags.
	c.sourceSystem = c.baseSystem
	c.destSystem = c.baseSystem
	if !boolAnnotation(cmd, modifiesDestinationDirectory) {
		c.destSystem = chezmoi.NewReadOnlySystem(c.destSystem)
	}
	if !boolAnnotation(cmd, modifiesSourceDirectory) {
		c.sourceSystem = chezmoi.NewReadOnlySystem(c.sourceSystem)
	}
	if c.dryRun {
		c.sourceSystem = chezmoi.NewDryRunSystem(c.sourceSystem)
		c.destSystem = chezmoi.NewDryRunSystem(c.destSystem)
	}
	if c.verbose {
		c.sourceSystem = chezmoi.NewGitDiffSystem(c.sourceSystem, c.stdout, c.sourceDirAbsPath, c.color)
		c.destSystem = chezmoi.NewGitDiffSystem(c.destSystem, c.stdout, c.destDirAbsPath, c.color)
	}
	switch c.Encryption {
	case "age":
		c.encryption = &c.AGE
	case "gpg":
		c.encryption = &c.GPG
	case "":
		// Detect encryption if any non-default configuration is set, preferring
		// gpg for backwards compatibility.
		switch {
		case !reflect.DeepEqual(c.GPG, defaultGPGEncryptionConfig):
			c.encryption = &c.GPG
		case !reflect.DeepEqual(c.AGE, defaultAGEEncryptionConfig):
			c.encryption = &c.AGE
		default:
			c.encryption = chezmoi.NoEncryption{}
		}
	default:
		return fmt.Errorf("%s: unknown encryption", c.Encryption)
	}
	if c.debug {
		c.encryption = chezmoi.NewDebugEncryption(c.encryption)
	}
	if boolAnnotation(cmd, requiresConfigDirectory) {
		if err := chezmoi.MkdirAll(c.baseSystem, c.configFileAbsPath.Dir(), 0o777); err != nil {
			return err
		}
	}
	if boolAnnotation(cmd, requiresSourceDirectory) {
		if err := chezmoi.MkdirAll(c.baseSystem, c.sourceDirAbsPath, 0o777); err != nil {
			return err
		}
	}
	if boolAnnotation(cmd, runsCommands) {
		if runtime.GOOS == "linux" && c.bds.RuntimeDir != "" {
			// Snap sets the $XDG_RUNTIME_DIR environment variable to
			// /run/user/$uid/snap.$snap_name, but does not create this
			// directory. Consequently, any spawned processes that need
			// $XDG_DATA_DIR will fail. As a work-around, create the directory
			// if it does not exist. See
			// https://forum.snapcraft.io/t/wayland-dconf-and-xdg-runtime-dir/186/13.
			if err := chezmoi.MkdirAll(c.baseSystem, chezmoi.AbsPath(c.bds.RuntimeDir), 0o700); err != nil {
				return err
			}
		}
	}
	return nil
}
// persistentStateFile returns the path of the persistent state file: next to
// the explicit config file if one was given, otherwise the first existing
// candidate in the XDG config directories, otherwise next to the default
// config file.
func (c *Config) persistentStateFile() chezmoi.AbsPath {
	if c.configFile != "" {
		return chezmoi.AbsPath(c.configFile).Dir().Join(persistentStateFilename)
	}
	for _, configDir := range c.bds.ConfigDirs {
		configDirAbsPath := chezmoi.AbsPath(configDir)
		persistentStateFile := configDirAbsPath.Join(chezmoi.RelPath("chezmoi"), persistentStateFilename)
		// NOTE(review): os.Stat bypasses c.fs here, unlike most other file
		// access in this type — confirm this is intentional.
		if _, err := os.Stat(string(persistentStateFile)); err == nil {
			return persistentStateFile
		}
	}
	return defaultConfigFile(c.fs, c.bds).Dir().Join(persistentStateFilename)
}
// promptChoice prompts with prompt and choices, reading lines until one
// matches a choice or a unique abbreviation of one, and returns the full
// choice string.
func (c *Config) promptChoice(prompt string, choices []string) (string, error) {
	abbreviations := uniqueAbbreviations(choices)
	fullPrompt := fmt.Sprintf("%s [%s]? ", prompt, strings.Join(choices, ","))
	for {
		line, err := c.readLine(fullPrompt)
		if err != nil {
			return "", err
		}
		if choice, ok := abbreviations[strings.TrimSpace(line)]; ok {
			return choice, nil
		}
	}
}
// readConfig loads and validates the config file, if it exists. A missing
// config file is not an error.
func (c *Config) readConfig() error {
	v := viper.New()
	v.SetConfigFile(string(c.configFileAbsPath))
	v.SetFs(vfsafero.NewAferoFS(c.fs))
	err := v.ReadInConfig()
	if os.IsNotExist(err) {
		return nil
	}
	if err != nil {
		return err
	}
	if err := v.Unmarshal(c); err != nil {
		return err
	}
	return c.validateData()
}
// readLine prompts with prompt and reads a single line from the user's
// terminal.
func (c *Config) readLine(prompt string) (string, error) {
	var result string
	err := c.withTerminal(prompt, func(t terminal) error {
		line, readErr := t.ReadLine()
		result = line
		return readErr
	})
	if err != nil {
		return "", err
	}
	return result, nil
}
// readPassword prompts with prompt and reads a password from the user's
// terminal without echoing it.
func (c *Config) readPassword(prompt string) (string, error) {
	var result string
	err := c.withTerminal("", func(t terminal) error {
		password, readErr := t.ReadPassword(prompt)
		result = password
		return readErr
	})
	if err != nil {
		return "", err
	}
	return result, nil
}
// run executes name with args via the base system, wired to the config's
// standard streams. If dir is non-empty, the command runs in that directory
// (resolved to a raw path first).
func (c *Config) run(dir chezmoi.AbsPath, name string, args []string) error {
	command := exec.Command(name, args...)
	command.Stdin = c.stdin
	command.Stdout = c.stdout
	command.Stderr = c.stderr
	if dir != "" {
		rawDir, err := c.baseSystem.RawPath(dir)
		if err != nil {
			return err
		}
		command.Dir = string(rawDir)
	}
	return c.baseSystem.RunCmd(command)
}
// runEditor opens the user's editor with args appended to any configured
// editor arguments.
func (c *Config) runEditor(args []string) error {
	editorName, editorArgs := c.editor()
	combined := append(editorArgs, args...)
	return c.run("", editorName, combined)
}
// sourceAbsPaths returns the absolute source paths corresponding to the
// target paths in args. Every arg must be present in the source state.
func (c *Config) sourceAbsPaths(sourceState *chezmoi.SourceState, args []string) (chezmoi.AbsPaths, error) {
	targetRelPaths, err := c.targetRelPaths(sourceState, args, targetRelPathsOptions{
		mustBeInSourceState: true,
	})
	if err != nil {
		return nil, err
	}
	result := make(chezmoi.AbsPaths, 0, len(targetRelPaths))
	for _, targetRelPath := range targetRelPaths {
		sourceRelPath := sourceState.MustEntry(targetRelPath).SourceRelPath().RelPath()
		result = append(result, c.sourceDirAbsPath.Join(sourceRelPath))
	}
	return result, nil
}
// sourceState reads and returns the source state, configured with the
// config's template data, encryption, directories, and template options. It
// returns an error if the source state declares a minimum chezmoi version
// newer than this (non-development) build.
func (c *Config) sourceState() (*chezmoi.SourceState, error) {
	s := chezmoi.NewSourceState(
		chezmoi.WithDefaultTemplateDataFunc(c.defaultTemplateData),
		chezmoi.WithDestDir(c.destDirAbsPath),
		chezmoi.WithEncryption(c.encryption),
		chezmoi.WithPriorityTemplateData(c.Data),
		chezmoi.WithSourceDir(c.sourceDirAbsPath),
		chezmoi.WithSystem(c.sourceSystem),
		chezmoi.WithTemplateFuncs(c.templateFuncs),
		chezmoi.WithTemplateOptions(c.Template.Options),
	)
	if err := s.Read(); err != nil {
		return nil, err
	}
	// Development builds skip the minimum version check.
	if minVersion := s.MinVersion(); c.version != nil && !isDevVersion(c.version) && c.version.LessThan(minVersion) {
		return nil, fmt.Errorf("source state requires version %s or later, chezmoi is version %s", minVersion, c.version)
	}
	return s, nil
}
// targetRelPathsOptions configures how (*Config).targetRelPaths resolves its
// arguments.
type targetRelPathsOptions struct {
	mustBeInSourceState bool // error on args not present in the source state
	recursive           bool // also include entries under each named directory
}
// targetRelPaths resolves args (external paths) to target-relative paths
// under the destination directory, honoring options. The result is sorted
// and de-duplicated; it is nil when no paths match.
func (c *Config) targetRelPaths(sourceState *chezmoi.SourceState, args []string, options targetRelPathsOptions) (chezmoi.RelPaths, error) {
	targetRelPaths := make(chezmoi.RelPaths, 0, len(args))
	for _, arg := range args {
		argAbsPath, err := chezmoi.NewAbsPathFromExtPath(arg, c.homeDirAbsPath)
		if err != nil {
			return nil, err
		}
		targetRelPath, err := argAbsPath.TrimDirPrefix(c.destDirAbsPath)
		if err != nil {
			return nil, err
		}
		if options.mustBeInSourceState {
			if _, ok := sourceState.Entry(targetRelPath); !ok {
				return nil, fmt.Errorf("%s: not in source state", arg)
			}
		}
		targetRelPaths = append(targetRelPaths, targetRelPath)
		if options.recursive {
			parentRelPath := targetRelPath
			// FIXME we should not call s.TargetRelPaths() here - risk of accidentally quadratic
			for _, targetRelPath := range sourceState.TargetRelPaths() {
				if _, err := targetRelPath.TrimDirPrefix(parentRelPath); err == nil {
					targetRelPaths = append(targetRelPaths, targetRelPath)
				}
			}
		}
	}
	if len(targetRelPaths) == 0 {
		return nil, nil
	}
	// Sort and de-duplicate targetRelPaths in place.
	sort.Sort(targetRelPaths)
	n := 1
	for i := 1; i < len(targetRelPaths); i++ {
		if targetRelPaths[i] != targetRelPaths[i-1] {
			targetRelPaths[n] = targetRelPaths[i]
			n++
		}
	}
	return targetRelPaths[:n], nil
}
// targetRelPathsBySourcePath resolves args, interpreted as source paths, to
// their corresponding target-relative paths. Every arg must be present in
// the source state.
func (c *Config) targetRelPathsBySourcePath(sourceState *chezmoi.SourceState, args []string) (chezmoi.RelPaths, error) {
	// Build a reverse index from source relative path to target relative path.
	bySource := make(map[chezmoi.RelPath]chezmoi.RelPath)
	for targetRelPath, sourceStateEntry := range sourceState.Entries() {
		bySource[sourceStateEntry.SourceRelPath().RelPath()] = targetRelPath
	}
	result := make(chezmoi.RelPaths, 0, len(args))
	for _, arg := range args {
		argAbsPath, err := chezmoi.NewAbsPathFromExtPath(arg, c.homeDirAbsPath)
		if err != nil {
			return nil, err
		}
		sourceRelPath, err := argAbsPath.TrimDirPrefix(c.sourceDirAbsPath)
		if err != nil {
			return nil, err
		}
		targetRelPath, ok := bySource[sourceRelPath]
		if !ok {
			return nil, fmt.Errorf("%s: not in source state", arg)
		}
		result = append(result, targetRelPath)
	}
	return result, nil
}
// useBuiltinGit reports whether the builtin git implementation should be
// used. With "auto" (or unset), the builtin is used only when the configured
// git command cannot be found on the PATH.
func (c *Config) useBuiltinGit() (bool, error) {
	switch strings.ToLower(c.UseBuiltinGit) {
	case "", "auto":
		_, err := exec.LookPath(c.Git.Command)
		return err != nil, nil
	default:
		return parseBool(c.UseBuiltinGit)
	}
}
// validateData checks that every key in the configured template data is a
// valid identifier.
func (c *Config) validateData() error {
	err := validateKeys(c.Data, identifierRx)
	return err
}
// withTerminal calls f with a terminal for interacting with the user.
//
// With noTTY, a null terminal reading from stdin is used. On Windows a dumb
// terminal is used. Otherwise, if stdin is a TTY it is put into raw mode;
// failing that, /dev/tty is opened and used instead.
func (c *Config) withTerminal(prompt string, f func(terminal) error) error {
	if c.noTTY {
		return f(newNullTerminal(c.stdin))
	}
	if runtime.GOOS == "windows" {
		return f(newDumbTerminal(c.stdin, c.stdout, prompt))
	}
	if stdinFile, ok := c.stdin.(*os.File); ok && term.IsTerminal(int(stdinFile.Fd())) {
		return withRawTerminal(int(stdinFile.Fd()), struct {
			io.Reader
			io.Writer
		}{
			Reader: c.stdin,
			Writer: c.stdout,
		}, prompt, f)
	}
	// stdin is not a terminal, so interact with the user via /dev/tty.
	devTTY, err := os.OpenFile("/dev/tty", os.O_RDWR, 0)
	if err != nil {
		return err
	}
	defer devTTY.Close()
	return withRawTerminal(int(devTTY.Fd()), devTTY, prompt, f)
}

// withRawTerminal puts the terminal identified by fd into raw mode, restores
// it on return, and calls f with a terminal of the current size reading from
// and writing to rw.
func withRawTerminal(fd int, rw io.ReadWriter, prompt string, f func(terminal) error) error {
	width, height, err := term.GetSize(fd)
	if err != nil {
		return err
	}
	oldState, err := term.MakeRaw(fd)
	if err != nil {
		return err
	}
	defer func() {
		_ = term.Restore(fd, oldState)
	}()
	t := term.NewTerminal(rw, prompt)
	if err := t.SetSize(width, height); err != nil {
		return err
	}
	return f(t)
}
// writeOutput writes data to the configured output: stdout when the output
// is unset or "-", otherwise the named file.
func (c *Config) writeOutput(data []byte) error {
	if c.outputStr != "" && c.outputStr != "-" {
		return c.baseSystem.WriteFile(chezmoi.AbsPath(c.outputStr), data, 0o666)
	}
	_, err := c.stdout.Write(data)
	return err
}
// writeOutputString writes data to the configured output.
func (c *Config) writeOutputString(data string) error {
	b := []byte(data)
	return c.writeOutput(b)
}
// isDevVersion returns true if v is a development version, i.e. its major,
// minor, and patch components are all zero.
func isDevVersion(v *semver.Version) bool {
	switch {
	case v.Major != 0:
		return false
	case v.Minor != 0:
		return false
	default:
		return v.Patch == 0
	}
}
// withVersionInfo returns a configOption that populates the config's version
// fields from versionInfo. The human-readable version string is assembled
// from the parsed semantic version (or "dev" when unset), plus the commit,
// build date, and builder when present.
func withVersionInfo(versionInfo VersionInfo) configOption {
	return func(c *Config) error {
		var version *semver.Version
		elems := []string{"dev"}
		if versionInfo.Version != "" {
			parsed, err := semver.NewVersion(strings.TrimPrefix(versionInfo.Version, "v"))
			if err != nil {
				return err
			}
			version = parsed
			elems = []string{"v" + version.String()}
		}
		if versionInfo.Commit != "" {
			elems = append(elems, "commit "+versionInfo.Commit)
		}
		if versionInfo.Date != "" {
			elems = append(elems, "built at "+versionInfo.Date)
		}
		if versionInfo.BuiltBy != "" {
			elems = append(elems, "built by "+versionInfo.BuiltBy)
		}
		c.version = version
		c.versionInfo = versionInfo
		c.versionStr = strings.Join(elems, ", ")
		return nil
	}
}
| [
"\"PAGER\"",
"\"VISUAL\"",
"\"EDITOR\""
] | [] | [
"PAGER",
"VISUAL",
"EDITOR"
] | [] | ["PAGER", "VISUAL", "EDITOR"] | go | 3 | 0 | |
pkg/resources/statefulset.go | // Copyright 2020 Oracle and/or its affiliates. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package resources
import (
"fmt"
"os"
"strings"
"github.com/ocklin/ndb-operator/pkg/apis/ndbcontroller/v1alpha1"
"github.com/ocklin/ndb-operator/pkg/constants"
"github.com/ocklin/ndb-operator/pkg/version"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/klog"
)
// Names, images, and versions used when building the mgmd/ndbd statefulsets.
const mgmdVolumeName = "mgmdvolume" // volume mounted at the ndb data directory
const mgmdName = "mgmd"             // management node container name
const ndbImage = "mysql/mysql-cluster"
const ndbVersion = "8.0.22"
const ndbdName = "ndbd" // data node container name
const ndbAgentName = "ndb-agent"
const ndbAgentImage = "ndb-agent"
const ndbAgentVersion = "1.0.0"
// StatefulSetInterface is implemented by builders that produce a StatefulSet
// for an Ndb cluster resource.
type StatefulSetInterface interface {
	NewStatefulSet(cluster *v1alpha1.Ndb) *apps.StatefulSet
	GetName() string
}
// baseStatefulSet holds the information shared by the mgmd and ndbd
// statefulset builders.
type baseStatefulSet struct {
	typeName    string // node type, "mgmd" or "ndbd"
	clusterName string // name of the owning Ndb cluster resource
}
// NewMgmdStatefulSet returns a statefulset builder for the cluster's
// management (mgmd) nodes.
func NewMgmdStatefulSet(cluster *v1alpha1.Ndb) *baseStatefulSet {
	bss := &baseStatefulSet{
		typeName:    "mgmd",
		clusterName: cluster.Name,
	}
	return bss
}
// NewNdbdStatefulSet returns a statefulset builder for the cluster's data
// (ndbd) nodes.
func NewNdbdStatefulSet(cluster *v1alpha1.Ndb) *baseStatefulSet {
	bss := &baseStatefulSet{
		typeName:    "ndbd",
		clusterName: cluster.Name,
	}
	return bss
}
// volumeMounts returns the volume mounts shared by the ndb containers: the
// data directory volume, plus — when the user supplied an explicit config via
// config map — a config volume under <datadir>/config.
func volumeMounts(cluster *v1alpha1.Ndb) []v1.VolumeMount {
	mounts := []v1.VolumeMount{
		{
			Name:      mgmdVolumeName,
			MountPath: constants.DataDir,
		},
	}
	// A user may explicitly define a config file for ndb via config map.
	if cluster.Spec.Config != nil {
		mounts = append(mounts, v1.VolumeMount{
			Name:      "config-volume",
			MountPath: constants.DataDir + "/config",
		})
	}
	return mounts
}
// agentContainer builds the ndb-agent sidecar container. The image tag
// defaults to the operator's build version and can be overridden with the
// NDB_AGENT_VERSION environment variable. The agent exposes liveness and
// readiness probes on port 8080 and mounts the same volumes as the ndb
// containers.
func agentContainer(ndb *v1alpha1.Ndb, ndbAgentImage string) v1.Container {
	agentVersion := version.GetBuildVersion()
	// Use a distinct local name to avoid shadowing the imported version
	// package.
	if v := os.Getenv("NDB_AGENT_VERSION"); v != "" {
		agentVersion = v
	}
	image := fmt.Sprintf("%s:%s", ndbAgentImage, agentVersion)
	klog.Infof("Creating agent container from image %s", image)
	return v1.Container{
		Name:  ndbAgentName,
		Image: image,
		Ports: []v1.ContainerPort{
			{
				ContainerPort: 8080,
			},
		},
		// agent requires access to ndbd and mgmd volumes
		VolumeMounts: volumeMounts(ndb),
		Env:          []v1.EnvVar{},
		LivenessProbe: &v1.Probe{
			Handler: v1.Handler{
				HTTPGet: &v1.HTTPGetAction{
					Path: "/live",
					Port: intstr.FromInt(8080),
				},
			},
		},
		ReadinessProbe: &v1.Probe{
			Handler: v1.Handler{
				HTTPGet: &v1.HTTPGetAction{
					Path: "/ready",
					Port: intstr.FromInt(8080),
				},
			},
		},
	}
}
// getMgmdHostname returns a comma separated list of the fully qualified
// hostnames of all management nodes.
func (bss *baseStatefulSet) getMgmdHostname(ndb *v1alpha1.Ndb) string {
	dnsZone := fmt.Sprintf("%s.svc.cluster.local", ndb.Namespace)
	nodeCount := int(*ndb.Spec.Mgmd.NodeCount)
	hostnames := make([]string, 0, nodeCount)
	for i := 0; i < nodeCount; i++ {
		hostnames = append(hostnames,
			fmt.Sprintf("%s-%d.%s.%s", bss.clusterName+"-mgmd", i, ndb.GetManagementServiceName(), dnsZone))
	}
	return strings.Join(hostnames, ",")
}
// getConnectstring returns the NDB connect string: a comma separated list
// of "<mgmd FQDN>:<port>" entries, one per management node.
func (bss *baseStatefulSet) getConnectstring(ndb *v1alpha1.Ndb) string {
	dnsZone := fmt.Sprintf("%s.svc.cluster.local", ndb.Namespace)
	port := "1186"
	count := (int)(*ndb.Spec.Mgmd.NodeCount)
	hostnames := make([]string, 0, count)
	for i := 0; i < count; i++ {
		hostnames = append(hostnames, fmt.Sprintf("%s-%d.%s.%s:%s",
			bss.clusterName+"-mgmd", i, ndb.GetManagementServiceName(), dnsZone, port))
	}
	// strings.Join replaces the manual comma bookkeeping of the += loop.
	return strings.Join(hostnames, ",")
}
// getNdbdHostnames returns a comma separated list of the fully qualified
// hostnames of all data nodes.
func (bss *baseStatefulSet) getNdbdHostnames(ndb *v1alpha1.Ndb) string {
	dnsZone := fmt.Sprintf("%s.svc.cluster.local", ndb.Namespace)
	count := (int)(*ndb.Spec.Ndbd.NodeCount)
	hostnames := make([]string, 0, count)
	for i := 0; i < count; i++ {
		hostnames = append(hostnames, fmt.Sprintf("%s-%d.%s.%s",
			bss.clusterName+"-ndbd", i, ndb.GetDataNodeServiceName(), dnsZone))
	}
	// strings.Join replaces the manual comma bookkeeping of the += loop.
	return strings.Join(hostnames, ",")
}
// Builds the Ndb operator container for a mgmd.
func (bss *baseStatefulSet) mgmdContainer(ndb *v1alpha1.Ndb) v1.Container {
	// Hard-coded toggle between two launch strategies; the entrypoint
	// variant below is currently dead code (runWithEntrypoint is always false)
	// and is kept for reference.
	runWithEntrypoint := false
	cmd := ""
	environment := []v1.EnvVar{}
	imageName := fmt.Sprintf("%s:%s", ndbImage, ndbVersion)
	if runWithEntrypoint {
		// Variant 1 (inactive): let the image's /entrypoint.sh build the
		// cluster config from the NDB_* environment variables below.
		args := []string{
			"ndb_mgmd",
		}
		cmdArgs := strings.Join(args, " ")
		cmd = fmt.Sprintf(`/entrypoint.sh %s`, cmdArgs)
		mgmdHostname := bss.getMgmdHostname(ndb)
		ndbdHostnames := bss.getNdbdHostnames(ndb)
		environment = []v1.EnvVar{
			{
				Name:  "NDB_REPLICAS",
				Value: fmt.Sprintf("%d", *ndb.Spec.Ndbd.NoOfReplicas),
			},
			{
				Name:  "NDB_MGMD_HOSTS",
				Value: mgmdHostname,
			},
			{
				Name:  "NDB_NDBD_HOSTS",
				Value: ndbdHostnames,
			},
		}
		klog.Infof("Creating mgmd container from image %s with hostnames mgmd: %s, ndbd: %s",
			imageName, mgmdHostname, ndbdHostnames)
	} else {
		// Variant 2 (active): run ndb_mgmd directly against the config.ini
		// mounted from the generated config map (see volumeMounts).
		args := []string{
			"-f", "/var/lib/ndb/config/config.ini",
			"--configdir=/var/lib/ndb",
			"--initial",
			"--nodaemon",
			"--config-cache=0",
			"-v",
		}
		cmdArgs := strings.Join(args, " ")
		cmd = fmt.Sprintf(`/usr/sbin/ndb_mgmd %s`, cmdArgs)
		klog.Infof("Creating mgmd container from image %s", imageName)
	}
	return v1.Container{
		Name:  mgmdName,
		Image: imageName,
		Ports: []v1.ContainerPort{
			{
				ContainerPort: 1186, // NDB management server port
			},
		},
		VolumeMounts: volumeMounts(ndb),
		// bash -ecx: exit on error, run the composed command, trace execution.
		Command:         []string{"/bin/bash", "-ecx", cmd},
		ImagePullPolicy: v1.PullIfNotPresent, //v1.PullNever,
		Env:             environment,
	}
}
// Builds the Ndb container for a multi-threaded data node (ndbmtd).
// (The previous comment said "mgmd" — a copy/paste slip.)
func (bss *baseStatefulSet) ndbmtdContainer(ndb *v1alpha1.Ndb) v1.Container {
	imageName := fmt.Sprintf("%s:%s", ndbImage, ndbVersion)
	// Data nodes find the management servers through the connect string.
	connectString := bss.getConnectstring(ndb)
	args := []string{
		"-c", connectString,
		"--nodaemon",
		"-v",
	}
	cmdArgs := strings.Join(args, " ")
	cmd := fmt.Sprintf(`/usr/sbin/ndbmtd %s`, cmdArgs)
	klog.Infof("Creating ndbmtd container from image %s", imageName)
	return v1.Container{
		Name:  ndbdName,
		Image: imageName,
		Ports: []v1.ContainerPort{
			{
				// NOTE(review): 1186 is the management-server port; confirm
				// it is the intended container port for data nodes.
				ContainerPort: 1186,
			},
		},
		VolumeMounts: volumeMounts(ndb),
		Command:      []string{"/bin/bash", "-ecx", cmd},
		ImagePullPolicy: v1.PullIfNotPresent, //v1.PullNever,
	}
}
// GetName returns this StatefulSet's name in the form "<cluster>-<type>".
func (bss *baseStatefulSet) GetName() string {
	return fmt.Sprintf("%s-%s", bss.clusterName, bss.typeName)
}
// NewStatefulSet creates a new StatefulSet for the given Ndb cluster,
// covering either the management or the data nodes depending on which
// builder (mgmd/ndbd) this baseStatefulSet is.
func (bss *baseStatefulSet) NewStatefulSet(ndb *v1alpha1.Ndb) *apps.StatefulSet {
	// If a PV isn't specified just use a EmptyDir volume
	var podVolumes = []v1.Volume{}
	podVolumes = append(podVolumes,
		v1.Volume{
			Name: mgmdVolumeName,
			VolumeSource: v1.VolumeSource{
				EmptyDir: &v1.EmptyDirVolumeSource{
					Medium: "",
				},
			},
		},
	)
	// add the configmap generated with config.ini
	podVolumes = append(podVolumes, v1.Volume{
		Name: "config-volume",
		VolumeSource: v1.VolumeSource{
			ConfigMap: &v1.ConfigMapVolumeSource{
				LocalObjectReference: v1.LocalObjectReference{
					Name: ndb.GetConfigMapName(),
				},
			},
		},
	})

	// Pick containers, replica count, labels and governing service per
	// node type. Both branches assign replicas, so no dummy initial
	// pointer is needed (the old `func(i int32) *int32 {...}((0))` is gone).
	containers := []v1.Container{}
	serviceaccount := ""
	var podLabels map[string]string
	var replicas *int32
	svcName := ""
	if bss.typeName == "mgmd" {
		containers = []v1.Container{
			bss.mgmdContainer(ndb),
			//agentContainer(ndb, ndbAgentImage),
		}
		serviceaccount = "ndb-agent"
		replicas = ndb.Spec.Mgmd.NodeCount
		podLabels = ndb.GetManagementNodeLabels()
		svcName = ndb.GetManagementServiceName()
	} else {
		containers = []v1.Container{
			bss.ndbmtdContainer(ndb),
			//agentContainer(ndb, ndbAgentImage),
		}
		serviceaccount = "ndb-agent"
		replicas = ndb.Spec.Ndbd.NodeCount
		podLabels = ndb.GetDataNodeLabels()
		svcName = ndb.GetDataNodeServiceName()
	}

	podspec := v1.PodSpec{
		Containers: containers,
		Volumes:    podVolumes,
	}
	if serviceaccount != "" {
		// Use the variable instead of repeating the "ndb-agent" literal so
		// the guard and the value can never drift apart.
		podspec.ServiceAccountName = serviceaccount
	}

	ss := &apps.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:   bss.GetName(),
			Labels: podLabels, // must match templates
			OwnerReferences: []metav1.OwnerReference{
				// NOTE(review): the owner GVK uses the core v1 group for a
				// Kind of "Ndb"; confirm this should not be the group/version
				// of the Ndb CRD (v1alpha1).
				*metav1.NewControllerRef(ndb, schema.GroupVersionKind{
					Group:   v1.SchemeGroupVersion.Group,
					Version: v1.SchemeGroupVersion.Version,
					Kind:    "Ndb",
				}),
			},
		},
		Spec: apps.StatefulSetSpec{
			UpdateStrategy: apps.StatefulSetUpdateStrategy{
				// we want to be able to control ndbd node restarts directly
				Type: apps.OnDeleteStatefulSetStrategyType,
			},
			Selector: &metav1.LabelSelector{
				MatchLabels: podLabels,
			},
			Replicas: replicas,
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Name:        bss.GetName(),
					Labels:      podLabels,
					Annotations: map[string]string{},
				},
				Spec: podspec,
			},
			ServiceName: svcName,
		},
	}
	return ss
}
| [
"\"NDB_AGENT_VERSION\""
] | [] | [
"NDB_AGENT_VERSION"
] | [] | ["NDB_AGENT_VERSION"] | go | 1 | 0 | |
model_zoo/official/cv/mobilenetv2/src/launch.py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""launch train script"""
import os
import sys
import subprocess
import shutil
from args import launch_parse_args
def main():
    """Spawn one training process per device and wait for them all.

    Each rank gets its own working directory (rank<i>/, recreated from
    scratch) and its own log file; RANK_SIZE / RANK_ID / DEVICE_ID are
    passed to the children through the environment.

    Raises:
        subprocess.CalledProcessError: if any child exits non-zero.
    """
    print("start", __file__)
    args = launch_parse_args()
    print(args)
    visible_devices = args.visible_devices.split(',')
    assert os.path.isfile(args.training_script)
    assert len(visible_devices) >= args.nproc_per_node
    print('visible_devices:{}'.format(visible_devices))
    # spawn the processes
    processes = []
    cmds = []
    log_files = []
    env = os.environ.copy()
    env['RANK_SIZE'] = str(args.nproc_per_node)
    cur_path = os.getcwd()
    try:
        for rank_id in range(0, args.nproc_per_node):
            os.chdir(cur_path)
            device_id = visible_devices[rank_id]
            rank_dir = os.path.join(cur_path, 'rank{}'.format(rank_id))
            env['RANK_ID'] = str(rank_id)
            env['DEVICE_ID'] = str(device_id)
            # Recreate the per-rank working directory from scratch.
            if os.path.exists(rank_dir):
                shutil.rmtree(rank_dir)
            os.mkdir(rank_dir)
            os.chdir(rank_dir)
            cmd = [sys.executable, '-u']
            cmd.append(args.training_script)
            cmd.extend(args.training_script_args)
            log_file = open(f'{rank_dir}/log{rank_id}.log', 'w')
            process = subprocess.Popen(cmd, stdout=log_file, stderr=log_file, env=env)
            processes.append(process)
            cmds.append(cmd)
            log_files.append(log_file)
        for process, cmd in zip(processes, cmds):
            process.wait()
            if process.returncode != 0:
                # Bug fix: the original passed the Popen object itself as
                # `returncode`; report the actual integer exit status.
                raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
    finally:
        # Close every log file even when a child fails mid-way.
        for log_file in log_files:
            log_file.close()
# Entry point: launch the distributed training processes when run as a script.
if __name__ == "__main__":
    main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
pkg/commandbus/memory/command_bus_test.go | package commandbus
import (
"runtime"
"testing"
"github.com/vardius/golog"
)
// TestNew verifies that the plain constructor returns a non-nil bus.
func TestNew(t *testing.T) {
	bus := New(runtime.NumCPU())
	if bus == nil {
		// t.Fatal with a message gives a diagnostic the bare t.Fail() lacked.
		t.Fatal("New(runtime.NumCPU()) returned nil")
	}
}

// TestWithLogger verifies that wrapping an existing bus with a logger
// returns a non-nil bus.
func TestWithLogger(t *testing.T) {
	logger := golog.New("debug")
	parent := New(runtime.NumCPU())

	bus := WithLogger(parent, logger)
	if bus == nil {
		t.Fatal("WithLogger returned nil")
	}
}

// TestNewLoggable verifies that the logging constructor returns a non-nil bus.
func TestNewLoggable(t *testing.T) {
	logger := golog.New("debug")

	bus := NewLoggable(runtime.NumCPU(), logger)
	if bus == nil {
		t.Fatal("NewLoggable returned nil")
	}
}
| [] | [] | [] | [] | [] | go | null | null | null |
pkg/checkpoint/checkpoint.go | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package checkpoint
import (
"bufio"
"context"
"encoding/json"
"io"
"os"
"strings"
"sync/atomic"
"github.com/pingcap/errors"
"github.com/pingcap/tiup/pkg/logger/log"
"github.com/pingcap/tiup/pkg/queue"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"golang.org/x/sync/semaphore"
)
// contextKey is a private type for context values set by this package,
// preventing collisions with keys defined elsewhere.
type contextKey string

const (
	// semKey stores the *semaphore.Weighted that guards checkpoint acquisition.
	semKey = contextKey("CHECKPOINT_SEMAPHORE")
	// goroutineKey stores the *goroutineLock used to verify that a context
	// is only acquired from a single goroutine.
	goroutineKey = contextKey("CHECKPOINT_GOROUTINE")
)
var (
	// checkpoint is the process-global CheckPoint installed by SetCheckPoint;
	// nil means the checkpoint feature is disabled.
	checkpoint *CheckPoint
	// checkfields lists the field sets used by comparePoint to match records.
	checkfields []FieldSet

	// DebugCheckpoint is a switch used to debug if:
	//   - the context passed to checkpoint is generated by checkpoint.NewContext
	//   - multiple context acquires applied to the same context belong to the
	//     same goroutine
	DebugCheckpoint = os.Getenv("DEBUG_CHECKPOINT") == "1"
)
// SetCheckPoint loads the given audit file and installs it as the
// process-global checkpoint consulted by Acquire.
func SetCheckPoint(file string) error {
	pointReader, err := os.Open(file)
	if err != nil {
		return errors.AddStack(err)
	}
	defer pointReader.Close()

	// The trailing `if err != nil { return err }; return nil` of the
	// original collapses to a single return.
	checkpoint, err = NewCheckPoint(pointReader)
	return err
}
// Acquire wraps CheckPoint.Acquire on the process-global checkpoint.
// The context should come from NewContext; if it is not, the call panics
// under DebugCheckpoint and otherwise wraps the context on the fly (the
// replay result may then be unexpected).
func Acquire(ctx context.Context, point map[string]interface{}) *Point {
	if ctx.Value(goroutineKey) == nil || ctx.Value(semKey) == nil {
		if DebugCheckpoint {
			panic("the context passed to checkpoint.Acquire is not generated by checkpoint.NewContext")
		}
		log.Debugf("context missing for checkpoint, the result of replaying this operation may be unexpected!")
		ctx = NewContext(ctx)
	}
	// Check goroutine if we are in test: the first Acquire records the
	// calling goroutine's id; later Acquires on the same context assert
	// they run on that same goroutine.
	gptr := ctx.Value(goroutineKey).(*goroutineLock)
	g := atomic.LoadUint64((*uint64)(gptr))
	if g == 0 {
		atomic.StoreUint64((*uint64)(gptr), uint64(newGoroutineLock()))
	} else {
		goroutineLock(g).check()
	}
	// If checkpoint is disabled, return a mock point (nil ctx, no hit,
	// acquired=true so Release is a no-op on the semaphore).
	if checkpoint == nil {
		return &Point{nil, nil, true}
	}
	return checkpoint.Acquire(ctx, point)
}

// NewContext wraps the given context with the semaphore and goroutine-lock
// values that checkpoint acquisition needs.
func NewContext(ctx context.Context) context.Context {
	switch {
	case ctx.Value(semKey) == nil:
		// First wrap: attach a fresh one-slot semaphore.
		ctx = context.WithValue(ctx, semKey, semaphore.NewWeighted(1))
	case ctx.Value(semKey).(*semaphore.Weighted).TryAcquire(1):
		// Parent's slot was free: give the child its own one-slot semaphore.
		// NOTE(review): the deferred Release returns the probe-acquired slot
		// to the *parent* semaphore after rewrapping — confirm intended.
		defer ctx.Value(semKey).(*semaphore.Weighted).Release(1)
		ctx = context.WithValue(ctx, semKey, semaphore.NewWeighted(0))
	default:
		// Parent already holds its slot: the child can never acquire.
		ctx = context.WithValue(ctx, semKey, semaphore.NewWeighted(0))
	}
	return context.WithValue(ctx, goroutineKey, new(goroutineLock))
}
// CheckPoint provides the ability to recover from a failed command at the
// failpoint by replaying the records of a previous run's audit log.
type CheckPoint struct {
	// points holds the parsed checkpoint records; lookups match via comparePoint.
	points *queue.AnyQueue
}
// NewCheckPoint parses the audit log read from r and returns a CheckPoint
// holding every checkpoint record found in it.
func NewCheckPoint(r io.Reader) (*CheckPoint, error) {
	cp := CheckPoint{points: queue.NewAnyQueue(comparePoint)}
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		line := scanner.Text()
		m, err := checkLine(line)
		if err != nil {
			return nil, errors.Annotate(err, "initial checkpoint failed")
		}
		// Not a checkpoint record; skip.
		if m == nil {
			continue
		}
		cp.points.Put(m)
	}
	if err := scanner.Err(); err != nil {
		// The old message carried a stray "%s" verb that errors.Annotate
		// never formats; drop it.
		return nil, errors.Annotate(err, "failed to parse audit file")
	}

	return &cp, nil
}
// Acquire looks up the matching record for the given point and tries to
// take the context's checkpoint semaphore; the result carries both.
func (c *CheckPoint) Acquire(ctx context.Context, point map[string]interface{}) *Point {
	sem := ctx.Value(semKey).(*semaphore.Weighted)
	acquired := sem.TryAcquire(1)

	var hit map[string]interface{}
	if p := c.points.Get(point); p != nil {
		hit = p.(map[string]interface{})
	}
	return &Point{ctx: ctx, point: hit, acquired: acquired}
}
// Point is a point of checkpoint: the result of one Acquire call.
type Point struct {
	ctx      context.Context        // nil when the checkpoint feature is disabled
	point    map[string]interface{} // matched audit-log record, nil on miss
	acquired bool                   // whether this point holds the context semaphore
}

// Hit returns value of the point, it will be nil if not hit.
func (p *Point) Hit() map[string]interface{} {
	return p.point
}

// Release writes the checkpoint record to the log and releases the
// context semaphore taken in Acquire; err (if any) is attached as a field
// and switches the log level to Error.
func (p *Point) Release(err error, fields ...zapcore.Field) {
	logfn := zap.L().Info
	if err != nil {
		logfn = zap.L().Error
		fields = append(fields, zap.Error(err))
	}
	fields = append(fields, zap.Bool("hit", p.Hit() != nil))
	// Only the holder of the semaphore logs and releases; nested acquires
	// that failed TryAcquire stay silent.
	if p.acquired {
		logfn("CheckPoint", fields...)
		// If checkpoint is disabled, the p.ctx will be nil
		if p.ctx != nil {
			p.ctx.Value(semKey).(*semaphore.Weighted).Release(1)
		}
	}
}
// checkLine extracts the JSON payload from one audit-log line; it returns
// (nil, nil) for lines that are not checkpoint records. Target format:
//
//	2021-01-13T14:11:02.987+0800 INFO CheckPoint {k:v...}
func checkLine(line string) (map[string]interface{}, error) {
	parts := strings.Fields(line)
	jsonStart := strings.Index(line, "{")
	if len(parts) < 4 || parts[1] != "INFO" || parts[2] != "CheckPoint" || jsonStart < 0 {
		return nil, nil
	}
	point := make(map[string]interface{})
	if err := json.Unmarshal([]byte(line[jsonStart:]), &point); err != nil {
		return nil, errors.AddStack(err)
	}
	return point, nil
}
// comparePoint reports whether the two checkpoint records a and b (both
// map[string]interface{}) match under at least one configured FieldSet:
// a set matches when every field that has a comparator compares equal.
func comparePoint(a, b interface{}) bool {
	ma := a.(map[string]interface{})
	mb := b.(map[string]interface{})

next_set:
	for _, fs := range checkfields {
		for _, cf := range fs.Slice() {
			if cf.eq == nil {
				// Field carries no comparator; it does not constrain the match.
				continue
			}
			if !cf.eq(ma[cf.field], mb[cf.field]) {
				// Mismatch: this field set fails, try the next one.
				continue next_set
			}
		}
		return true
	}
	return false
}
| [
"\"DEBUG_CHECKPOINT\""
] | [] | [
"DEBUG_CHECKPOINT"
] | [] | ["DEBUG_CHECKPOINT"] | go | 1 | 0 | |
bot.py | # -*- coding: utf-8 -*-
import os
import telebot
import time
import random
import threading
from emoji import emojize
from telebot import types
from pymongo import MongoClient
import traceback
from datetime import datetime
# --- Bot, database and game-wide state --------------------------------------
token = os.environ['TELEGRAM_TOKEN']
bot = telebot.TeleBot(token)
# Flood counter for one specific user (used only by the commented-out
# guard inside allmessages; reset every minute by timecheck).
britmsgs=0
client=MongoClient(os.environ['database'])
db=client.fishwars
users=db.users      # player documents
allseas=db.seas     # per-sea score documents
fighthours=[12, 16, 20, 0]  # MSK hours at which seafight() triggers
sealist=['crystal', 'black', 'moon']
officialchat=-1001418916571  # chat that receives battle reports
rest=False  # True during the 120s post-battle lockout
ban=[]      # user ids temporarily muted by the flood guard
# Alphabet for 32-char referral codes (digits + latin letters).
letters=['1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# Characters allowed in fish nicknames (latin + cyrillic letters only).
allletters=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'а', 'б', 'в', 'г', 'д', 'е', 'ё', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н',
'о', 'п', 'р', 'с', 'т', 'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я']
# Dead code: `try: pass` can never raise, so this except branch never runs;
# presumably a leftover error-reporting template.
try:
    pass
except Exception as e:
    print('Ошибка:\n', traceback.format_exc())
    bot.send_message(441399484, traceback.format_exc())
@bot.message_handler(commands=['update'])
def updd(m):
    """Admin-only /update: wipe every player's skills and inventory."""
    if m.from_user.id != 441399484:
        return
    users.update_many({}, {'$set': {'skills': {}, 'inventory': {}}})
    bot.send_message(441399484, 'yes')

@bot.message_handler(commands=['drop'])
def drop(m):
    """Admin-only /drop: reset every sea's score to zero."""
    if m.from_user.id != 441399484:
        return
    allseas.update_many({}, {'$set': {'score': 0}})
    bot.send_message(m.chat.id, 'Сбросил очки всем морям!')
@bot.message_handler(commands=['start'])
def start(m):
    """Register a new player: offer a sea, credit the referral inviter.

    Seas that currently hold the (unique) top score are hidden from the
    keyboard to balance populations; if several seas tie for first, none
    are hidden.
    """
    user=users.find_one({'id':m.from_user.id})
    global rest  # declared but unused here
    if user==None and m.from_user.id==m.chat.id:
        users.insert_one(createuser(m.from_user))
        kb=types.ReplyKeyboardMarkup(resize_keyboard=True)
        al=allseas.find({})
        bann=None  # unused
        # Find the highest score across all seas.
        sc=0
        for ids in al:
            if ids['score']>sc:
                sc=ids['score']
        al=allseas.find({})
        banlist=[]
        for ids in al:
            if ids['score']==sc:
                banlist.append(ids['name'])
        # A tie for first place hides nothing.
        if len(banlist)>1:
            banlist=[]
        print(banlist)
        for ids in allseas.find({}):
            if ids['name'] not in banlist:
                kb.add(types.KeyboardButton(sea_ru(ids['name'])))
        bot.send_message(m.chat.id, 'Добро пожаловать! Выберите, за какое из морей вы будете сражаться.', reply_markup=kb)
        # Referral handling: /start <code> links the newcomer to the inviter.
        try:
            ref=m.text.split(' ')[1]
            u=users.find({})
            friend=None
            for ids in u:
                if ids['referal']==ref:
                    friend=ids
            if friend!=None:
                users.update_one({'id':friend['id']},{'$push':{'friends':m.from_user.id}})
                users.update_one({'id':m.from_user.id},{'$set':{'inviter':friend['id']}})
                bot.send_message(friend['id'], m.from_user.first_name+' зашел в игру по вашей рефералке! Когда он поиграет немного, вы получите +1 к максимальной силе!')
        except Exception as e:
            # Also triggers for a plain /start with no code (IndexError).
            bot.send_message(441399484, traceback.format_exc())

def mainmenu(user):
    """Send the main-menu keyboard plus a status summary for `user`."""
    kb=types.ReplyKeyboardMarkup(resize_keyboard=True)
    kb.add(types.KeyboardButton('💢Атака'), types.KeyboardButton('🛡Защита'))
    kb.add(types.KeyboardButton('🍖🥬Питание'), types.KeyboardButton('ℹ️Инфо по игре'))
    kb.add(types.KeyboardButton('🐟Обо мне'))
    needed=countnextlvl(user['lastlvl'])
    text=''
    text+='🐟Имя рыбы: '+user['gamename']+'\n'
    # Players who have not picked a sea yet make sea_ru return None;
    # the concatenation then raises and the line is simply skipped.
    try:
        text+='🌊Родное море: '+sea_ru(user['sea'])+'\n'
    except:
        pass
    text+='💪Силы: '+str(user['strenght'])+'/'+str(user['maxstrenght'])+'\n'
    text+='🏅Уровень эволюции: '+str(user['lvl'])+'\n'
    text+='🧬Очки эволюции: '+str(user['evolpoints'])+'/'+str(needed)+'\n'
    text+='💢Атака: '+str(user['stats']['attack'])+'\n'
    text+='🛡Защита: '+str(user['stats']['def'])+'\n'
    text+='Реген сил: 1💪 / '+str(round(20*user['strenghtregencoef'], 2))+' минут\n'
    if user['freestatspoints']>0:
        text+='Доступны очки характеристик! Для использования - /upstats'+'\n'
    bot.send_message(user['id'], 'Главное меню.\n'+text, reply_markup=kb)
def blockbrit():
    # Flood guard: mute one specific user until the next minute tick.
    ban.append(512006137)
    bot.send_message(512006137, 'Вы заблокированы за отправку больше, чем 4х сообщений в минуту.')

@bot.message_handler()
def allmessages(m):
    """Catch-all message dispatcher for every registered player.

    Routes sea selection, the main-menu buttons (attack/defend/feed/info),
    stat upgrades, referral links, nickname changes and /score.  Game
    actions are ignored while a battle is running (rest == True), for
    banned users, and outside private chats.
    """
    global rest
    user=users.find_one({'id':m.from_user.id})
    if user!=None:
        # Disabled per-user flood counter (see blockbrit / timecheck).
        # if m.from_user.id==512006137:
        #     global britmsgs
        #     britmsgs+=1
        #     if britmsgs>4:
        #         blockbrit()
        if m.from_user.id not in ban:
            if rest==False:
                # Game actions only work in a private chat with the bot.
                if m.from_user.id==m.chat.id:
                    # First-time sea selection.
                    if user['sea']==None:
                        if m.text=='💎Кристальное':
                            users.update_one({'id':user['id']},{'$set':{'sea':'crystal'}})
                            bot.send_message(user['id'], 'Теперь вы сражаетесь за территорию 💎Кристального моря!')
                            mainmenu(user)
                        if m.text=='⚫️Чёрное':
                            users.update_one({'id':user['id']},{'$set':{'sea':'black'}})
                            bot.send_message(user['id'], 'Теперь вы сражаетесь за территорию ⚫️Чёрного моря!')
                            mainmenu(user)
                        if m.text=='🌙Лунное':
                            users.update_one({'id':user['id']},{'$set':{'sea':'moon'}})
                            bot.send_message(user['id'], 'Теперь вы сражаетесь за территорию 🌙Лунного моря!')
                            mainmenu(user)
                    # Queue to defend the player's own sea in the next battle.
                    if m.text=='🛡Защита':
                        users.update_one({'id':user['id']},{'$set':{'battle.action':'def'}})
                        bot.send_message(user['id'], 'Вы вплыли в оборону своего моря! Ждите следующего сражения.')
                    # Offer the other seas as attack targets.
                    if m.text=='💢Атака':
                        kb=types.ReplyKeyboardMarkup(resize_keyboard=True)
                        for ids in sealist:
                            if ids!=user['sea']:
                                kb.add(types.KeyboardButton(seatoemoj(sea=ids)))
                        bot.send_message(user['id'], 'Выберите цель.', reply_markup=kb)
                    # Target chosen by emoji: queue an attack on it.
                    if m.text=='🌙' or m.text=='💎' or m.text=='⚫️':
                        atksea=seatoemoj(emoj=m.text)
                        if user['sea']!=atksea:
                            users.update_one({'id':user['id']},{'$set':{'battle.action':'attack', 'battle.target':atksea}})
                            bot.send_message(user['id'], 'Вы приготовились к атаке на '+sea_ru(atksea)+' море! Ждите начала битвы.')
                        mainmenu(user)
                    if m.text=='ℹ️Инфо по игре':
                        bot.send_message(m.chat.id, 'Очередной неоконченный проект Пасюка. Пока что можно только выбрать море и сражаться за него, '+
                        'получая для него очки, повышать уровень и улучшать свои характеристики. Битвы в 12:00, 16:00, 20:00 и 24:00 по МСК.')
                    if m.text=='/menu':
                        mainmenu(user)
                    # Offer the stat-upgrade keyboard when free points exist.
                    if m.text=='/upstats':
                        if user['freestatspoints']>0:
                            text='Свободные очки: '+str(user['freestatspoints'])+'.\nВыберите характеристику для прокачки.'
                            kb=types.ReplyKeyboardMarkup(resize_keyboard=True)
                            kb.add(types.KeyboardButton('💢'), types.KeyboardButton('🛡'))
                            bot.send_message(user['id'], text, reply_markup=kb)
                        else:
                            bot.send_message(user['id'], 'Нет свободных очков!')
                    # Spend one free point on attack.
                    if m.text=='💢':
                        if user['freestatspoints']>0:
                            users.update_one({'id':user['id']},{'$inc':{'freestatspoints':-1, 'stats.attack':1}})
                            bot.send_message(user['id'], 'Вы стали сильнее!')
                        else:
                            bot.send_message(user['id'], 'Нет свободных очков!')
                        user=users.find_one({'id':m.from_user.id})
                        mainmenu(user)
                    # Spend one free point on defense.
                    if m.text=='🛡':
                        if user['freestatspoints']>0:
                            users.update_one({'id':user['id']},{'$inc':{'freestatspoints':-1, 'stats.def':1}})
                            bot.send_message(user['id'], 'Вы стали сильнее!')
                        else:
                            bot.send_message(user['id'], 'Нет свободных очков!')
                        user=users.find_one({'id':m.from_user.id})
                        mainmenu(user)
                    if m.text=='/referal':
                        # Lazily create the player's referral code on first use.
                        if user['referal']==None:
                            ref=genreferal(user)
                            users.update_one({'id':user['id']},{'$set':{'referal':ref}})
                        else:
                            ref=user['referal']
                        bot.send_message(user['id'], 'Вот ваша ссылка для приглашения друзей:\n'+'https://telegram.me/Fishwarsbot?start='+ref)
                    if m.text=='🍖🥬Питание':
                        kb=types.ReplyKeyboardMarkup(resize_keyboard=True)
                        kb.add(types.KeyboardButton('🔝Мелководье'), types.KeyboardButton('🕳Глубины'))
                        kb.add(types.KeyboardButton('⬅️Назад'))
                        bot.send_message(m.chat.id, 'Выберите, где будете пытаться искать пищу. Чем больше вы питаетесь, тем быстрее идёт развитие!', reply_markup=kb)
                    # Shallow-water feeding: costs 1 strength, resolves in coastfeed().
                    if m.text=='🔝Мелководье':
                        strenght=1
                        if user['strenght']>=strenght:
                            if user['status']=='free':
                                users.update_one({'id':user['id']},{'$set':{'status':'eating'}})
                                users.update_one({'id':user['id']},{'$inc':{'strenght':-strenght}})
                                bot.send_message(m.chat.id, 'Вы отправились искать пищу на побережье.')
                                t=threading.Timer(random.randint(60, 90), coastfeed, args=[user])
                                t.start()
                            else:
                                bot.send_message(user['id'], 'Вы уже заняты чем-то!')
                        else:
                            bot.send_message(user['id'], 'Недостаточно сил - даже рыбам нужен отдых!')
                        user=users.find_one({'id':m.from_user.id})
                        mainmenu(user)
                    # Deep-water feeding: costs 2 strength, resolves in depthsfeed().
                    if m.text=='🕳Глубины':
                        strenght=2
                        if user['strenght']>=strenght:
                            if user['status']=='free':
                                users.update_one({'id':user['id']},{'$set':{'status':'eating'}})
                                users.update_one({'id':user['id']},{'$inc':{'strenght':-strenght}})
                                bot.send_message(m.chat.id, 'Вы отправились искать пищу в глубины моря.')
                                t=threading.Timer(random.randint(60, 90), depthsfeed, args=[user])
                                t.start()
                            else:
                                bot.send_message(user['id'], 'Вы уже заняты чем-то!')
                        else:
                            bot.send_message(user['id'], 'Недостаточно сил - даже рыбам нужен отдых!')
                        user=users.find_one({'id':m.from_user.id})
                        mainmenu(user)
                    # Nickname change: limited attempts, 2-20 chars, letters only.
                    if '/fishname' in m.text:
                        try:
                            if user['changename']>0:
                                no=0
                                name=m.text.split(' ')[1]
                                if len(name)<=20 and len(name)>1:
                                    for ids in name:
                                        if ids.lower() not in allletters:
                                            no=1
                                else:
                                    no=1
                                if no==0:
                                    users.update_one({'id':user['id']},{'$set':{'gamename':name}})
                                    users.update_one({'id':user['id']},{'$inc':{'changename':-1}})
                                    bot.send_message(m.chat.id, 'Вы успешно сменили имя на "*'+name+'*"!', parse_mode='markdown')
                                else:
                                    bot.send_message(m.chat.id, 'Длина ника должна быть от 2х до 20 символов и содержать только русские и английские буквы!')
                            else:
                                bot.send_message(m.chat.id, 'Попытки сменить ник закончились!')
                        except:
                            # /fishname without an argument: ignore silently.
                            pass
                    if m.text=='🐟Обо мне' or m.text=='⬅️Назад':
                        mainmenu(user)
                    if m.text=='/score':
                        seas=allseas.find({})
                        text=''
                        for ids in seas:
                            text+=sea_ru(ids['name'])+' море: '+str(ids['score'])+' очков\n'
                        bot.send_message(m.chat.id, text)
            else:
                if m.chat.id==m.from_user.id:
                    bot.send_message(m.chat.id, 'В данный момент идёт битва морей!')
def genreferal(user):
    """Generate a 32-character referral code unique across all players.

    The `user` parameter is unused but kept for interface compatibility
    with existing callers (the old body also never used it; it merely
    created an unused cursor `u`).
    """
    # Collect every code already handed out so the new one is unique.
    taken = set()
    for ids in users.find({}):
        taken.add(ids['referal'])
    while True:
        ref = ''.join(random.choice(letters) for _ in range(32))
        if ref not in taken:
            return ref
def coastfeed(user):
    """Resolve a shallow-water feeding trip (threading.Timer callback).

    Frees the player, rolls the success chance (70% x agility, +0.5%/lvl
    from 'slow'), and on success awards 0.8x-1.2x of the base yield.
    Near-duplicate of depthsfeed() (different chance/coef/texts) —
    consider extracting a shared helper.
    """
    users.update_one({'id':user['id']},{'$set':{'status':'free'}})
    luckytexts=['На береге вы заметили стаю мальков и решили, что это будет отличным перекусом.',
    'На поверхности плавал труп какой-то неизвестной рыбы. Его вы и решили сьесть. Рыбы вообще едят всё, что видят.']
    falsetexts=['Пока вы добирались до берега, вы почувствовали активные вибрации неподалеку, означающие, что кого-то едят. Чтобы '+\
    'самим не стать кормом, вы вернулись в безопасное место.']
    chance=70*user['agility']
    if 'slow' in user['skills']:
        chance+=user['skills']['slow']['lvl']*0.5
    coef=1  # shallow water: base yield multiplier
    if random.randint(1,100)<=chance:
        i=user['recievepoints']*user['pointmodifer']*coef
        bottompoints=int(i*0.8)
        toppoints=int(i*1.2)
        points=random.randint(bottompoints, toppoints)
        if points<=0:
            points=1
        text=random.choice(luckytexts)
        text+='\nПолучено:\n'+'*Очки эволюции*: '+str(points)+'🧬'
        bot.send_message(user['id'], text, parse_mode='markdown')
        recieveexp(user, points)
    else:
        text=random.choice(falsetexts)
        bot.send_message(user['id'], text, parse_mode='markdown')

def depthsfeed(user):
    """Resolve a deep-water feeding trip (threading.Timer callback).

    Riskier than coastfeed: 55% base chance but 2.5x the yield.
    """
    users.update_one({'id':user['id']},{'$set':{'status':'free'}})
    luckytexts=['В глубинах моря вы нашли стаю крабов. Пришлось потрудиться, чтобы не быть покусанными, но в итоге вы наелись.',
    'Вы нашли какие-то вкусные на вид растения. Для получения очков эволюции сойдёт.']
    falsetexts=['В один момент вашего заплыва вы ощутили, что давление становится слишком сильным. Если бы вы поплыли дальше, то вас просто сплющило бы.']
    chance=55*user['agility']
    if 'slow' in user['skills']:
        chance+=user['skills']['slow']['lvl']*0.5
    coef=2.5  # deep water: higher reward for the lower success chance
    if random.randint(1,100)<=chance:
        i=user['recievepoints']*user['pointmodifer']*coef
        bottompoints=int(i*0.8)
        toppoints=int(i*1.2)
        points=random.randint(bottompoints, toppoints)
        if points<=0:
            points=1
        text=random.choice(luckytexts)
        text+='\nПолучено:\n'+'*Очки эволюции*: '+str(points)+'🧬'
        bot.send_message(user['id'], text, parse_mode='markdown')
        recieveexp(user, points)
    else:
        text=random.choice(falsetexts)
        bot.send_message(user['id'], text, parse_mode='markdown')
def recieveexp(user, exp):
    """Credit `exp` evolution points; handle level-ups and the referral bonus."""
    users.update_one({'id':user['id']},{'$inc':{'evolpoints':exp}})
    c=int(countnextlvl(user['lastlvl']))
    # `user` is the pre-increment snapshot, so compare snapshot + exp.
    if user['evolpoints']+exp>=c:
        users.update_one({'id':user['id']},{'$set':{'lastlvl':c, 'recievepoints':countnextpointrecieve(user['recievepoints'])}})
        users.update_one({'id':user['id']},{'$inc':{'lvl':1, 'freeevolpoints':2, 'freestatspoints':1}})
        bot.send_message(user['id'], 'Поздравляем! Вы эволюционировали! Прокачка скиллов - /skills (пока что недоступна).')
        user=users.find_one({'id':user['id']})
        # Reaching level 3 rewards the player's inviter (+1 max strength).
        if user['lvl']==3 and user['inviter']!=None:
            users.update_one({'id':user['inviter']},{'$inc':{'maxstrenght':1}})
            bot.send_message(user['inviter'], user['gamename']+' освоился в игре! Вы получаете +1 к выносливости.')
def seatoemoj(sea=None, emoj=None):
    """Translate between a sea's internal name and its emoji.

    Pass exactly one of `sea` / `emoj`.  Returns None when neither
    argument matches a known sea (same as the original if-chain).
    """
    sea_to_emoji = {'moon': '🌙', 'crystal': '💎', 'black': '⚫️'}
    emoji_to_sea = {'⚫️': 'black', '💎': 'crystal', '🌙': 'moon'}
    # `sea` takes precedence, mirroring the original check order.
    if sea in sea_to_emoji:
        return sea_to_emoji[sea]
    return emoji_to_sea.get(emoj)
def endrest():
    # Timer callback: lift the post-battle lockout set in timecheck().
    global rest
    rest=False
def seafight():
    """Resolve one battle round for all seas.

    Builds per-sea rosters from each player's queued battle action,
    totals defense vs. attack power (applying the 'fat', 'steelleather'
    and 'sharpteeth' skills), then awards score: +8 to a sea that holds,
    +3 to every sea that joined a successful raid.  Scores are persisted
    and the report is posted to the official chat.
    """
    seas={}
    cusers=users.find({})
    for ids in sealist:
        seas.update(createsea(ids))
    # Sort every player into their queued role for this round.
    for ids in cusers:
        if ids['battle']['action']=='def':
            seas[ids['sea']]['defers'].update({ids['id']:ids})
        elif ids['battle']['action']=='attack':
            seas[ids['battle']['target']]['attackers'].update({ids['id']:ids})
    # Total defense and attack power per sea.
    for ids in seas:
        sea=seas[ids]
        print(sea)
        for idss in sea['defers']:
            user=sea['defers'][idss]
            defpower=user['stats']['def']
            # 'fat' adds 1% defense per level.
            if 'fat' in user['skills']:
                defpower+=defpower*user['skills']['fat']['lvl']*0.01
            # 'steelleather': 0.5%-per-level chance to halve one attacker.
            if 'steelleather' in user['skills']:
                if random.randint(1,1000)<=user['skills']['steelleather']['lvl']*0.5*10:
                    if len(seas[ids]['attackers'])>0:
                        # Bug fix: random.choice() needs a sequence, not a
                        # dict, and the attacker's power lives in
                        # stats['attack'] (the old trgt['attack'] key never
                        # existed, so this branch always crashed).
                        trgt=random.choice(list(seas[ids]['attackers'].values()))
                        trgt['stats']['attack']=trgt['stats']['attack']/2
                        bot.send_message(user['id'], 'Своей кожей вы заблокировали рыбу '+trgt['gamename']+', снизив ее характеристики на 50%!')
            sea['defpower']+=defpower
        for idss in sea['attackers']:
            user=sea['attackers'][idss]
            # 'sharpteeth' adds 1% attack per level.
            if 'sharpteeth' in user['skills']:
                user['stats']['attack']+=user['stats']['attack']*user['skills']['sharpteeth']['lvl']*0.01
            sea['attackerspower']+=user['stats']['attack']
        if sea['defpower']<sea['attackerspower']:
            sea['saved']=False
    # Build the report and award score.
    text=''
    for ids in seas:
        sea=seas[ids]
        if sea['saved']==False:
            # Each distinct attacking sea earns +3 for breaking the defense.
            # (The old no-op `sea['score']+=0` was dropped.)
            scores=[]
            for idss in sea['attackers']:
                atker=sea['attackers'][idss]
                if atker['sea'] not in scores:
                    scores.append(atker['sea'])
                    seas[atker['sea']]['score']+=3
            text+='💢'+sea_ru(sea['name'])+' море потерпело поражение в битве! Топ атакующих:\n'
            who='attackers'
            stat='attack'
            text+=battletext(sea, who, stat)
            text+='Топ защитников:\n'
            who='defers'
            stat='def'
            text+=battletext(sea, who, stat)
        else:
            # A successful defense earns the sea +8.
            sea['score']+=8
            text+='🛡'+sea_ru(sea['name'])+' море отстояло свою территорию! Топ защитников:\n'
            who='defers'
            stat='def'
            text+=battletext(sea, who, stat)
            text+='Топ атакующих:\n'
            who='attackers'
            stat='attack'
            text+=battletext(sea, who, stat)
    text+='Начисленные очки:\n\n'
    for ids in seas:
        text+=sea_ru(seas[ids]['name'])+' море: '+str(seas[ids]['score'])+' очков\n'
        allseas.update_one({'name':seas[ids]['name']},{'$inc':{'score':seas[ids]['score']}})
    # Clear everyone's queued action for the next round.
    users.update_many({},{'$set':{'battle.target':None, 'battle.action':None}})
    bot.send_message(officialchat, 'Результаты битвы:\n\n'+text)
def battletext(sea, who, stat):
    """Return a 'top 5' line for one side of a battle.

    sea  -- the per-sea dict built by createsea()/seafight()
    who  -- 'attackers' or 'defers'
    stat -- 'attack' or 'def'

    The old while-loop never incremented its counter when fewer than 5
    distinct participants (with positive stats) existed, spinning forever;
    this version stops as soon as no fresh candidate remains.
    """
    top=5
    text=''
    alreadyintext=[]
    for _ in range(top):
        intext=None
        maxstat=0
        # Pick the highest-stat participant not yet listed.
        for idss in sea[who]:
            user=sea[who][idss]
            if user['stats'][stat]>maxstat and user['id'] not in alreadyintext:
                maxstat=user['stats'][stat]
                intext=user
        if intext is None:
            break  # fewer candidates than `top` — stop instead of looping forever
        alreadyintext.append(intext['id'])
        text+=intext['gamename']
        text+=', '
    if len(sea[who])>0:
        text=text[:len(text)-2]
        text+='.'
    text+='\n\n'
    return text
def createuser(user):
    """Build the initial database document for a freshly registered player.

    `user` is a Telegram user object; only its `id` and `first_name`
    are read.
    """
    return {
        'id': user.id,
        'name': user.first_name,
        'gamename': user.first_name,
        'stats': {'attack': 1, 'def': 1},
        'sea': None,               # chosen via the /start keyboard
        'status': 'free',          # 'free' or 'eating'
        'maxstrenght': 8,
        'strenght': 8,
        'agility': 1,              # 1 = 100%
        'battle': {'action': None, 'target': None},
        'evolpoints': 0,
        'lvl': 1,
        'inventory': {},
        'freestatspoints': 0,
        'freeevolpoints': 0,
        'lastlvl': 0,
        'strenghtregencoef': 1,    # lower is better
        'laststrenghtregen': None,
        'recievepoints': 1,        # 1 = 1 exp
        'pointmodifer': 1,         # 1 = 100%
        'referal': None,
        'changename': 3,           # remaining nickname changes
        'skills': {},
    }
def regenstrenght(user):
    # Restore one strength point and stamp the regen time.
    # The +3*3600 offset matches the MSK-shifted clock in timecheck(), so
    # both sides of the comparison carry the same shift.
    users.update_one({'id':user['id']},{'$inc':{'strenght':1}})
    users.update_one({'id':user['id']},{'$set':{'laststrenghtregen':time.time()+3*3600}})
def countnextlvl(lastlvl):
    """Evolution points required for the next level, given the previous threshold."""
    if lastlvl == 0:
        return 10
    return int(lastlvl * 2.9)

def countnextpointrecieve(recievepoints):
    """Scale the per-meal evolution-point yield after a level-up."""
    return recievepoints * 2.1
def sea_ru(sea):
    """Return the Russian display name of a sea, or None if unknown.

    A dict lookup replaces the original if-chain; the implicit None for
    unrecognized input is preserved via .get().
    """
    names = {
        'crystal': '💎Кристальное',
        'black': '⚫️Чёрное',
        'moon': '🌙Лунное',
    }
    return names.get(sea)
def createsea(sea):
    """Build the per-battle bookkeeping record for one sea, keyed by its name."""
    record = {
        'name': sea,
        'defpower': 0,
        'attackerspower': 0,
        'defers': {},     # defending players, keyed by user id
        'attackers': {},  # attacking players, keyed by user id
        'saved': True,    # flips to False when attack power exceeds defense
        'score': 0,
    }
    return {sea: record}
def timecheck():
    """Once-a-second scheduler loop (re-arms itself via threading.Timer).

    Starts a sea battle at the configured MSK hours, regenerates player
    strength, and resets the per-minute flood counters.
    """
    # All timestamps in this bot carry a +3h offset to get MSK wall time.
    globaltime=time.time()+3*3600
    ctime=str(datetime.fromtimestamp(globaltime)).split(' ')[1]
    global rest
    chour=int(ctime.split(':')[0])
    cminute=int(ctime.split(':')[1])
    csecond=float(ctime.split(':')[2])
    csecond=round(csecond, 0)
    # Fire the battle once at HH:00; `rest` blocks re-triggering for 120s.
    if chour in fighthours and rest==False and cminute==0:
        seafight()
        rest=True
        t=threading.Timer(120, endrest)
        t.start()
    # Strength regeneration: 1 point per 20*coef minutes.
    for ids in users.find({}):
        user=ids
        if user['strenght']<user['maxstrenght']:
            if user['laststrenghtregen']==None:
                regenstrenght(user)
            elif globaltime>=user['laststrenghtregen']+20*60*user['strenghtregencoef']:
                regenstrenght(user)
    # At the top of every minute, reset the flood guard.
    if csecond==0:
        global britmsgs
        britmsgs=0
        global ban
        ban=[]
    t=threading.Timer(1, timecheck)
    t.start()
# --- Startup: kick off the scheduler, clear stale states, begin polling. ----
timecheck()
users.update_many({},{'$set':{'status':'free'}})
print('7777')
bot.polling(none_stop=True,timeout=600)
| [] | [] | [
"database",
"TELEGRAM_TOKEN"
] | [] | ["database", "TELEGRAM_TOKEN"] | python | 2 | 0 | |
tencentcloud/iotexplorer/v20190423/client.go | // Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v20190423
import (
"context"
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common"
tchttp "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http"
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile"
)
// APIVersion is the iotexplorer API version this client speaks.
const APIVersion = "2019-04-23"

// Client is the iotexplorer API client; it embeds the common SDK client,
// which carries credentials, region and HTTP transport.
type Client struct {
	common.Client
}
// NewClientWithSecretId builds a client from a raw secret id/key pair
// using the default client profile.
//
// Deprecated: use NewClient with a common.CredentialIface instead.
func NewClientWithSecretId(secretId, secretKey, region string) (client *Client, err error) {
	cpf := profile.NewClientProfile()
	client = &Client{}
	client.Init(region).WithSecretId(secretId, secretKey).WithProfile(cpf)
	return
}
// NewClient builds an iotexplorer client for the given region using the
// supplied credential and client profile.
func NewClient(credential common.CredentialIface, region string, clientProfile *profile.ClientProfile) (client *Client, err error) {
    c := &Client{}
    c.Init(region).WithCredential(credential).WithProfile(clientProfile)
    return c, nil
}
// NewBindDevicesRequest creates a BindDevices request pre-wired with the
// iotexplorer API metadata.
func NewBindDevicesRequest() (request *BindDevicesRequest) {
    request = &BindDevicesRequest{BaseRequest: &tchttp.BaseRequest{}}
    request.Init().WithApiInfo("iotexplorer", APIVersion, "BindDevices")
    return request
}

// NewBindDevicesResponse creates an empty BindDevices response.
func NewBindDevicesResponse() (response *BindDevicesResponse) {
    response = &BindDevicesResponse{BaseResponse: &tchttp.BaseResponse{}}
    return response
}

// BindDevices binds a batch of sub-devices to a gateway device.
//
// Possible error codes: InternalError, InternalError.InternalRPCError,
// InvalidParameter, InvalidParameterValue,
// InvalidParameterValue.DeviceIsNotGateway,
// ResourceNotFound.StudioProductNotExist, UnauthorizedOperation,
// UnauthorizedOperation.NoPermissionToStudioProduct.
func (c *Client) BindDevices(request *BindDevicesRequest) (response *BindDevicesResponse, err error) {
    if request == nil {
        request = NewBindDevicesRequest()
    }
    response = NewBindDevicesResponse()
    return response, c.Send(request, response)
}

// BindDevicesWithContext is BindDevices with ctx attached to the request
// before it is sent.
func (c *Client) BindDevicesWithContext(ctx context.Context, request *BindDevicesRequest) (response *BindDevicesResponse, err error) {
    if request == nil {
        request = NewBindDevicesRequest()
    }
    request.SetContext(ctx)
    response = NewBindDevicesResponse()
    return response, c.Send(request, response)
}
// NewBindProductsRequest creates a BindProducts request pre-wired with the
// iotexplorer API metadata.
func NewBindProductsRequest() (request *BindProductsRequest) {
    request = &BindProductsRequest{BaseRequest: &tchttp.BaseRequest{}}
    request.Init().WithApiInfo("iotexplorer", APIVersion, "BindProducts")
    return request
}

// NewBindProductsResponse creates an empty BindProducts response.
func NewBindProductsResponse() (response *BindProductsResponse) {
    response = &BindProductsResponse{BaseResponse: &tchttp.BaseResponse{}}
    return response
}

// BindProducts binds a batch of sub-products to a gateway product.
//
// Possible error codes: FailedOperation.SomeProductIsAlreadyBinded,
// InternalError, InternalError.InternalRPCError, InvalidParameter,
// InvalidParameter.ProductIsNotGateway, InvalidParameterValue,
// InvalidParameterValue.ProductIsNotGateway,
// LimitExceeded.BindProductsExceedLimit, ResourceNotFound.ProductNotExist,
// ResourceNotFound.ProjectNotExist, ResourceNotFound.StudioProductNotExist,
// UnauthorizedOperation.
func (c *Client) BindProducts(request *BindProductsRequest) (response *BindProductsResponse, err error) {
    if request == nil {
        request = NewBindProductsRequest()
    }
    response = NewBindProductsResponse()
    return response, c.Send(request, response)
}

// BindProductsWithContext is BindProducts with ctx attached to the request
// before it is sent.
func (c *Client) BindProductsWithContext(ctx context.Context, request *BindProductsRequest) (response *BindProductsResponse, err error) {
    if request == nil {
        request = NewBindProductsRequest()
    }
    request.SetContext(ctx)
    response = NewBindProductsResponse()
    return response, c.Send(request, response)
}
// NewCallDeviceActionAsyncRequest creates a CallDeviceActionAsync request
// pre-wired with the iotexplorer API metadata.
func NewCallDeviceActionAsyncRequest() (request *CallDeviceActionAsyncRequest) {
    request = &CallDeviceActionAsyncRequest{BaseRequest: &tchttp.BaseRequest{}}
    request.Init().WithApiInfo("iotexplorer", APIVersion, "CallDeviceActionAsync")
    return request
}

// NewCallDeviceActionAsyncResponse creates an empty CallDeviceActionAsync response.
func NewCallDeviceActionAsyncResponse() (response *CallDeviceActionAsyncResponse) {
    response = &CallDeviceActionAsyncResponse{BaseResponse: &tchttp.BaseResponse{}}
    return response
}

// CallDeviceActionAsync invokes a device action asynchronously.
//
// Possible error codes: FailedOperation.ActionUnreachable, InternalError,
// InternalError.InternalRPCError, InternalError.InternalServerException,
// InvalidParameter.ActionInputParamsInvalid,
// InvalidParameterValue.ActionNilOrNotExist,
// InvalidParameterValue.ModelDefineNil,
// InvalidParameterValue.ModelPropertyNotExist,
// ResourceNotFound.DeviceNotExist, ResourceNotFound.StudioProductNotExist,
// UnauthorizedOperation, UnauthorizedOperation.NoPermissionToStudioProduct,
// UnsupportedOperation.InstanceIsolated.
func (c *Client) CallDeviceActionAsync(request *CallDeviceActionAsyncRequest) (response *CallDeviceActionAsyncResponse, err error) {
    if request == nil {
        request = NewCallDeviceActionAsyncRequest()
    }
    response = NewCallDeviceActionAsyncResponse()
    return response, c.Send(request, response)
}

// CallDeviceActionAsyncWithContext is CallDeviceActionAsync with ctx attached
// to the request before it is sent.
func (c *Client) CallDeviceActionAsyncWithContext(ctx context.Context, request *CallDeviceActionAsyncRequest) (response *CallDeviceActionAsyncResponse, err error) {
    if request == nil {
        request = NewCallDeviceActionAsyncRequest()
    }
    request.SetContext(ctx)
    response = NewCallDeviceActionAsyncResponse()
    return response, c.Send(request, response)
}
// NewCallDeviceActionSyncRequest creates a CallDeviceActionSync request
// pre-wired with the iotexplorer API metadata.
func NewCallDeviceActionSyncRequest() (request *CallDeviceActionSyncRequest) {
    request = &CallDeviceActionSyncRequest{BaseRequest: &tchttp.BaseRequest{}}
    request.Init().WithApiInfo("iotexplorer", APIVersion, "CallDeviceActionSync")
    return request
}

// NewCallDeviceActionSyncResponse creates an empty CallDeviceActionSync response.
func NewCallDeviceActionSyncResponse() (response *CallDeviceActionSyncResponse) {
    response = &CallDeviceActionSyncResponse{BaseResponse: &tchttp.BaseResponse{}}
    return response
}

// CallDeviceActionSync invokes a device action synchronously.
//
// Possible error codes: FailedOperation.ActionUnreachable,
// FailedOperation.Timeout, InternalError, InternalError.DBOpertaionError,
// InternalError.InternalRPCError, InternalError.InternalServerException,
// InvalidParameter.ActionInputParamsInvalid,
// InvalidParameterValue.ActionNilOrNotExist,
// InvalidParameterValue.ModelDefineNil,
// InvalidParameterValue.ModelPropertyNotExist,
// ResourceNotFound.DeviceNotExist, ResourceNotFound.StudioProductNotExist,
// UnauthorizedOperation, UnauthorizedOperation.NoPermissionToStudioProduct,
// UnsupportedOperation.InstanceIsolated.
func (c *Client) CallDeviceActionSync(request *CallDeviceActionSyncRequest) (response *CallDeviceActionSyncResponse, err error) {
    if request == nil {
        request = NewCallDeviceActionSyncRequest()
    }
    response = NewCallDeviceActionSyncResponse()
    return response, c.Send(request, response)
}

// CallDeviceActionSyncWithContext is CallDeviceActionSync with ctx attached
// to the request before it is sent.
func (c *Client) CallDeviceActionSyncWithContext(ctx context.Context, request *CallDeviceActionSyncRequest) (response *CallDeviceActionSyncResponse, err error) {
    if request == nil {
        request = NewCallDeviceActionSyncRequest()
    }
    request.SetContext(ctx)
    response = NewCallDeviceActionSyncResponse()
    return response, c.Send(request, response)
}
// NewControlDeviceDataRequest creates a ControlDeviceData request pre-wired
// with the iotexplorer API metadata.
func NewControlDeviceDataRequest() (request *ControlDeviceDataRequest) {
    request = &ControlDeviceDataRequest{BaseRequest: &tchttp.BaseRequest{}}
    request.Init().WithApiInfo("iotexplorer", APIVersion, "ControlDeviceData")
    return request
}

// NewControlDeviceDataResponse creates an empty ControlDeviceData response.
func NewControlDeviceDataResponse() (response *ControlDeviceDataResponse) {
    response = &ControlDeviceDataResponse{BaseResponse: &tchttp.BaseResponse{}}
    return response
}

// ControlDeviceData sets (controls) a device's property data, addressed by
// product ID and device name.
//
// Possible error codes: FailedOperation, InternalError,
// InternalError.DBOpertaionError, InternalError.InternalRPCError,
// InvalidParameterValue,
// InvalidParameterValue.ModelDefineEventPropNameError,
// InvalidParameterValue.ModelDefineInvalid,
// InvalidParameterValue.ModelDefineNil, ResourceNotFound.DeviceNotExist,
// ResourceNotFound.ProductNotExist,
// ResourceNotFound.ProductOrDeviceNotExist,
// ResourceNotFound.StudioProductNotExist, UnauthorizedOperation,
// UnauthorizedOperation.NoPermissionToStudioProduct, UnsupportedOperation,
// UnsupportedOperation.InstanceIsolated, UnsupportedOperation.LoRaNoUpLink,
// UnsupportedOperation.LoRaNotActivate.
func (c *Client) ControlDeviceData(request *ControlDeviceDataRequest) (response *ControlDeviceDataResponse, err error) {
    if request == nil {
        request = NewControlDeviceDataRequest()
    }
    response = NewControlDeviceDataResponse()
    return response, c.Send(request, response)
}

// ControlDeviceDataWithContext is ControlDeviceData with ctx attached to the
// request before it is sent.
func (c *Client) ControlDeviceDataWithContext(ctx context.Context, request *ControlDeviceDataRequest) (response *ControlDeviceDataResponse, err error) {
    if request == nil {
        request = NewControlDeviceDataRequest()
    }
    request.SetContext(ctx)
    response = NewControlDeviceDataResponse()
    return response, c.Send(request, response)
}
// NewCreateBatchProductionRequest creates a CreateBatchProduction request
// pre-wired with the iotexplorer API metadata.
func NewCreateBatchProductionRequest() (request *CreateBatchProductionRequest) {
    request = &CreateBatchProductionRequest{BaseRequest: &tchttp.BaseRequest{}}
    request.Init().WithApiInfo("iotexplorer", APIVersion, "CreateBatchProduction")
    return request
}

// NewCreateBatchProductionResponse creates an empty CreateBatchProduction response.
func NewCreateBatchProductionResponse() (response *CreateBatchProductionResponse) {
    response = &CreateBatchProductionResponse{BaseResponse: &tchttp.BaseResponse{}}
    return response
}

// CreateBatchProduction creates a batch-production task for mass device
// provisioning.
//
// Possible error codes: FailedOperation.ProductNotReleased, InternalError,
// InternalError.DBOpertaionError, InternalError.InternalRPCError,
// InvalidParameter, InvalidParameterValue,
// LimitExceeded.BatchProductionExceedLimit,
// LimitExceeded.BatchProductionNull,
// ResourceInsufficient.BatchProductionIsRunning,
// ResourceNotFound.CannotGetFromUrl, ResourceNotFound.DeviceDupKeyExist,
// ResourceNotFound.ProjectNotExist, ResourceNotFound.StudioProductNotExist,
// UnauthorizedOperation.NoPermissionToProject,
// UnauthorizedOperation.NoPermissionToStudioProduct,
// UnsupportedOperation.DeviceExceedLimit,
// UnsupportedOperation.InstanceIsolated.
func (c *Client) CreateBatchProduction(request *CreateBatchProductionRequest) (response *CreateBatchProductionResponse, err error) {
    if request == nil {
        request = NewCreateBatchProductionRequest()
    }
    response = NewCreateBatchProductionResponse()
    return response, c.Send(request, response)
}

// CreateBatchProductionWithContext is CreateBatchProduction with ctx attached
// to the request before it is sent.
func (c *Client) CreateBatchProductionWithContext(ctx context.Context, request *CreateBatchProductionRequest) (response *CreateBatchProductionResponse, err error) {
    if request == nil {
        request = NewCreateBatchProductionRequest()
    }
    request.SetContext(ctx)
    response = NewCreateBatchProductionResponse()
    return response, c.Send(request, response)
}
// NewCreateDeviceRequest creates a CreateDevice request pre-wired with the
// iotexplorer API metadata.
func NewCreateDeviceRequest() (request *CreateDeviceRequest) {
    request = &CreateDeviceRequest{BaseRequest: &tchttp.BaseRequest{}}
    request.Init().WithApiInfo("iotexplorer", APIVersion, "CreateDevice")
    return request
}

// NewCreateDeviceResponse creates an empty CreateDevice response.
func NewCreateDeviceResponse() (response *CreateDeviceResponse) {
    response = &CreateDeviceResponse{BaseResponse: &tchttp.BaseResponse{}}
    return response
}

// CreateDevice creates a device.
//
// Possible error codes: InternalError, InternalError.InternalRPCError,
// InternalError.InternalServerExceptionDB, InvalidParameter,
// InvalidParameterValue.DeviceAlreadyExist,
// LimitExceeded.DeviceExceedLimit, ResourceNotFound.InstanceNotExist,
// ResourceNotFound.ProductNotExist, ResourceNotFound.StudioProductNotExist,
// UnauthorizedOperation, UnauthorizedOperation.NoPermissionToStudioProduct,
// UnauthorizedOperation.ProductNotSupportPSK,
// UnauthorizedOperation.UserLicenseExceedLimit,
// UnsupportedOperation.DeviceDupKeyExist,
// UnsupportedOperation.DeviceExceedLimit,
// UnsupportedOperation.InstanceIsolated.
func (c *Client) CreateDevice(request *CreateDeviceRequest) (response *CreateDeviceResponse, err error) {
    if request == nil {
        request = NewCreateDeviceRequest()
    }
    response = NewCreateDeviceResponse()
    return response, c.Send(request, response)
}

// CreateDeviceWithContext is CreateDevice with ctx attached to the request
// before it is sent.
func (c *Client) CreateDeviceWithContext(ctx context.Context, request *CreateDeviceRequest) (response *CreateDeviceResponse, err error) {
    if request == nil {
        request = NewCreateDeviceRequest()
    }
    request.SetContext(ctx)
    response = NewCreateDeviceResponse()
    return response, c.Send(request, response)
}
// NewCreateFenceBindRequest creates a CreateFenceBind request pre-wired with
// the iotexplorer API metadata.
func NewCreateFenceBindRequest() (request *CreateFenceBindRequest) {
    request = &CreateFenceBindRequest{BaseRequest: &tchttp.BaseRequest{}}
    request.Init().WithApiInfo("iotexplorer", APIVersion, "CreateFenceBind")
    return request
}

// NewCreateFenceBindResponse creates an empty CreateFenceBind response.
func NewCreateFenceBindResponse() (response *CreateFenceBindResponse) {
    response = &CreateFenceBindResponse{BaseResponse: &tchttp.BaseResponse{}}
    return response
}

// CreateFenceBind creates geofence binding information.
//
// Possible error codes: InternalError,
// InternalError.InternalServerExceptionDB, InvalidParameterValue,
// UnauthorizedOperation, UnauthorizedOperation.NoPermissionToStudioFence.
func (c *Client) CreateFenceBind(request *CreateFenceBindRequest) (response *CreateFenceBindResponse, err error) {
    if request == nil {
        request = NewCreateFenceBindRequest()
    }
    response = NewCreateFenceBindResponse()
    return response, c.Send(request, response)
}

// CreateFenceBindWithContext is CreateFenceBind with ctx attached to the
// request before it is sent.
func (c *Client) CreateFenceBindWithContext(ctx context.Context, request *CreateFenceBindRequest) (response *CreateFenceBindResponse, err error) {
    if request == nil {
        request = NewCreateFenceBindRequest()
    }
    request.SetContext(ctx)
    response = NewCreateFenceBindResponse()
    return response, c.Send(request, response)
}
// NewCreateLoRaFrequencyRequest creates a CreateLoRaFrequency request
// pre-wired with the iotexplorer API metadata.
func NewCreateLoRaFrequencyRequest() (request *CreateLoRaFrequencyRequest) {
    request = &CreateLoRaFrequencyRequest{BaseRequest: &tchttp.BaseRequest{}}
    request.Init().WithApiInfo("iotexplorer", APIVersion, "CreateLoRaFrequency")
    return request
}

// NewCreateLoRaFrequencyResponse creates an empty CreateLoRaFrequency response.
func NewCreateLoRaFrequencyResponse() (response *CreateLoRaFrequencyResponse) {
    response = &CreateLoRaFrequencyResponse{BaseResponse: &tchttp.BaseResponse{}}
    return response
}

// CreateLoRaFrequency creates a custom LoRa frequency point.
//
// Possible error codes: InvalidParameterValue.LoRaFreqParmsError,
// LimitExceeded.StudioLoRaFreqExceedLimit,
// UnsupportedOperation.LoRaFreqDupKeyExist.
func (c *Client) CreateLoRaFrequency(request *CreateLoRaFrequencyRequest) (response *CreateLoRaFrequencyResponse, err error) {
    if request == nil {
        request = NewCreateLoRaFrequencyRequest()
    }
    response = NewCreateLoRaFrequencyResponse()
    return response, c.Send(request, response)
}

// CreateLoRaFrequencyWithContext is CreateLoRaFrequency with ctx attached to
// the request before it is sent.
func (c *Client) CreateLoRaFrequencyWithContext(ctx context.Context, request *CreateLoRaFrequencyRequest) (response *CreateLoRaFrequencyResponse, err error) {
    if request == nil {
        request = NewCreateLoRaFrequencyRequest()
    }
    request.SetContext(ctx)
    response = NewCreateLoRaFrequencyResponse()
    return response, c.Send(request, response)
}
// NewCreateLoRaGatewayRequest creates a CreateLoRaGateway request pre-wired
// with the iotexplorer API metadata.
func NewCreateLoRaGatewayRequest() (request *CreateLoRaGatewayRequest) {
    request = &CreateLoRaGatewayRequest{BaseRequest: &tchttp.BaseRequest{}}
    request.Init().WithApiInfo("iotexplorer", APIVersion, "CreateLoRaGateway")
    return request
}

// NewCreateLoRaGatewayResponse creates an empty CreateLoRaGateway response.
func NewCreateLoRaGatewayResponse() (response *CreateLoRaGatewayResponse) {
    response = &CreateLoRaGatewayResponse{BaseResponse: &tchttp.BaseResponse{}}
    return response
}

// CreateLoRaGateway creates a new LoRa gateway device.
//
// Possible error codes: InternalError, InternalError.DBOpertaionError,
// InternalError.InternalRPCError, ResourceNotFound.GatewayDupKeyExist.
func (c *Client) CreateLoRaGateway(request *CreateLoRaGatewayRequest) (response *CreateLoRaGatewayResponse, err error) {
    if request == nil {
        request = NewCreateLoRaGatewayRequest()
    }
    response = NewCreateLoRaGatewayResponse()
    return response, c.Send(request, response)
}

// CreateLoRaGatewayWithContext is CreateLoRaGateway with ctx attached to the
// request before it is sent.
func (c *Client) CreateLoRaGatewayWithContext(ctx context.Context, request *CreateLoRaGatewayRequest) (response *CreateLoRaGatewayResponse, err error) {
    if request == nil {
        request = NewCreateLoRaGatewayRequest()
    }
    request.SetContext(ctx)
    response = NewCreateLoRaGatewayResponse()
    return response, c.Send(request, response)
}
// NewCreatePositionFenceRequest creates a CreatePositionFence request
// pre-wired with the iotexplorer API metadata.
func NewCreatePositionFenceRequest() (request *CreatePositionFenceRequest) {
    request = &CreatePositionFenceRequest{BaseRequest: &tchttp.BaseRequest{}}
    request.Init().WithApiInfo("iotexplorer", APIVersion, "CreatePositionFence")
    return request
}

// NewCreatePositionFenceResponse creates an empty CreatePositionFence response.
func NewCreatePositionFenceResponse() (response *CreatePositionFenceResponse) {
    response = &CreatePositionFenceResponse{BaseResponse: &tchttp.BaseResponse{}}
    return response
}

// CreatePositionFence creates a geofence.
//
// Possible error codes: InternalError,
// InternalError.InternalServerExceptionDB, InvalidParameterValue,
// ResourceNotFound.SpaceNotExist, UnsupportedOperation,
// UnsupportedOperation.FenceDupKeyExist.
func (c *Client) CreatePositionFence(request *CreatePositionFenceRequest) (response *CreatePositionFenceResponse, err error) {
    if request == nil {
        request = NewCreatePositionFenceRequest()
    }
    response = NewCreatePositionFenceResponse()
    return response, c.Send(request, response)
}

// CreatePositionFenceWithContext is CreatePositionFence with ctx attached to
// the request before it is sent.
func (c *Client) CreatePositionFenceWithContext(ctx context.Context, request *CreatePositionFenceRequest) (response *CreatePositionFenceResponse, err error) {
    if request == nil {
        request = NewCreatePositionFenceRequest()
    }
    request.SetContext(ctx)
    response = NewCreatePositionFenceResponse()
    return response, c.Send(request, response)
}
// NewCreatePositionSpaceRequest creates a CreatePositionSpace request
// pre-wired with the iotexplorer API metadata.
func NewCreatePositionSpaceRequest() (request *CreatePositionSpaceRequest) {
    request = &CreatePositionSpaceRequest{BaseRequest: &tchttp.BaseRequest{}}
    request.Init().WithApiInfo("iotexplorer", APIVersion, "CreatePositionSpace")
    return request
}

// NewCreatePositionSpaceResponse creates an empty CreatePositionSpace response.
func NewCreatePositionSpaceResponse() (response *CreatePositionSpaceResponse) {
    response = &CreatePositionSpaceResponse{BaseResponse: &tchttp.BaseResponse{}}
    return response
}

// CreatePositionSpace creates a position space.
//
// Possible error codes: InternalError,
// InternalError.InternalServerExceptionDB, InvalidParameter,
// InvalidParameterValue, ResourceNotFound.ProjectNotExist,
// UnauthorizedOperation, UnsupportedOperation.SpaceDupKeyExist.
func (c *Client) CreatePositionSpace(request *CreatePositionSpaceRequest) (response *CreatePositionSpaceResponse, err error) {
    if request == nil {
        request = NewCreatePositionSpaceRequest()
    }
    response = NewCreatePositionSpaceResponse()
    return response, c.Send(request, response)
}

// CreatePositionSpaceWithContext is CreatePositionSpace with ctx attached to
// the request before it is sent.
func (c *Client) CreatePositionSpaceWithContext(ctx context.Context, request *CreatePositionSpaceRequest) (response *CreatePositionSpaceResponse, err error) {
    if request == nil {
        request = NewCreatePositionSpaceRequest()
    }
    request.SetContext(ctx)
    response = NewCreatePositionSpaceResponse()
    return response, c.Send(request, response)
}
// NewCreateProjectRequest creates a CreateProject request pre-wired with the
// iotexplorer API metadata.
func NewCreateProjectRequest() (request *CreateProjectRequest) {
    request = &CreateProjectRequest{BaseRequest: &tchttp.BaseRequest{}}
    request.Init().WithApiInfo("iotexplorer", APIVersion, "CreateProject")
    return request
}

// NewCreateProjectResponse creates an empty CreateProject response.
func NewCreateProjectResponse() (response *CreateProjectResponse) {
    response = &CreateProjectResponse{BaseResponse: &tchttp.BaseResponse{}}
    return response
}

// CreateProject creates a project, used to centrally manage products and
// applications.
//
// Possible error codes: InternalError, InternalError.DBOpertaionError,
// InternalError.InternalRPCError, InternalError.InternalServerException,
// InternalError.InternalServerExceptionDB, InvalidParameter,
// InvalidParameterValue, InvalidParameterValue.ProjectParmsError,
// LimitExceeded.ProjectExceedLimit, ResourceNotFound.DeviceDupKeyExist,
// ResourceNotFound.InstanceNotExist, ResourceNotFound.ModuleNotExist,
// ResourceNotFound.ProjectNotExist, UnauthorizedOperation,
// UnauthorizedOperation.NoPermissionToProject,
// UnauthorizedOperation.NoPermissionToStudioInstance,
// UnsupportedOperation.DevicesExistUnderProduct,
// UnsupportedOperation.InstanceIsolated,
// UnsupportedOperation.PoolExistUnderProject,
// UnsupportedOperation.ProjectDupKeyExist.
func (c *Client) CreateProject(request *CreateProjectRequest) (response *CreateProjectResponse, err error) {
    if request == nil {
        request = NewCreateProjectRequest()
    }
    response = NewCreateProjectResponse()
    return response, c.Send(request, response)
}
// CreateProjectWithContext
// Provides the ability to create a new project, used to centrally manage
// products and applications. Honors the supplied context.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INTERNALERROR_INTERNALSERVEREXCEPTION = "InternalError.InternalServerException"
//  INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_PROJECTPARMSERROR = "InvalidParameterValue.ProjectParmsError"
//  LIMITEXCEEDED_PROJECTEXCEEDLIMIT = "LimitExceeded.ProjectExceedLimit"
//  RESOURCENOTFOUND_DEVICEDUPKEYEXIST = "ResourceNotFound.DeviceDupKeyExist"
//  RESOURCENOTFOUND_INSTANCENOTEXIST = "ResourceNotFound.InstanceNotExist"
//  RESOURCENOTFOUND_MODULENOTEXIST = "ResourceNotFound.ModuleNotExist"
//  RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOPROJECT = "UnauthorizedOperation.NoPermissionToProject"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOINSTANCE = "UnauthorizedOperation.NoPermissionToStudioInstance"
//  UNSUPPORTEDOPERATION_DEVICESEXISTUNDERPRODUCT = "UnsupportedOperation.DevicesExistUnderProduct"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
//  UNSUPPORTEDOPERATION_POOLEXISTUNDERPROJECT = "UnsupportedOperation.PoolExistUnderProject"
//  UNSUPPORTEDOPERATION_PROJECTDUPKEYEXIST = "UnsupportedOperation.ProjectDupKeyExist"
func (c *Client) CreateProjectWithContext(ctx context.Context, request *CreateProjectRequest) (response *CreateProjectResponse, err error) {
    if request == nil {
        request = NewCreateProjectRequest()
    }
    request.SetContext(ctx)
    resp := NewCreateProjectResponse()
    return resp, c.Send(request, resp)
}
// NewCreateStudioProductRequest builds a CreateStudioProductRequest pre-wired
// with the iotexplorer "CreateStudioProduct" API metadata.
func NewCreateStudioProductRequest() (request *CreateStudioProductRequest) {
    r := &CreateStudioProductRequest{BaseRequest: &tchttp.BaseRequest{}}
    r.Init().WithApiInfo("iotexplorer", APIVersion, "CreateStudioProduct")
    return r
}
// NewCreateStudioProductResponse builds an empty CreateStudioProductResponse
// ready to receive the deserialized API reply.
func NewCreateStudioProductResponse() (response *CreateStudioProductResponse) {
    return &CreateStudioProductResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// CreateStudioProduct
// Provides the ability to create a new product, used to manage the user's
// devices.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_MODELDEFINENIL = "InvalidParameterValue.ModelDefineNil"
//  INVALIDPARAMETERVALUE_MODELDEFINEPROPBOOLMAPPINGERROR = "InvalidParameterValue.ModelDefinePropBoolMappingError"
//  INVALIDPARAMETERVALUE_MODELDEFINEPROPENUMMAPPINGERROR = "InvalidParameterValue.ModelDefinePropEnumMappingError"
//  INVALIDPARAMETERVALUE_MODELDEFINEPROPRANGEERROR = "InvalidParameterValue.ModelDefinePropRangeError"
//  INVALIDPARAMETERVALUE_MODELDEFINEPROPRANGEOVERFLOW = "InvalidParameterValue.ModelDefinePropRangeOverflow"
//  INVALIDPARAMETERVALUE_PRODUCTALREADYEXIST = "InvalidParameterValue.ProductAlreadyExist"
//  INVALIDPARAMETERVALUE_PRODUCTIDINVALID = "InvalidParameterValue.ProductIDInvalid"
//  INVALIDPARAMETERVALUE_PRODUCTPARMSERROR = "InvalidParameterValue.ProductParmsError"
//  INVALIDPARAMETERVALUE_PROJECTPARMSERROR = "InvalidParameterValue.ProjectParmsError"
//  INVALIDPARAMETERVALUE_USERIDINVALID = "InvalidParameterValue.UserIDInvalid"
//  LIMITEXCEEDED_PRODUCTEXCEEDLIMIT = "LimitExceeded.ProductExceedLimit"
//  LIMITEXCEEDED_STUDIOPRODUCTEXCEEDLIMIT = "LimitExceeded.StudioProductExceedLimit"
//  LIMITEXCEEDED_THINGMODELEXCEEDLIMIT = "LimitExceeded.ThingModelExceedLimit"
//  MISSINGPARAMETER_MODELDEFINEEVENTTYPEERROR = "MissingParameter.ModelDefineEventTypeError"
//  RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOPROJECT = "UnauthorizedOperation.NoPermissionToProject"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
//  UNSUPPORTEDOPERATION_DEVICESEXISTUNDERPRODUCT = "UnsupportedOperation.DevicesExistUnderProduct"
//  UNSUPPORTEDOPERATION_PRODUCTDUPKEYEXIST = "UnsupportedOperation.ProductDupKeyExist"
//  UNSUPPORTEDOPERATION_PRODUCTEXISTUNDERPROJECT = "UnsupportedOperation.ProductExistUnderProject"
func (c *Client) CreateStudioProduct(request *CreateStudioProductRequest) (response *CreateStudioProductResponse, err error) {
    if request == nil {
        request = NewCreateStudioProductRequest()
    }
    resp := NewCreateStudioProductResponse()
    return resp, c.Send(request, resp)
}
// CreateStudioProductWithContext
// Provides the ability to create a new product, used to manage the user's
// devices. Honors the supplied context.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_MODELDEFINENIL = "InvalidParameterValue.ModelDefineNil"
//  INVALIDPARAMETERVALUE_MODELDEFINEPROPBOOLMAPPINGERROR = "InvalidParameterValue.ModelDefinePropBoolMappingError"
//  INVALIDPARAMETERVALUE_MODELDEFINEPROPENUMMAPPINGERROR = "InvalidParameterValue.ModelDefinePropEnumMappingError"
//  INVALIDPARAMETERVALUE_MODELDEFINEPROPRANGEERROR = "InvalidParameterValue.ModelDefinePropRangeError"
//  INVALIDPARAMETERVALUE_MODELDEFINEPROPRANGEOVERFLOW = "InvalidParameterValue.ModelDefinePropRangeOverflow"
//  INVALIDPARAMETERVALUE_PRODUCTALREADYEXIST = "InvalidParameterValue.ProductAlreadyExist"
//  INVALIDPARAMETERVALUE_PRODUCTIDINVALID = "InvalidParameterValue.ProductIDInvalid"
//  INVALIDPARAMETERVALUE_PRODUCTPARMSERROR = "InvalidParameterValue.ProductParmsError"
//  INVALIDPARAMETERVALUE_PROJECTPARMSERROR = "InvalidParameterValue.ProjectParmsError"
//  INVALIDPARAMETERVALUE_USERIDINVALID = "InvalidParameterValue.UserIDInvalid"
//  LIMITEXCEEDED_PRODUCTEXCEEDLIMIT = "LimitExceeded.ProductExceedLimit"
//  LIMITEXCEEDED_STUDIOPRODUCTEXCEEDLIMIT = "LimitExceeded.StudioProductExceedLimit"
//  LIMITEXCEEDED_THINGMODELEXCEEDLIMIT = "LimitExceeded.ThingModelExceedLimit"
//  MISSINGPARAMETER_MODELDEFINEEVENTTYPEERROR = "MissingParameter.ModelDefineEventTypeError"
//  RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOPROJECT = "UnauthorizedOperation.NoPermissionToProject"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
//  UNSUPPORTEDOPERATION_DEVICESEXISTUNDERPRODUCT = "UnsupportedOperation.DevicesExistUnderProduct"
//  UNSUPPORTEDOPERATION_PRODUCTDUPKEYEXIST = "UnsupportedOperation.ProductDupKeyExist"
//  UNSUPPORTEDOPERATION_PRODUCTEXISTUNDERPROJECT = "UnsupportedOperation.ProductExistUnderProject"
func (c *Client) CreateStudioProductWithContext(ctx context.Context, request *CreateStudioProductRequest) (response *CreateStudioProductResponse, err error) {
    if request == nil {
        request = NewCreateStudioProductRequest()
    }
    request.SetContext(ctx)
    resp := NewCreateStudioProductResponse()
    return resp, c.Send(request, resp)
}
// NewCreateTopicPolicyRequest builds a CreateTopicPolicyRequest pre-wired
// with the iotexplorer "CreateTopicPolicy" API metadata.
func NewCreateTopicPolicyRequest() (request *CreateTopicPolicyRequest) {
    r := &CreateTopicPolicyRequest{BaseRequest: &tchttp.BaseRequest{}}
    r.Init().WithApiInfo("iotexplorer", APIVersion, "CreateTopicPolicy")
    return r
}
// NewCreateTopicPolicyResponse builds an empty CreateTopicPolicyResponse
// ready to receive the deserialized API reply.
func NewCreateTopicPolicyResponse() (response *CreateTopicPolicyResponse) {
    return &CreateTopicPolicyResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// CreateTopicPolicy
// This API (CreateTopicPolicy) is used to create a Topic.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_TOPICPOLICYALREADYEXIST = "InvalidParameterValue.TopicPolicyAlreadyExist"
//  LIMITEXCEEDED_TOPICPOLICYEXCEEDLIMIT = "LimitExceeded.TopicPolicyExceedLimit"
//  RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
func (c *Client) CreateTopicPolicy(request *CreateTopicPolicyRequest) (response *CreateTopicPolicyResponse, err error) {
    if request == nil {
        request = NewCreateTopicPolicyRequest()
    }
    resp := NewCreateTopicPolicyResponse()
    return resp, c.Send(request, resp)
}
// CreateTopicPolicyWithContext
// This API (CreateTopicPolicy) is used to create a Topic. Honors the
// supplied context.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_TOPICPOLICYALREADYEXIST = "InvalidParameterValue.TopicPolicyAlreadyExist"
//  LIMITEXCEEDED_TOPICPOLICYEXCEEDLIMIT = "LimitExceeded.TopicPolicyExceedLimit"
//  RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
func (c *Client) CreateTopicPolicyWithContext(ctx context.Context, request *CreateTopicPolicyRequest) (response *CreateTopicPolicyResponse, err error) {
    if request == nil {
        request = NewCreateTopicPolicyRequest()
    }
    request.SetContext(ctx)
    resp := NewCreateTopicPolicyResponse()
    return resp, c.Send(request, resp)
}
// NewCreateTopicRuleRequest builds a CreateTopicRuleRequest pre-wired with
// the iotexplorer "CreateTopicRule" API metadata.
func NewCreateTopicRuleRequest() (request *CreateTopicRuleRequest) {
    r := &CreateTopicRuleRequest{BaseRequest: &tchttp.BaseRequest{}}
    r.Init().WithApiInfo("iotexplorer", APIVersion, "CreateTopicRule")
    return r
}
// NewCreateTopicRuleResponse builds an empty CreateTopicRuleResponse ready
// to receive the deserialized API reply.
func NewCreateTopicRuleResponse() (response *CreateTopicRuleResponse) {
    return &CreateTopicRuleResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// CreateTopicRule
// Creates a rule.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_INVALIDSQL = "InvalidParameterValue.InvalidSQL"
//  INVALIDPARAMETERVALUE_RULENUMBERBEYONDLIMIT = "InvalidParameterValue.RuleNumberBeyondLimit"
//  INVALIDPARAMETERVALUE_TOPICRULEALREADYEXIST = "InvalidParameterValue.TopicRuleAlreadyExist"
//  UNAUTHORIZEDOPERATION_PERMISSIONDENIED = "UnauthorizedOperation.PermissionDenied"
//  UNSUPPORTEDOPERATION = "UnsupportedOperation"
func (c *Client) CreateTopicRule(request *CreateTopicRuleRequest) (response *CreateTopicRuleResponse, err error) {
    if request == nil {
        request = NewCreateTopicRuleRequest()
    }
    resp := NewCreateTopicRuleResponse()
    return resp, c.Send(request, resp)
}
// CreateTopicRuleWithContext
// Creates a rule. Honors the supplied context.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_INVALIDSQL = "InvalidParameterValue.InvalidSQL"
//  INVALIDPARAMETERVALUE_RULENUMBERBEYONDLIMIT = "InvalidParameterValue.RuleNumberBeyondLimit"
//  INVALIDPARAMETERVALUE_TOPICRULEALREADYEXIST = "InvalidParameterValue.TopicRuleAlreadyExist"
//  UNAUTHORIZEDOPERATION_PERMISSIONDENIED = "UnauthorizedOperation.PermissionDenied"
//  UNSUPPORTEDOPERATION = "UnsupportedOperation"
func (c *Client) CreateTopicRuleWithContext(ctx context.Context, request *CreateTopicRuleRequest) (response *CreateTopicRuleResponse, err error) {
    if request == nil {
        request = NewCreateTopicRuleRequest()
    }
    request.SetContext(ctx)
    resp := NewCreateTopicRuleResponse()
    return resp, c.Send(request, resp)
}
// NewDeleteDeviceRequest builds a DeleteDeviceRequest pre-wired with the
// iotexplorer "DeleteDevice" API metadata.
func NewDeleteDeviceRequest() (request *DeleteDeviceRequest) {
    r := &DeleteDeviceRequest{BaseRequest: &tchttp.BaseRequest{}}
    r.Init().WithApiInfo("iotexplorer", APIVersion, "DeleteDevice")
    return r
}
// NewDeleteDeviceResponse builds an empty DeleteDeviceResponse ready to
// receive the deserialized API reply.
func NewDeleteDeviceResponse() (response *DeleteDeviceResponse) {
    return &DeleteDeviceResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// DeleteDevice
// Deletes a device.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_INSTANCENOTEXIST = "ResourceNotFound.InstanceNotExist"
//  RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_DEVICEHASALREADYBINDGATEWAY = "UnauthorizedOperation.DeviceHasAlreadyBindGateway"
//  UNAUTHORIZEDOPERATION_GATEWAYHASBINDEDDEVICES = "UnauthorizedOperation.GatewayHasBindedDevices"
//  UNSUPPORTEDOPERATION_DEVICEOTATASKINPROGRESS = "UnsupportedOperation.DeviceOtaTaskInProgress"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) DeleteDevice(request *DeleteDeviceRequest) (response *DeleteDeviceResponse, err error) {
    if request == nil {
        request = NewDeleteDeviceRequest()
    }
    resp := NewDeleteDeviceResponse()
    return resp, c.Send(request, resp)
}
// DeleteDeviceWithContext
// Deletes a device. Honors the supplied context.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_INSTANCENOTEXIST = "ResourceNotFound.InstanceNotExist"
//  RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_DEVICEHASALREADYBINDGATEWAY = "UnauthorizedOperation.DeviceHasAlreadyBindGateway"
//  UNAUTHORIZEDOPERATION_GATEWAYHASBINDEDDEVICES = "UnauthorizedOperation.GatewayHasBindedDevices"
//  UNSUPPORTEDOPERATION_DEVICEOTATASKINPROGRESS = "UnsupportedOperation.DeviceOtaTaskInProgress"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) DeleteDeviceWithContext(ctx context.Context, request *DeleteDeviceRequest) (response *DeleteDeviceResponse, err error) {
    if request == nil {
        request = NewDeleteDeviceRequest()
    }
    request.SetContext(ctx)
    resp := NewDeleteDeviceResponse()
    return resp, c.Send(request, resp)
}
// NewDeleteDevicesRequest builds a DeleteDevicesRequest pre-wired with the
// iotexplorer "DeleteDevices" API metadata.
func NewDeleteDevicesRequest() (request *DeleteDevicesRequest) {
    r := &DeleteDevicesRequest{BaseRequest: &tchttp.BaseRequest{}}
    r.Init().WithApiInfo("iotexplorer", APIVersion, "DeleteDevices")
    return r
}
// NewDeleteDevicesResponse builds an empty DeleteDevicesResponse ready to
// receive the deserialized API reply.
func NewDeleteDevicesResponse() (response *DeleteDevicesResponse) {
    return &DeleteDevicesResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// DeleteDevices
// Deletes devices in batch.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_INSTANCENOTEXIST = "ResourceNotFound.InstanceNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_DEVICEHASALREADYBINDGATEWAY = "UnauthorizedOperation.DeviceHasAlreadyBindGateway"
//  UNAUTHORIZEDOPERATION_GATEWAYHASBINDEDDEVICES = "UnauthorizedOperation.GatewayHasBindedDevices"
//  UNSUPPORTEDOPERATION_DEVICEOTATASKINPROGRESS = "UnsupportedOperation.DeviceOtaTaskInProgress"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) DeleteDevices(request *DeleteDevicesRequest) (response *DeleteDevicesResponse, err error) {
    if request == nil {
        request = NewDeleteDevicesRequest()
    }
    resp := NewDeleteDevicesResponse()
    return resp, c.Send(request, resp)
}
// DeleteDevicesWithContext
// Deletes devices in batch. Honors the supplied context.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_INSTANCENOTEXIST = "ResourceNotFound.InstanceNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_DEVICEHASALREADYBINDGATEWAY = "UnauthorizedOperation.DeviceHasAlreadyBindGateway"
//  UNAUTHORIZEDOPERATION_GATEWAYHASBINDEDDEVICES = "UnauthorizedOperation.GatewayHasBindedDevices"
//  UNSUPPORTEDOPERATION_DEVICEOTATASKINPROGRESS = "UnsupportedOperation.DeviceOtaTaskInProgress"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) DeleteDevicesWithContext(ctx context.Context, request *DeleteDevicesRequest) (response *DeleteDevicesResponse, err error) {
    if request == nil {
        request = NewDeleteDevicesRequest()
    }
    request.SetContext(ctx)
    resp := NewDeleteDevicesResponse()
    return resp, c.Send(request, resp)
}
// NewDeleteFenceBindRequest builds a DeleteFenceBindRequest pre-wired with
// the iotexplorer "DeleteFenceBind" API metadata.
func NewDeleteFenceBindRequest() (request *DeleteFenceBindRequest) {
    r := &DeleteFenceBindRequest{BaseRequest: &tchttp.BaseRequest{}}
    r.Init().WithApiInfo("iotexplorer", APIVersion, "DeleteFenceBind")
    return r
}
// NewDeleteFenceBindResponse builds an empty DeleteFenceBindResponse ready
// to receive the deserialized API reply.
func NewDeleteFenceBindResponse() (response *DeleteFenceBindResponse) {
    return &DeleteFenceBindResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// DeleteFenceBind
// Deletes fence binding information.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
//  INVALIDPARAMETER = "InvalidParameter"
//  RESOURCENOTFOUND_FENCEBINDNOTEXIST = "ResourceNotFound.FenceBindNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOFENCE = "UnauthorizedOperation.NoPermissionToStudioFence"
func (c *Client) DeleteFenceBind(request *DeleteFenceBindRequest) (response *DeleteFenceBindResponse, err error) {
    if request == nil {
        request = NewDeleteFenceBindRequest()
    }
    resp := NewDeleteFenceBindResponse()
    return resp, c.Send(request, resp)
}
// DeleteFenceBindWithContext
// Deletes fence binding information. Honors the supplied context.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
//  INVALIDPARAMETER = "InvalidParameter"
//  RESOURCENOTFOUND_FENCEBINDNOTEXIST = "ResourceNotFound.FenceBindNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOFENCE = "UnauthorizedOperation.NoPermissionToStudioFence"
func (c *Client) DeleteFenceBindWithContext(ctx context.Context, request *DeleteFenceBindRequest) (response *DeleteFenceBindResponse, err error) {
    if request == nil {
        request = NewDeleteFenceBindRequest()
    }
    request.SetContext(ctx)
    resp := NewDeleteFenceBindResponse()
    return resp, c.Send(request, resp)
}
// NewDeleteLoRaFrequencyRequest builds a DeleteLoRaFrequencyRequest
// pre-wired with the iotexplorer "DeleteLoRaFrequency" API metadata.
func NewDeleteLoRaFrequencyRequest() (request *DeleteLoRaFrequencyRequest) {
    r := &DeleteLoRaFrequencyRequest{BaseRequest: &tchttp.BaseRequest{}}
    r.Init().WithApiInfo("iotexplorer", APIVersion, "DeleteLoRaFrequency")
    return r
}
// NewDeleteLoRaFrequencyResponse builds an empty DeleteLoRaFrequencyResponse
// ready to receive the deserialized API reply.
func NewDeleteLoRaFrequencyResponse() (response *DeleteLoRaFrequencyResponse) {
    return &DeleteLoRaFrequencyResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// DeleteLoRaFrequency
// Provides the ability to delete a custom LoRa frequency.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  RESOURCENOTFOUND_STUDIOLORAFREQNOTEXIST = "ResourceNotFound.StudioLoRaFreqNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNSUPPORTEDOPERATION_NODESEXISTUNDERVPN = "UnsupportedOperation.NodesExistUnderVPN"
//  UNSUPPORTEDOPERATION_STUDIOLORAFREQINUSED = "UnsupportedOperation.StudioLoRaFreqInUsed"
func (c *Client) DeleteLoRaFrequency(request *DeleteLoRaFrequencyRequest) (response *DeleteLoRaFrequencyResponse, err error) {
    if request == nil {
        request = NewDeleteLoRaFrequencyRequest()
    }
    resp := NewDeleteLoRaFrequencyResponse()
    return resp, c.Send(request, resp)
}
// DeleteLoRaFrequencyWithContext
// Provides the ability to delete a custom LoRa frequency. Honors the
// supplied context.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  RESOURCENOTFOUND_STUDIOLORAFREQNOTEXIST = "ResourceNotFound.StudioLoRaFreqNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNSUPPORTEDOPERATION_NODESEXISTUNDERVPN = "UnsupportedOperation.NodesExistUnderVPN"
//  UNSUPPORTEDOPERATION_STUDIOLORAFREQINUSED = "UnsupportedOperation.StudioLoRaFreqInUsed"
func (c *Client) DeleteLoRaFrequencyWithContext(ctx context.Context, request *DeleteLoRaFrequencyRequest) (response *DeleteLoRaFrequencyResponse, err error) {
    if request == nil {
        request = NewDeleteLoRaFrequencyRequest()
    }
    request.SetContext(ctx)
    resp := NewDeleteLoRaFrequencyResponse()
    return resp, c.Send(request, resp)
}
// NewDeleteLoRaGatewayRequest builds a DeleteLoRaGatewayRequest pre-wired
// with the iotexplorer "DeleteLoRaGateway" API metadata.
func NewDeleteLoRaGatewayRequest() (request *DeleteLoRaGatewayRequest) {
    r := &DeleteLoRaGatewayRequest{BaseRequest: &tchttp.BaseRequest{}}
    r.Init().WithApiInfo("iotexplorer", APIVersion, "DeleteLoRaGateway")
    return r
}
// NewDeleteLoRaGatewayResponse builds an empty DeleteLoRaGatewayResponse
// ready to receive the deserialized API reply.
func NewDeleteLoRaGatewayResponse() (response *DeleteLoRaGatewayResponse) {
    return &DeleteLoRaGatewayResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// DeleteLoRaGateway
// API for deleting a LoRa gateway.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_GATEWAYNOTEXIST = "ResourceNotFound.GatewayNotExist"
func (c *Client) DeleteLoRaGateway(request *DeleteLoRaGatewayRequest) (response *DeleteLoRaGatewayResponse, err error) {
    if request == nil {
        request = NewDeleteLoRaGatewayRequest()
    }
    resp := NewDeleteLoRaGatewayResponse()
    return resp, c.Send(request, resp)
}
// DeleteLoRaGatewayWithContext
// API for deleting a LoRa gateway. Honors the supplied context.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_GATEWAYNOTEXIST = "ResourceNotFound.GatewayNotExist"
func (c *Client) DeleteLoRaGatewayWithContext(ctx context.Context, request *DeleteLoRaGatewayRequest) (response *DeleteLoRaGatewayResponse, err error) {
    if request == nil {
        request = NewDeleteLoRaGatewayRequest()
    }
    request.SetContext(ctx)
    resp := NewDeleteLoRaGatewayResponse()
    return resp, c.Send(request, resp)
}
// NewDeletePositionFenceRequest builds a DeletePositionFenceRequest
// pre-wired with the iotexplorer "DeletePositionFence" API metadata.
func NewDeletePositionFenceRequest() (request *DeletePositionFenceRequest) {
    r := &DeletePositionFenceRequest{BaseRequest: &tchttp.BaseRequest{}}
    r.Init().WithApiInfo("iotexplorer", APIVersion, "DeletePositionFence")
    return r
}
// NewDeletePositionFenceResponse builds an empty DeletePositionFenceResponse
// ready to receive the deserialized API reply.
func NewDeletePositionFenceResponse() (response *DeletePositionFenceResponse) {
    return &DeletePositionFenceResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// DeletePositionFence
// Deletes a position fence.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_FENCENOTEXIST = "ResourceNotFound.FenceNotExist"
//  RESOURCENOTFOUND_SPACENOTEXIST = "ResourceNotFound.SpaceNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOFENCE = "UnauthorizedOperation.NoPermissionToStudioFence"
//  UNSUPPORTEDOPERATION_BINDSEXISTUNDERFENCE = "UnsupportedOperation.BindsExistUnderFence"
func (c *Client) DeletePositionFence(request *DeletePositionFenceRequest) (response *DeletePositionFenceResponse, err error) {
    if request == nil {
        request = NewDeletePositionFenceRequest()
    }
    resp := NewDeletePositionFenceResponse()
    return resp, c.Send(request, resp)
}
// DeletePositionFenceWithContext
// Deletes a position fence. Honors the supplied context.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_FENCENOTEXIST = "ResourceNotFound.FenceNotExist"
//  RESOURCENOTFOUND_SPACENOTEXIST = "ResourceNotFound.SpaceNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOFENCE = "UnauthorizedOperation.NoPermissionToStudioFence"
//  UNSUPPORTEDOPERATION_BINDSEXISTUNDERFENCE = "UnsupportedOperation.BindsExistUnderFence"
func (c *Client) DeletePositionFenceWithContext(ctx context.Context, request *DeletePositionFenceRequest) (response *DeletePositionFenceResponse, err error) {
    if request == nil {
        request = NewDeletePositionFenceRequest()
    }
    request.SetContext(ctx)
    resp := NewDeletePositionFenceResponse()
    return resp, c.Send(request, resp)
}
// NewDeletePositionSpaceRequest builds a DeletePositionSpaceRequest
// pre-wired with the iotexplorer "DeletePositionSpace" API metadata.
func NewDeletePositionSpaceRequest() (request *DeletePositionSpaceRequest) {
    r := &DeletePositionSpaceRequest{BaseRequest: &tchttp.BaseRequest{}}
    r.Init().WithApiInfo("iotexplorer", APIVersion, "DeletePositionSpace")
    return r
}
// NewDeletePositionSpaceResponse builds an empty DeletePositionSpaceResponse
// ready to receive the deserialized API reply.
func NewDeletePositionSpaceResponse() (response *DeletePositionSpaceResponse) {
    return &DeletePositionSpaceResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// DeletePositionSpace
// Deletes a position space.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_SPACENOTEXIST = "ResourceNotFound.SpaceNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNSUPPORTEDOPERATION_FENCEEXISTUNDERSPACE = "UnsupportedOperation.FenceExistUnderSpace"
func (c *Client) DeletePositionSpace(request *DeletePositionSpaceRequest) (response *DeletePositionSpaceResponse, err error) {
    if request == nil {
        request = NewDeletePositionSpaceRequest()
    }
    resp := NewDeletePositionSpaceResponse()
    return resp, c.Send(request, resp)
}
// DeletePositionSpaceWithContext
// Deletes a position space. Honors the supplied context.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_SPACENOTEXIST = "ResourceNotFound.SpaceNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNSUPPORTEDOPERATION_FENCEEXISTUNDERSPACE = "UnsupportedOperation.FenceExistUnderSpace"
func (c *Client) DeletePositionSpaceWithContext(ctx context.Context, request *DeletePositionSpaceRequest) (response *DeletePositionSpaceResponse, err error) {
    if request == nil {
        request = NewDeletePositionSpaceRequest()
    }
    request.SetContext(ctx)
    resp := NewDeletePositionSpaceResponse()
    return resp, c.Send(request, resp)
}
// NewDeleteProjectRequest builds a DeleteProjectRequest pre-wired with the
// iotexplorer "DeleteProject" API metadata.
func NewDeleteProjectRequest() (request *DeleteProjectRequest) {
    r := &DeleteProjectRequest{BaseRequest: &tchttp.BaseRequest{}}
    r.Init().WithApiInfo("iotexplorer", APIVersion, "DeleteProject")
    return r
}
// NewDeleteProjectResponse builds an empty DeleteProjectResponse ready to
// receive the deserialized API reply.
func NewDeleteProjectResponse() (response *DeleteProjectResponse) {
    return &DeleteProjectResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// DeleteProject
// Provides the ability to delete a project.
//
// Error codes that may be returned:
//  RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
//  UNSUPPORTEDOPERATION_POOLEXISTUNDERPROJECT = "UnsupportedOperation.PoolExistUnderProject"
//  UNSUPPORTEDOPERATION_PRODUCTEXISTUNDERPROJECT = "UnsupportedOperation.ProductExistUnderProject"
func (c *Client) DeleteProject(request *DeleteProjectRequest) (response *DeleteProjectResponse, err error) {
    if request == nil {
        request = NewDeleteProjectRequest()
    }
    resp := NewDeleteProjectResponse()
    return resp, c.Send(request, resp)
}
// DeleteProjectWithContext
// Provides the ability to delete a project. Honors the supplied context.
//
// Error codes that may be returned:
//  RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
//  UNSUPPORTEDOPERATION_POOLEXISTUNDERPROJECT = "UnsupportedOperation.PoolExistUnderProject"
//  UNSUPPORTEDOPERATION_PRODUCTEXISTUNDERPROJECT = "UnsupportedOperation.ProductExistUnderProject"
func (c *Client) DeleteProjectWithContext(ctx context.Context, request *DeleteProjectRequest) (response *DeleteProjectResponse, err error) {
    if request == nil {
        request = NewDeleteProjectRequest()
    }
    request.SetContext(ctx)
    resp := NewDeleteProjectResponse()
    return resp, c.Send(request, resp)
}
// NewDeleteStudioProductRequest builds a DeleteStudioProductRequest
// pre-wired with the iotexplorer "DeleteStudioProduct" API metadata.
func NewDeleteStudioProductRequest() (request *DeleteStudioProductRequest) {
    r := &DeleteStudioProductRequest{BaseRequest: &tchttp.BaseRequest{}}
    r.Init().WithApiInfo("iotexplorer", APIVersion, "DeleteStudioProduct")
    return r
}
// NewDeleteStudioProductResponse builds an empty DeleteStudioProductResponse
// ready to receive the deserialized API reply.
func NewDeleteStudioProductResponse() (response *DeleteStudioProductResponse) {
    return &DeleteStudioProductResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// DeleteStudioProduct
// Provides the ability to delete a product under a project.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
//  UNSUPPORTEDOPERATION_DEVICESEXISTUNDERPRODUCT = "UnsupportedOperation.DevicesExistUnderProduct"
//  UNSUPPORTEDOPERATION_GATEWAYPRODUCTHASBINDEDPRODUCT = "UnsupportedOperation.GatewayProductHasBindedProduct"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
//  UNSUPPORTEDOPERATION_PRODUCTHASBINDEDGATEWAYPRODUCT = "UnsupportedOperation.ProductHasBindedGateWayProduct"
func (c *Client) DeleteStudioProduct(request *DeleteStudioProductRequest) (response *DeleteStudioProductResponse, err error) {
    if request == nil {
        request = NewDeleteStudioProductRequest()
    }
    resp := NewDeleteStudioProductResponse()
    return resp, c.Send(request, resp)
}
// DeleteStudioProductWithContext
// Provides the ability to delete a product under a project. Honors the
// supplied context.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
//  UNSUPPORTEDOPERATION_DEVICESEXISTUNDERPRODUCT = "UnsupportedOperation.DevicesExistUnderProduct"
//  UNSUPPORTEDOPERATION_GATEWAYPRODUCTHASBINDEDPRODUCT = "UnsupportedOperation.GatewayProductHasBindedProduct"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
//  UNSUPPORTEDOPERATION_PRODUCTHASBINDEDGATEWAYPRODUCT = "UnsupportedOperation.ProductHasBindedGateWayProduct"
func (c *Client) DeleteStudioProductWithContext(ctx context.Context, request *DeleteStudioProductRequest) (response *DeleteStudioProductResponse, err error) {
    if request == nil {
        request = NewDeleteStudioProductRequest()
    }
    request.SetContext(ctx)
    resp := NewDeleteStudioProductResponse()
    return resp, c.Send(request, resp)
}
// NewDeleteTopicPolicyRequest constructs a DeleteTopicPolicyRequest bound to
// the iotexplorer "DeleteTopicPolicy" API action.
func NewDeleteTopicPolicyRequest() (request *DeleteTopicPolicyRequest) {
	req := &DeleteTopicPolicyRequest{BaseRequest: &tchttp.BaseRequest{}}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "DeleteTopicPolicy")
	return req
}
// NewDeleteTopicPolicyResponse constructs an empty response envelope for the
// "DeleteTopicPolicy" action.
func NewDeleteTopicPolicyResponse() (response *DeleteTopicPolicyResponse) {
	resp := &DeleteTopicPolicyResponse{BaseResponse: &tchttp.BaseResponse{}}
	return resp
}
// DeleteTopicPolicy deletes a Topic.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  RESOURCENOTFOUND_TOPICPOLICYNOTEXIST = "ResourceNotFound.TopicPolicyNotExist"
func (c *Client) DeleteTopicPolicy(request *DeleteTopicPolicyRequest) (response *DeleteTopicPolicyResponse, err error) {
	if request == nil {
		request = NewDeleteTopicPolicyRequest()
	}
	resp := NewDeleteTopicPolicyResponse()
	return resp, c.Send(request, resp)
}
// DeleteTopicPolicyWithContext deletes a Topic, carrying the given context on
// the request.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  RESOURCENOTFOUND_TOPICPOLICYNOTEXIST = "ResourceNotFound.TopicPolicyNotExist"
func (c *Client) DeleteTopicPolicyWithContext(ctx context.Context, request *DeleteTopicPolicyRequest) (response *DeleteTopicPolicyResponse, err error) {
	if request == nil {
		request = NewDeleteTopicPolicyRequest()
	}
	request.SetContext(ctx)
	resp := NewDeleteTopicPolicyResponse()
	return resp, c.Send(request, resp)
}
// NewDeleteTopicRuleRequest constructs a DeleteTopicRuleRequest bound to the
// iotexplorer "DeleteTopicRule" API action.
func NewDeleteTopicRuleRequest() (request *DeleteTopicRuleRequest) {
	req := &DeleteTopicRuleRequest{BaseRequest: &tchttp.BaseRequest{}}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "DeleteTopicRule")
	return req
}
// NewDeleteTopicRuleResponse constructs an empty response envelope for the
// "DeleteTopicRule" action.
func NewDeleteTopicRuleResponse() (response *DeleteTopicRuleResponse) {
	resp := &DeleteTopicRuleResponse{BaseResponse: &tchttp.BaseResponse{}}
	return resp
}
// DeleteTopicRule deletes a topic rule.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_TOPICRULENOTEXIST = "ResourceNotFound.TopicRuleNotExist"
//  UNSUPPORTEDOPERATION = "UnsupportedOperation"
func (c *Client) DeleteTopicRule(request *DeleteTopicRuleRequest) (response *DeleteTopicRuleResponse, err error) {
	if request == nil {
		request = NewDeleteTopicRuleRequest()
	}
	resp := NewDeleteTopicRuleResponse()
	return resp, c.Send(request, resp)
}
// DeleteTopicRuleWithContext deletes a topic rule, carrying the given context
// on the request.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_TOPICRULENOTEXIST = "ResourceNotFound.TopicRuleNotExist"
//  UNSUPPORTEDOPERATION = "UnsupportedOperation"
func (c *Client) DeleteTopicRuleWithContext(ctx context.Context, request *DeleteTopicRuleRequest) (response *DeleteTopicRuleResponse, err error) {
	if request == nil {
		request = NewDeleteTopicRuleRequest()
	}
	request.SetContext(ctx)
	resp := NewDeleteTopicRuleResponse()
	return resp, c.Send(request, resp)
}
// NewDescribeBatchProductionRequest constructs a DescribeBatchProductionRequest
// bound to the iotexplorer "DescribeBatchProduction" API action.
func NewDescribeBatchProductionRequest() (request *DescribeBatchProductionRequest) {
	req := &DescribeBatchProductionRequest{BaseRequest: &tchttp.BaseRequest{}}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "DescribeBatchProduction")
	return req
}
// NewDescribeBatchProductionResponse constructs an empty response envelope for
// the "DescribeBatchProduction" action.
func NewDescribeBatchProductionResponse() (response *DescribeBatchProductionResponse) {
	resp := &DescribeBatchProductionResponse{BaseResponse: &tchttp.BaseResponse{}}
	return resp
}
// DescribeBatchProduction queries the details of a batch production task.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INVALIDPARAMETER = "InvalidParameter"
//  RESOURCENOTFOUND_BATCHPRODUCTIONNOTEXIST = "ResourceNotFound.BatchProductionNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOPROJECT = "UnauthorizedOperation.NoPermissionToProject"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) DescribeBatchProduction(request *DescribeBatchProductionRequest) (response *DescribeBatchProductionResponse, err error) {
	if request == nil {
		request = NewDescribeBatchProductionRequest()
	}
	resp := NewDescribeBatchProductionResponse()
	return resp, c.Send(request, resp)
}
// DescribeBatchProductionWithContext queries the details of a batch production
// task, carrying the given context on the request.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INVALIDPARAMETER = "InvalidParameter"
//  RESOURCENOTFOUND_BATCHPRODUCTIONNOTEXIST = "ResourceNotFound.BatchProductionNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOPROJECT = "UnauthorizedOperation.NoPermissionToProject"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) DescribeBatchProductionWithContext(ctx context.Context, request *DescribeBatchProductionRequest) (response *DescribeBatchProductionResponse, err error) {
	if request == nil {
		request = NewDescribeBatchProductionRequest()
	}
	request.SetContext(ctx)
	resp := NewDescribeBatchProductionResponse()
	return resp, c.Send(request, resp)
}
// NewDescribeBindedProductsRequest constructs a DescribeBindedProductsRequest
// bound to the iotexplorer "DescribeBindedProducts" API action.
func NewDescribeBindedProductsRequest() (request *DescribeBindedProductsRequest) {
	req := &DescribeBindedProductsRequest{BaseRequest: &tchttp.BaseRequest{}}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "DescribeBindedProducts")
	return req
}
// NewDescribeBindedProductsResponse constructs an empty response envelope for
// the "DescribeBindedProducts" action.
func NewDescribeBindedProductsResponse() (response *DescribeBindedProductsResponse) {
	resp := &DescribeBindedProductsResponse{BaseResponse: &tchttp.BaseResponse{}}
	return resp
}
// DescribeBindedProducts queries the sub-products already bound to a gateway
// product.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) DescribeBindedProducts(request *DescribeBindedProductsRequest) (response *DescribeBindedProductsResponse, err error) {
	if request == nil {
		request = NewDescribeBindedProductsRequest()
	}
	resp := NewDescribeBindedProductsResponse()
	return resp, c.Send(request, resp)
}
// DescribeBindedProductsWithContext queries the sub-products already bound to
// a gateway product, carrying the given context on the request.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) DescribeBindedProductsWithContext(ctx context.Context, request *DescribeBindedProductsRequest) (response *DescribeBindedProductsResponse, err error) {
	if request == nil {
		request = NewDescribeBindedProductsRequest()
	}
	request.SetContext(ctx)
	resp := NewDescribeBindedProductsResponse()
	return resp, c.Send(request, resp)
}
// NewDescribeDeviceRequest constructs a DescribeDeviceRequest bound to the
// iotexplorer "DescribeDevice" API action.
func NewDescribeDeviceRequest() (request *DescribeDeviceRequest) {
	req := &DescribeDeviceRequest{BaseRequest: &tchttp.BaseRequest{}}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "DescribeDevice")
	return req
}
// NewDescribeDeviceResponse constructs an empty response envelope for the
// "DescribeDevice" action.
func NewDescribeDeviceResponse() (response *DescribeDeviceResponse) {
	resp := &DescribeDeviceResponse{BaseResponse: &tchttp.BaseResponse{}}
	return resp
}
// DescribeDevice queries the details of a device.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) DescribeDevice(request *DescribeDeviceRequest) (response *DescribeDeviceResponse, err error) {
	if request == nil {
		request = NewDescribeDeviceRequest()
	}
	resp := NewDescribeDeviceResponse()
	return resp, c.Send(request, resp)
}
// DescribeDeviceWithContext queries the details of a device, carrying the
// given context on the request.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) DescribeDeviceWithContext(ctx context.Context, request *DescribeDeviceRequest) (response *DescribeDeviceResponse, err error) {
	if request == nil {
		request = NewDescribeDeviceRequest()
	}
	request.SetContext(ctx)
	resp := NewDescribeDeviceResponse()
	return resp, c.Send(request, resp)
}
// NewDescribeDeviceBindGatewayRequest constructs a
// DescribeDeviceBindGatewayRequest bound to the iotexplorer
// "DescribeDeviceBindGateway" API action.
func NewDescribeDeviceBindGatewayRequest() (request *DescribeDeviceBindGatewayRequest) {
	req := &DescribeDeviceBindGatewayRequest{BaseRequest: &tchttp.BaseRequest{}}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "DescribeDeviceBindGateway")
	return req
}
// NewDescribeDeviceBindGatewayResponse constructs an empty response envelope
// for the "DescribeDeviceBindGateway" action.
func NewDescribeDeviceBindGatewayResponse() (response *DescribeDeviceBindGatewayResponse) {
	resp := &DescribeDeviceBindGatewayResponse{BaseResponse: &tchttp.BaseResponse{}}
	return resp
}
// DescribeDeviceBindGateway queries the gateway device that a device is bound
// to.
//
// Possible error codes:
//  AUTHFAILURE = "AuthFailure"
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INTERNALERROR_TIMEOUT = "InternalError.Timeout"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_DEVICEHASNOTBINDGATEWAY = "InvalidParameterValue.DeviceHasNotBindGateway"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
//  UNSUPPORTEDOPERATION = "UnsupportedOperation"
func (c *Client) DescribeDeviceBindGateway(request *DescribeDeviceBindGatewayRequest) (response *DescribeDeviceBindGatewayResponse, err error) {
	if request == nil {
		request = NewDescribeDeviceBindGatewayRequest()
	}
	resp := NewDescribeDeviceBindGatewayResponse()
	return resp, c.Send(request, resp)
}
// DescribeDeviceBindGatewayWithContext queries the gateway device that a
// device is bound to, carrying the given context on the request.
//
// Possible error codes:
//  AUTHFAILURE = "AuthFailure"
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INTERNALERROR_TIMEOUT = "InternalError.Timeout"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_DEVICEHASNOTBINDGATEWAY = "InvalidParameterValue.DeviceHasNotBindGateway"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
//  UNSUPPORTEDOPERATION = "UnsupportedOperation"
func (c *Client) DescribeDeviceBindGatewayWithContext(ctx context.Context, request *DescribeDeviceBindGatewayRequest) (response *DescribeDeviceBindGatewayResponse, err error) {
	if request == nil {
		request = NewDescribeDeviceBindGatewayRequest()
	}
	request.SetContext(ctx)
	resp := NewDescribeDeviceBindGatewayResponse()
	return resp, c.Send(request, resp)
}
// NewDescribeDeviceDataRequest constructs a DescribeDeviceDataRequest bound to
// the iotexplorer "DescribeDeviceData" API action.
func NewDescribeDeviceDataRequest() (request *DescribeDeviceDataRequest) {
	req := &DescribeDeviceDataRequest{BaseRequest: &tchttp.BaseRequest{}}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "DescribeDeviceData")
	return req
}
// NewDescribeDeviceDataResponse constructs an empty response envelope for the
// "DescribeDeviceData" action.
func NewDescribeDeviceDataResponse() (response *DescribeDeviceDataResponse) {
	resp := &DescribeDeviceDataResponse{BaseResponse: &tchttp.BaseResponse{}}
	return resp
}
// DescribeDeviceData queries the attribute data reported by a device, looked
// up by product ID and device name.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_DEVICESHADOWNOTEXIST = "ResourceNotFound.DeviceShadowNotExist"
//  RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
//  RESOURCENOTFOUND_PRODUCTORDEVICENOTEXIST = "ResourceNotFound.ProductOrDeviceNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) DescribeDeviceData(request *DescribeDeviceDataRequest) (response *DescribeDeviceDataResponse, err error) {
	if request == nil {
		request = NewDescribeDeviceDataRequest()
	}
	resp := NewDescribeDeviceDataResponse()
	return resp, c.Send(request, resp)
}
// DescribeDeviceDataWithContext queries the attribute data reported by a
// device (by product ID and device name), carrying the given context on the
// request.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_DEVICESHADOWNOTEXIST = "ResourceNotFound.DeviceShadowNotExist"
//  RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
//  RESOURCENOTFOUND_PRODUCTORDEVICENOTEXIST = "ResourceNotFound.ProductOrDeviceNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) DescribeDeviceDataWithContext(ctx context.Context, request *DescribeDeviceDataRequest) (response *DescribeDeviceDataResponse, err error) {
	if request == nil {
		request = NewDescribeDeviceDataRequest()
	}
	request.SetContext(ctx)
	resp := NewDescribeDeviceDataResponse()
	return resp, c.Send(request, resp)
}
// NewDescribeDeviceDataHistoryRequest constructs a
// DescribeDeviceDataHistoryRequest bound to the iotexplorer
// "DescribeDeviceDataHistory" API action.
func NewDescribeDeviceDataHistoryRequest() (request *DescribeDeviceDataHistoryRequest) {
	req := &DescribeDeviceDataHistoryRequest{BaseRequest: &tchttp.BaseRequest{}}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "DescribeDeviceDataHistory")
	return req
}
// NewDescribeDeviceDataHistoryResponse constructs an empty response envelope
// for the "DescribeDeviceDataHistory" action.
func NewDescribeDeviceDataHistoryResponse() (response *DescribeDeviceDataHistoryResponse) {
	resp := &DescribeDeviceDataHistoryResponse{BaseResponse: &tchttp.BaseResponse{}}
	return resp
}
// DescribeDeviceDataHistory queries the historical data reported by a device
// within a given time range.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INTERNALERROR_INTERNALSERVEREXCEPTION = "InternalError.InternalServerException"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) DescribeDeviceDataHistory(request *DescribeDeviceDataHistoryRequest) (response *DescribeDeviceDataHistoryResponse, err error) {
	if request == nil {
		request = NewDescribeDeviceDataHistoryRequest()
	}
	resp := NewDescribeDeviceDataHistoryResponse()
	return resp, c.Send(request, resp)
}
// DescribeDeviceDataHistoryWithContext queries the historical data reported by
// a device within a given time range, carrying the given context on the
// request.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INTERNALERROR_INTERNALSERVEREXCEPTION = "InternalError.InternalServerException"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) DescribeDeviceDataHistoryWithContext(ctx context.Context, request *DescribeDeviceDataHistoryRequest) (response *DescribeDeviceDataHistoryResponse, err error) {
	if request == nil {
		request = NewDescribeDeviceDataHistoryRequest()
	}
	request.SetContext(ctx)
	resp := NewDescribeDeviceDataHistoryResponse()
	return resp, c.Send(request, resp)
}
// NewDescribeDevicePositionListRequest constructs a
// DescribeDevicePositionListRequest bound to the iotexplorer
// "DescribeDevicePositionList" API action.
func NewDescribeDevicePositionListRequest() (request *DescribeDevicePositionListRequest) {
	req := &DescribeDevicePositionListRequest{BaseRequest: &tchttp.BaseRequest{}}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "DescribeDevicePositionList")
	return req
}
// NewDescribeDevicePositionListResponse constructs an empty response envelope
// for the "DescribeDevicePositionList" action.
func NewDescribeDevicePositionListResponse() (response *DescribeDevicePositionListResponse) {
	resp := &DescribeDevicePositionListResponse{BaseResponse: &tchttp.BaseResponse{}}
	return resp
}
// DescribeDevicePositionList queries the list of device positions.
//
// Possible error codes:
//  INVALIDPARAMETERVALUE_PRODUCTIDINVALID = "InvalidParameterValue.ProductIDInvalid"
func (c *Client) DescribeDevicePositionList(request *DescribeDevicePositionListRequest) (response *DescribeDevicePositionListResponse, err error) {
	if request == nil {
		request = NewDescribeDevicePositionListRequest()
	}
	resp := NewDescribeDevicePositionListResponse()
	return resp, c.Send(request, resp)
}
// DescribeDevicePositionListWithContext queries the list of device positions,
// carrying the given context on the request.
//
// Possible error codes:
//  INVALIDPARAMETERVALUE_PRODUCTIDINVALID = "InvalidParameterValue.ProductIDInvalid"
func (c *Client) DescribeDevicePositionListWithContext(ctx context.Context, request *DescribeDevicePositionListRequest) (response *DescribeDevicePositionListResponse, err error) {
	if request == nil {
		request = NewDescribeDevicePositionListRequest()
	}
	request.SetContext(ctx)
	resp := NewDescribeDevicePositionListResponse()
	return resp, c.Send(request, resp)
}
// NewDescribeFenceBindListRequest constructs a DescribeFenceBindListRequest
// bound to the iotexplorer "DescribeFenceBindList" API action.
func NewDescribeFenceBindListRequest() (request *DescribeFenceBindListRequest) {
	req := &DescribeFenceBindListRequest{BaseRequest: &tchttp.BaseRequest{}}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "DescribeFenceBindList")
	return req
}
// NewDescribeFenceBindListResponse constructs an empty response envelope for
// the "DescribeFenceBindList" action.
func NewDescribeFenceBindListResponse() (response *DescribeFenceBindListResponse) {
	resp := &DescribeFenceBindListResponse{BaseResponse: &tchttp.BaseResponse{}}
	return resp
}
// DescribeFenceBindList queries the list of fence binding records.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOFENCE = "UnauthorizedOperation.NoPermissionToStudioFence"
func (c *Client) DescribeFenceBindList(request *DescribeFenceBindListRequest) (response *DescribeFenceBindListResponse, err error) {
	if request == nil {
		request = NewDescribeFenceBindListRequest()
	}
	resp := NewDescribeFenceBindListResponse()
	return resp, c.Send(request, resp)
}
// DescribeFenceBindListWithContext queries the list of fence binding records,
// carrying the given context on the request.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOFENCE = "UnauthorizedOperation.NoPermissionToStudioFence"
func (c *Client) DescribeFenceBindListWithContext(ctx context.Context, request *DescribeFenceBindListRequest) (response *DescribeFenceBindListResponse, err error) {
	if request == nil {
		request = NewDescribeFenceBindListRequest()
	}
	request.SetContext(ctx)
	resp := NewDescribeFenceBindListResponse()
	return resp, c.Send(request, resp)
}
// NewDescribeFenceEventListRequest constructs a DescribeFenceEventListRequest
// bound to the iotexplorer "DescribeFenceEventList" API action.
func NewDescribeFenceEventListRequest() (request *DescribeFenceEventListRequest) {
	req := &DescribeFenceEventListRequest{BaseRequest: &tchttp.BaseRequest{}}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "DescribeFenceEventList")
	return req
}
// NewDescribeFenceEventListResponse constructs an empty response envelope for
// the "DescribeFenceEventList" action.
func NewDescribeFenceEventListResponse() (response *DescribeFenceEventListResponse) {
	resp := &DescribeFenceEventListResponse{BaseResponse: &tchttp.BaseResponse{}}
	return resp
}
// DescribeFenceEventList queries the list of fence alarm events.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_STARTTIMELATERENDTIME = "InvalidParameterValue.StartTimeLaterEndTime"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_FENCENOTEXIST = "ResourceNotFound.FenceNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOFENCE = "UnauthorizedOperation.NoPermissionToStudioFence"
func (c *Client) DescribeFenceEventList(request *DescribeFenceEventListRequest) (response *DescribeFenceEventListResponse, err error) {
	if request == nil {
		request = NewDescribeFenceEventListRequest()
	}
	resp := NewDescribeFenceEventListResponse()
	return resp, c.Send(request, resp)
}
// DescribeFenceEventListWithContext queries the list of fence alarm events,
// carrying the given context on the request.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_STARTTIMELATERENDTIME = "InvalidParameterValue.StartTimeLaterEndTime"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_FENCENOTEXIST = "ResourceNotFound.FenceNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOFENCE = "UnauthorizedOperation.NoPermissionToStudioFence"
func (c *Client) DescribeFenceEventListWithContext(ctx context.Context, request *DescribeFenceEventListRequest) (response *DescribeFenceEventListResponse, err error) {
	if request == nil {
		request = NewDescribeFenceEventListRequest()
	}
	request.SetContext(ctx)
	resp := NewDescribeFenceEventListResponse()
	return resp, c.Send(request, resp)
}
// NewDescribeFirmwareTaskRequest constructs a DescribeFirmwareTaskRequest
// bound to the iotexplorer "DescribeFirmwareTask" API action.
func NewDescribeFirmwareTaskRequest() (request *DescribeFirmwareTaskRequest) {
	req := &DescribeFirmwareTaskRequest{BaseRequest: &tchttp.BaseRequest{}}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "DescribeFirmwareTask")
	return req
}
// NewDescribeFirmwareTaskResponse constructs an empty response envelope for
// the "DescribeFirmwareTask" action.
func NewDescribeFirmwareTaskResponse() (response *DescribeFirmwareTaskResponse) {
	resp := &DescribeFirmwareTaskResponse{BaseResponse: &tchttp.BaseResponse{}}
	return resp
}
// DescribeFirmwareTask queries the firmware upgrade task list.
//
// Possible error codes:
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  RESOURCENOTFOUND_FIRMWARETASKNOTEXIST = "ResourceNotFound.FirmwareTaskNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
func (c *Client) DescribeFirmwareTask(request *DescribeFirmwareTaskRequest) (response *DescribeFirmwareTaskResponse, err error) {
	if request == nil {
		request = NewDescribeFirmwareTaskRequest()
	}
	resp := NewDescribeFirmwareTaskResponse()
	return resp, c.Send(request, resp)
}
// DescribeFirmwareTaskWithContext queries the firmware upgrade task list,
// carrying the given context on the request.
//
// Possible error codes:
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  RESOURCENOTFOUND_FIRMWARETASKNOTEXIST = "ResourceNotFound.FirmwareTaskNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
func (c *Client) DescribeFirmwareTaskWithContext(ctx context.Context, request *DescribeFirmwareTaskRequest) (response *DescribeFirmwareTaskResponse, err error) {
	if request == nil {
		request = NewDescribeFirmwareTaskRequest()
	}
	request.SetContext(ctx)
	resp := NewDescribeFirmwareTaskResponse()
	return resp, c.Send(request, resp)
}
// NewDescribeGatewayBindDevicesRequest constructs a
// DescribeGatewayBindDevicesRequest bound to the iotexplorer
// "DescribeGatewayBindDevices" API action.
func NewDescribeGatewayBindDevicesRequest() (request *DescribeGatewayBindDevicesRequest) {
	req := &DescribeGatewayBindDevicesRequest{BaseRequest: &tchttp.BaseRequest{}}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "DescribeGatewayBindDevices")
	return req
}
// NewDescribeGatewayBindDevicesResponse constructs an empty response envelope
// for the "DescribeGatewayBindDevices" action.
func NewDescribeGatewayBindDevicesResponse() (response *DescribeGatewayBindDevicesResponse) {
	resp := &DescribeGatewayBindDevicesResponse{BaseResponse: &tchttp.BaseResponse{}}
	return resp
}
// DescribeGatewayBindDevices queries the list of sub-devices bound to a
// gateway.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
func (c *Client) DescribeGatewayBindDevices(request *DescribeGatewayBindDevicesRequest) (response *DescribeGatewayBindDevicesResponse, err error) {
	if request == nil {
		request = NewDescribeGatewayBindDevicesRequest()
	}
	resp := NewDescribeGatewayBindDevicesResponse()
	return resp, c.Send(request, resp)
}
// DescribeGatewayBindDevicesWithContext queries the list of sub-devices bound
// to a gateway, carrying the given context on the request.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
func (c *Client) DescribeGatewayBindDevicesWithContext(ctx context.Context, request *DescribeGatewayBindDevicesRequest) (response *DescribeGatewayBindDevicesResponse, err error) {
	if request == nil {
		request = NewDescribeGatewayBindDevicesRequest()
	}
	request.SetContext(ctx)
	resp := NewDescribeGatewayBindDevicesResponse()
	return resp, c.Send(request, resp)
}
// NewDescribeGatewaySubProductsRequest constructs a
// DescribeGatewaySubProductsRequest bound to the iotexplorer
// "DescribeGatewaySubProducts" API action.
func NewDescribeGatewaySubProductsRequest() (request *DescribeGatewaySubProductsRequest) {
	req := &DescribeGatewaySubProductsRequest{BaseRequest: &tchttp.BaseRequest{}}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "DescribeGatewaySubProducts")
	return req
}
// NewDescribeGatewaySubProductsResponse constructs an empty response envelope
// for the "DescribeGatewaySubProducts" action.
func NewDescribeGatewaySubProductsResponse() (response *DescribeGatewaySubProductsResponse) {
	resp := &DescribeGatewaySubProductsResponse{BaseResponse: &tchttp.BaseResponse{}}
	return resp
}
// DescribeGatewaySubProducts queries the sub-products that a gateway can bind
// or unbind.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNSUPPORTEDOPERATION_STAFFPOOLDUPNAMEEXIST = "UnsupportedOperation.StaffPoolDupNameExist"
func (c *Client) DescribeGatewaySubProducts(request *DescribeGatewaySubProductsRequest) (response *DescribeGatewaySubProductsResponse, err error) {
	if request == nil {
		request = NewDescribeGatewaySubProductsRequest()
	}
	resp := NewDescribeGatewaySubProductsResponse()
	return resp, c.Send(request, resp)
}
// DescribeGatewaySubProductsWithContext queries the sub-products that a
// gateway can bind or unbind, carrying the given context on the request.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNSUPPORTEDOPERATION_STAFFPOOLDUPNAMEEXIST = "UnsupportedOperation.StaffPoolDupNameExist"
func (c *Client) DescribeGatewaySubProductsWithContext(ctx context.Context, request *DescribeGatewaySubProductsRequest) (response *DescribeGatewaySubProductsResponse, err error) {
	if request == nil {
		request = NewDescribeGatewaySubProductsRequest()
	}
	request.SetContext(ctx)
	resp := NewDescribeGatewaySubProductsResponse()
	return resp, c.Send(request, resp)
}
// NewDescribeLoRaFrequencyRequest constructs a DescribeLoRaFrequencyRequest
// bound to the iotexplorer "DescribeLoRaFrequency" API action.
func NewDescribeLoRaFrequencyRequest() (request *DescribeLoRaFrequencyRequest) {
	req := &DescribeLoRaFrequencyRequest{BaseRequest: &tchttp.BaseRequest{}}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "DescribeLoRaFrequency")
	return req
}
// NewDescribeLoRaFrequencyResponse constructs an empty response envelope for
// the "DescribeLoRaFrequency" action.
func NewDescribeLoRaFrequencyResponse() (response *DescribeLoRaFrequencyResponse) {
	resp := &DescribeLoRaFrequencyResponse{BaseResponse: &tchttp.BaseResponse{}}
	return resp
}
// DescribeLoRaFrequency queries the details of a custom LoRa frequency plan.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  RESOURCENOTFOUND_STUDIOLORAFREQNOTEXIST = "ResourceNotFound.StudioLoRaFreqNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
func (c *Client) DescribeLoRaFrequency(request *DescribeLoRaFrequencyRequest) (response *DescribeLoRaFrequencyResponse, err error) {
	if request == nil {
		request = NewDescribeLoRaFrequencyRequest()
	}
	resp := NewDescribeLoRaFrequencyResponse()
	return resp, c.Send(request, resp)
}
// DescribeLoRaFrequencyWithContext queries the details of a custom LoRa
// frequency plan, carrying the given context on the request.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  RESOURCENOTFOUND_STUDIOLORAFREQNOTEXIST = "ResourceNotFound.StudioLoRaFreqNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
func (c *Client) DescribeLoRaFrequencyWithContext(ctx context.Context, request *DescribeLoRaFrequencyRequest) (response *DescribeLoRaFrequencyResponse, err error) {
	if request == nil {
		request = NewDescribeLoRaFrequencyRequest()
	}
	request.SetContext(ctx)
	resp := NewDescribeLoRaFrequencyResponse()
	return resp, c.Send(request, resp)
}
// NewDescribeModelDefinitionRequest constructs a
// DescribeModelDefinitionRequest bound to the iotexplorer
// "DescribeModelDefinition" API action.
func NewDescribeModelDefinitionRequest() (request *DescribeModelDefinitionRequest) {
	req := &DescribeModelDefinitionRequest{BaseRequest: &tchttp.BaseRequest{}}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "DescribeModelDefinition")
	return req
}
// NewDescribeModelDefinitionResponse constructs an empty response envelope for
// the "DescribeModelDefinition" action.
func NewDescribeModelDefinitionResponse() (response *DescribeModelDefinitionResponse) {
	resp := &DescribeModelDefinitionResponse{BaseResponse: &tchttp.BaseResponse{}}
	return resp
}
// DescribeModelDefinition
// Queries the data template (model definition) configured for a product.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_DEVICENAMEINVALID = "InvalidParameterValue.DeviceNameInvalid"
//  INVALIDPARAMETERVALUE_MODELDEFINEDONTMATCHTEMPLATE = "InvalidParameterValue.ModelDefineDontMatchTemplate"
//  INVALIDPARAMETERVALUE_MODELDEFINEDUPID = "InvalidParameterValue.ModelDefineDupID"
//  INVALIDPARAMETERVALUE_MODELDEFINEERRORMODEL = "InvalidParameterValue.ModelDefineErrorModel"
//  INVALIDPARAMETERVALUE_MODELDEFINEERRORTYPE = "InvalidParameterValue.ModelDefineErrorType"
//  INVALIDPARAMETERVALUE_MODELDEFINEEVENTPARAMSDUPID = "InvalidParameterValue.ModelDefineEventParamsDupID"
//  INVALIDPARAMETERVALUE_MODELDEFINEEVENTPARAMSEXCEEDLIMIT = "InvalidParameterValue.ModelDefineEventParamsExceedLimit"
//  INVALIDPARAMETERVALUE_MODELDEFINEEVENTPROPNAMEERROR = "InvalidParameterValue.ModelDefineEventPropNameError"
//  INVALIDPARAMETERVALUE_MODELDEFINEINVALID = "InvalidParameterValue.ModelDefineInvalid"
//  INVALIDPARAMETERVALUE_MODELDEFINENIL = "InvalidParameterValue.ModelDefineNil"
//  INVALIDPARAMETERVALUE_MODELDEFINEPROPBOOLMAPPINGERROR = "InvalidParameterValue.ModelDefinePropBoolMappingError"
//  INVALIDPARAMETERVALUE_MODELDEFINEPROPENUMMAPPINGERROR = "InvalidParameterValue.ModelDefinePropEnumMappingError"
//  INVALIDPARAMETERVALUE_MODELDEFINEPROPRANGEERROR = "InvalidParameterValue.ModelDefinePropRangeError"
//  INVALIDPARAMETERVALUE_MODELDEFINEPROPRANGEOVERFLOW = "InvalidParameterValue.ModelDefinePropRangeOverflow"
//  INVALIDPARAMETERVALUE_PRODUCTIDINVALID = "InvalidParameterValue.ProductIDInvalid"
//  INVALIDPARAMETERVALUE_PRODUCTPARMSERROR = "InvalidParameterValue.ProductParmsError"
//  RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
//  RESOURCENOTFOUND_PRODUCTORDEVICENOTEXIST = "ResourceNotFound.ProductOrDeviceNotExist"
//  RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOPROJECT = "UnauthorizedOperation.NoPermissionToProject"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) DescribeModelDefinition(request *DescribeModelDefinitionRequest) (response *DescribeModelDefinitionResponse, err error) {
    if request == nil {
        request = NewDescribeModelDefinitionRequest()
    }
    resp := NewDescribeModelDefinitionResponse()
    return resp, c.Send(request, resp)
}
// DescribeModelDefinition
// Queries the data template (model definition) configured for a product.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_DEVICENAMEINVALID = "InvalidParameterValue.DeviceNameInvalid"
//  INVALIDPARAMETERVALUE_MODELDEFINEDONTMATCHTEMPLATE = "InvalidParameterValue.ModelDefineDontMatchTemplate"
//  INVALIDPARAMETERVALUE_MODELDEFINEDUPID = "InvalidParameterValue.ModelDefineDupID"
//  INVALIDPARAMETERVALUE_MODELDEFINEERRORMODEL = "InvalidParameterValue.ModelDefineErrorModel"
//  INVALIDPARAMETERVALUE_MODELDEFINEERRORTYPE = "InvalidParameterValue.ModelDefineErrorType"
//  INVALIDPARAMETERVALUE_MODELDEFINEEVENTPARAMSDUPID = "InvalidParameterValue.ModelDefineEventParamsDupID"
//  INVALIDPARAMETERVALUE_MODELDEFINEEVENTPARAMSEXCEEDLIMIT = "InvalidParameterValue.ModelDefineEventParamsExceedLimit"
//  INVALIDPARAMETERVALUE_MODELDEFINEEVENTPROPNAMEERROR = "InvalidParameterValue.ModelDefineEventPropNameError"
//  INVALIDPARAMETERVALUE_MODELDEFINEINVALID = "InvalidParameterValue.ModelDefineInvalid"
//  INVALIDPARAMETERVALUE_MODELDEFINENIL = "InvalidParameterValue.ModelDefineNil"
//  INVALIDPARAMETERVALUE_MODELDEFINEPROPBOOLMAPPINGERROR = "InvalidParameterValue.ModelDefinePropBoolMappingError"
//  INVALIDPARAMETERVALUE_MODELDEFINEPROPENUMMAPPINGERROR = "InvalidParameterValue.ModelDefinePropEnumMappingError"
//  INVALIDPARAMETERVALUE_MODELDEFINEPROPRANGEERROR = "InvalidParameterValue.ModelDefinePropRangeError"
//  INVALIDPARAMETERVALUE_MODELDEFINEPROPRANGEOVERFLOW = "InvalidParameterValue.ModelDefinePropRangeOverflow"
//  INVALIDPARAMETERVALUE_PRODUCTIDINVALID = "InvalidParameterValue.ProductIDInvalid"
//  INVALIDPARAMETERVALUE_PRODUCTPARMSERROR = "InvalidParameterValue.ProductParmsError"
//  RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
//  RESOURCENOTFOUND_PRODUCTORDEVICENOTEXIST = "ResourceNotFound.ProductOrDeviceNotExist"
//  RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOPROJECT = "UnauthorizedOperation.NoPermissionToProject"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) DescribeModelDefinitionWithContext(ctx context.Context, request *DescribeModelDefinitionRequest) (response *DescribeModelDefinitionResponse, err error) {
    if request == nil {
        request = NewDescribeModelDefinitionRequest()
    }
    // Attach the caller's context so cancellation/deadlines propagate.
    request.SetContext(ctx)
    resp := NewDescribeModelDefinitionResponse()
    return resp, c.Send(request, resp)
}
// NewDescribePositionFenceListRequest constructs a DescribePositionFenceList
// request with its base request and API info initialized.
func NewDescribePositionFenceListRequest() (request *DescribePositionFenceListRequest) {
    req := &DescribePositionFenceListRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "DescribePositionFenceList")
    return req
}
// NewDescribePositionFenceListResponse constructs an empty
// DescribePositionFenceList response ready for decoding.
func NewDescribePositionFenceListResponse() (response *DescribePositionFenceListResponse) {
    return &DescribePositionFenceListResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// DescribePositionFenceList
// Gets the list of geofences.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_SPACENOTEXIST = "ResourceNotFound.SpaceNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
func (c *Client) DescribePositionFenceList(request *DescribePositionFenceListRequest) (response *DescribePositionFenceListResponse, err error) {
    if request == nil {
        request = NewDescribePositionFenceListRequest()
    }
    resp := NewDescribePositionFenceListResponse()
    return resp, c.Send(request, resp)
}
// DescribePositionFenceList
// Gets the list of geofences.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_SPACENOTEXIST = "ResourceNotFound.SpaceNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
func (c *Client) DescribePositionFenceListWithContext(ctx context.Context, request *DescribePositionFenceListRequest) (response *DescribePositionFenceListResponse, err error) {
    if request == nil {
        request = NewDescribePositionFenceListRequest()
    }
    // Attach the caller's context so cancellation/deadlines propagate.
    request.SetContext(ctx)
    resp := NewDescribePositionFenceListResponse()
    return resp, c.Send(request, resp)
}
// NewDescribeProjectRequest constructs a DescribeProject request with its
// base request and API info initialized.
func NewDescribeProjectRequest() (request *DescribeProjectRequest) {
    req := &DescribeProjectRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "DescribeProject")
    return req
}
// NewDescribeProjectResponse constructs an empty DescribeProject response
// ready for decoding.
func NewDescribeProjectResponse() (response *DescribeProjectResponse) {
    return &DescribeProjectResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// DescribeProject
// Queries project details.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOPROJECT = "UnauthorizedOperation.NoPermissionToProject"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) DescribeProject(request *DescribeProjectRequest) (response *DescribeProjectResponse, err error) {
    if request == nil {
        request = NewDescribeProjectRequest()
    }
    resp := NewDescribeProjectResponse()
    return resp, c.Send(request, resp)
}
// DescribeProject
// Queries project details.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOPROJECT = "UnauthorizedOperation.NoPermissionToProject"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) DescribeProjectWithContext(ctx context.Context, request *DescribeProjectRequest) (response *DescribeProjectResponse, err error) {
    if request == nil {
        request = NewDescribeProjectRequest()
    }
    // Attach the caller's context so cancellation/deadlines propagate.
    request.SetContext(ctx)
    resp := NewDescribeProjectResponse()
    return resp, c.Send(request, resp)
}
// NewDescribeSpaceFenceEventListRequest constructs a
// DescribeSpaceFenceEventList request with its base request and API info
// initialized.
func NewDescribeSpaceFenceEventListRequest() (request *DescribeSpaceFenceEventListRequest) {
    req := &DescribeSpaceFenceEventListRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "DescribeSpaceFenceEventList")
    return req
}
// NewDescribeSpaceFenceEventListResponse constructs an empty
// DescribeSpaceFenceEventList response ready for decoding.
func NewDescribeSpaceFenceEventListResponse() (response *DescribeSpaceFenceEventListResponse) {
    return &DescribeSpaceFenceEventListResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// DescribeSpaceFenceEventList
// Gets the list of fence alarm events in a location space.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_STARTTIMELATERENDTIME = "InvalidParameterValue.StartTimeLaterEndTime"
//  RESOURCENOTFOUND_SPACENOTEXIST = "ResourceNotFound.SpaceNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
func (c *Client) DescribeSpaceFenceEventList(request *DescribeSpaceFenceEventListRequest) (response *DescribeSpaceFenceEventListResponse, err error) {
    if request == nil {
        request = NewDescribeSpaceFenceEventListRequest()
    }
    resp := NewDescribeSpaceFenceEventListResponse()
    return resp, c.Send(request, resp)
}
// DescribeSpaceFenceEventList
// Gets the list of fence alarm events in a location space.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_STARTTIMELATERENDTIME = "InvalidParameterValue.StartTimeLaterEndTime"
//  RESOURCENOTFOUND_SPACENOTEXIST = "ResourceNotFound.SpaceNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
func (c *Client) DescribeSpaceFenceEventListWithContext(ctx context.Context, request *DescribeSpaceFenceEventListRequest) (response *DescribeSpaceFenceEventListResponse, err error) {
    if request == nil {
        request = NewDescribeSpaceFenceEventListRequest()
    }
    // Attach the caller's context so cancellation/deadlines propagate.
    request.SetContext(ctx)
    resp := NewDescribeSpaceFenceEventListResponse()
    return resp, c.Send(request, resp)
}
// NewDescribeStudioProductRequest constructs a DescribeStudioProduct request
// with its base request and API info initialized.
func NewDescribeStudioProductRequest() (request *DescribeStudioProductRequest) {
    req := &DescribeStudioProductRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "DescribeStudioProduct")
    return req
}
// NewDescribeStudioProductResponse constructs an empty DescribeStudioProduct
// response ready for decoding.
func NewDescribeStudioProductResponse() (response *DescribeStudioProductResponse) {
    return &DescribeStudioProductResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// DescribeStudioProduct
// Provides the ability to view detailed product information, including
// important parameters such as product ID, data protocol, and
// authentication type.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) DescribeStudioProduct(request *DescribeStudioProductRequest) (response *DescribeStudioProductResponse, err error) {
    if request == nil {
        request = NewDescribeStudioProductRequest()
    }
    resp := NewDescribeStudioProductResponse()
    return resp, c.Send(request, resp)
}
// DescribeStudioProduct
// Provides the ability to view detailed product information, including
// important parameters such as product ID, data protocol, and
// authentication type.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) DescribeStudioProductWithContext(ctx context.Context, request *DescribeStudioProductRequest) (response *DescribeStudioProductResponse, err error) {
    if request == nil {
        request = NewDescribeStudioProductRequest()
    }
    // Attach the caller's context so cancellation/deadlines propagate.
    request.SetContext(ctx)
    resp := NewDescribeStudioProductResponse()
    return resp, c.Send(request, resp)
}
// NewDescribeTopicPolicyRequest constructs a DescribeTopicPolicy request
// with its base request and API info initialized.
func NewDescribeTopicPolicyRequest() (request *DescribeTopicPolicyRequest) {
    req := &DescribeTopicPolicyRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "DescribeTopicPolicy")
    return req
}
// NewDescribeTopicPolicyResponse constructs an empty DescribeTopicPolicy
// response ready for decoding.
func NewDescribeTopicPolicyResponse() (response *DescribeTopicPolicyResponse) {
    return &DescribeTopicPolicyResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// DescribeTopicPolicy
// This API (DescribeTopicPolicy) is used to view Topic details.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  RESOURCENOTFOUND_TOPICPOLICYNOTEXIST = "ResourceNotFound.TopicPolicyNotExist"
func (c *Client) DescribeTopicPolicy(request *DescribeTopicPolicyRequest) (response *DescribeTopicPolicyResponse, err error) {
    if request == nil {
        request = NewDescribeTopicPolicyRequest()
    }
    resp := NewDescribeTopicPolicyResponse()
    return resp, c.Send(request, resp)
}
// DescribeTopicPolicy
// This API (DescribeTopicPolicy) is used to view Topic details.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  RESOURCENOTFOUND_TOPICPOLICYNOTEXIST = "ResourceNotFound.TopicPolicyNotExist"
func (c *Client) DescribeTopicPolicyWithContext(ctx context.Context, request *DescribeTopicPolicyRequest) (response *DescribeTopicPolicyResponse, err error) {
    if request == nil {
        request = NewDescribeTopicPolicyRequest()
    }
    // Attach the caller's context so cancellation/deadlines propagate.
    request.SetContext(ctx)
    resp := NewDescribeTopicPolicyResponse()
    return resp, c.Send(request, resp)
}
// NewDescribeTopicRuleRequest constructs a DescribeTopicRule request with
// its base request and API info initialized.
func NewDescribeTopicRuleRequest() (request *DescribeTopicRuleRequest) {
    req := &DescribeTopicRuleRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "DescribeTopicRule")
    return req
}
// NewDescribeTopicRuleResponse constructs an empty DescribeTopicRule
// response ready for decoding.
func NewDescribeTopicRuleResponse() (response *DescribeTopicRuleResponse) {
    return &DescribeTopicRuleResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// DescribeTopicRule
// Gets rule information.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_ACTIONNIL = "InvalidParameterValue.ActionNil"
//  INVALIDPARAMETERVALUE_RULENUMBERBEYONDLIMIT = "InvalidParameterValue.RuleNumberBeyondLimit"
//  INVALIDPARAMETERVALUE_TOPICRULESQLNOTEDITED = "InvalidParameterValue.TopicRuleSqlNotEdited"
//  RESOURCENOTFOUND_TOPICRULENOTEXIST = "ResourceNotFound.TopicRuleNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNSUPPORTEDOPERATION = "UnsupportedOperation"
func (c *Client) DescribeTopicRule(request *DescribeTopicRuleRequest) (response *DescribeTopicRuleResponse, err error) {
    if request == nil {
        request = NewDescribeTopicRuleRequest()
    }
    resp := NewDescribeTopicRuleResponse()
    return resp, c.Send(request, resp)
}
// DescribeTopicRule
// Gets rule information.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_ACTIONNIL = "InvalidParameterValue.ActionNil"
//  INVALIDPARAMETERVALUE_RULENUMBERBEYONDLIMIT = "InvalidParameterValue.RuleNumberBeyondLimit"
//  INVALIDPARAMETERVALUE_TOPICRULESQLNOTEDITED = "InvalidParameterValue.TopicRuleSqlNotEdited"
//  RESOURCENOTFOUND_TOPICRULENOTEXIST = "ResourceNotFound.TopicRuleNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNSUPPORTEDOPERATION = "UnsupportedOperation"
func (c *Client) DescribeTopicRuleWithContext(ctx context.Context, request *DescribeTopicRuleRequest) (response *DescribeTopicRuleResponse, err error) {
    if request == nil {
        request = NewDescribeTopicRuleRequest()
    }
    // Attach the caller's context so cancellation/deadlines propagate.
    request.SetContext(ctx)
    resp := NewDescribeTopicRuleResponse()
    return resp, c.Send(request, resp)
}
// NewDirectBindDeviceInFamilyRequest constructs a DirectBindDeviceInFamily
// request with its base request and API info initialized.
func NewDirectBindDeviceInFamilyRequest() (request *DirectBindDeviceInFamilyRequest) {
    req := &DirectBindDeviceInFamilyRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "DirectBindDeviceInFamily")
    return req
}
// NewDirectBindDeviceInFamilyResponse constructs an empty
// DirectBindDeviceInFamily response ready for decoding.
func NewDirectBindDeviceInFamilyResponse() (response *DirectBindDeviceInFamilyResponse) {
    return &DirectBindDeviceInFamilyResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// DirectBindDeviceInFamily
// Directly binds a device to a family.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETER = "InvalidParameter"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_PRODUCTORDEVICENOTEXIST = "ResourceNotFound.ProductOrDeviceNotExist"
//  RESOURCENOTFOUND_PRODUCTRESOURCENOTEXIST = "ResourceNotFound.ProductResourceNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION_APPNOPERMISSION = "UnauthorizedOperation.AppNoPermission"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOFAMILY = "UnauthorizedOperation.NoPermissionToFamily"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) DirectBindDeviceInFamily(request *DirectBindDeviceInFamilyRequest) (response *DirectBindDeviceInFamilyResponse, err error) {
    if request == nil {
        request = NewDirectBindDeviceInFamilyRequest()
    }
    resp := NewDirectBindDeviceInFamilyResponse()
    return resp, c.Send(request, resp)
}
// DirectBindDeviceInFamily
// Directly binds a device to a family.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETER = "InvalidParameter"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_PRODUCTORDEVICENOTEXIST = "ResourceNotFound.ProductOrDeviceNotExist"
//  RESOURCENOTFOUND_PRODUCTRESOURCENOTEXIST = "ResourceNotFound.ProductResourceNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION_APPNOPERMISSION = "UnauthorizedOperation.AppNoPermission"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOFAMILY = "UnauthorizedOperation.NoPermissionToFamily"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) DirectBindDeviceInFamilyWithContext(ctx context.Context, request *DirectBindDeviceInFamilyRequest) (response *DirectBindDeviceInFamilyResponse, err error) {
    if request == nil {
        request = NewDirectBindDeviceInFamilyRequest()
    }
    // Attach the caller's context so cancellation/deadlines propagate.
    request.SetContext(ctx)
    resp := NewDirectBindDeviceInFamilyResponse()
    return resp, c.Send(request, resp)
}
// NewDisableTopicRuleRequest constructs a DisableTopicRule request with its
// base request and API info initialized.
func NewDisableTopicRuleRequest() (request *DisableTopicRuleRequest) {
    req := &DisableTopicRuleRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "DisableTopicRule")
    return req
}
// NewDisableTopicRuleResponse constructs an empty DisableTopicRule response
// ready for decoding.
func NewDisableTopicRuleResponse() (response *DisableTopicRuleResponse) {
    return &DisableTopicRuleResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// DisableTopicRule
// Disables a rule.
//
// Error codes that may be returned:
//  FAILEDOPERATION_RULEALREADYDISABLED = "FailedOperation.RuleAlreadyDisabled"
//  INTERNALERROR = "InternalError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_ACTIONNIL = "InvalidParameterValue.ActionNil"
//  INVALIDPARAMETERVALUE_RULENUMBERBEYONDLIMIT = "InvalidParameterValue.RuleNumberBeyondLimit"
//  INVALIDPARAMETERVALUE_TOPICRULESQLNOTEDITED = "InvalidParameterValue.TopicRuleSqlNotEdited"
//  RESOURCENOTFOUND_TOPICRULENOTEXIST = "ResourceNotFound.TopicRuleNotExist"
//  UNSUPPORTEDOPERATION = "UnsupportedOperation"
func (c *Client) DisableTopicRule(request *DisableTopicRuleRequest) (response *DisableTopicRuleResponse, err error) {
    if request == nil {
        request = NewDisableTopicRuleRequest()
    }
    resp := NewDisableTopicRuleResponse()
    return resp, c.Send(request, resp)
}
// DisableTopicRule
// Disables a rule.
//
// Error codes that may be returned:
//  FAILEDOPERATION_RULEALREADYDISABLED = "FailedOperation.RuleAlreadyDisabled"
//  INTERNALERROR = "InternalError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_ACTIONNIL = "InvalidParameterValue.ActionNil"
//  INVALIDPARAMETERVALUE_RULENUMBERBEYONDLIMIT = "InvalidParameterValue.RuleNumberBeyondLimit"
//  INVALIDPARAMETERVALUE_TOPICRULESQLNOTEDITED = "InvalidParameterValue.TopicRuleSqlNotEdited"
//  RESOURCENOTFOUND_TOPICRULENOTEXIST = "ResourceNotFound.TopicRuleNotExist"
//  UNSUPPORTEDOPERATION = "UnsupportedOperation"
func (c *Client) DisableTopicRuleWithContext(ctx context.Context, request *DisableTopicRuleRequest) (response *DisableTopicRuleResponse, err error) {
    if request == nil {
        request = NewDisableTopicRuleRequest()
    }
    // Attach the caller's context so cancellation/deadlines propagate.
    request.SetContext(ctx)
    resp := NewDisableTopicRuleResponse()
    return resp, c.Send(request, resp)
}
// NewEnableTopicRuleRequest constructs an EnableTopicRule request with its
// base request and API info initialized.
func NewEnableTopicRuleRequest() (request *EnableTopicRuleRequest) {
    req := &EnableTopicRuleRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "EnableTopicRule")
    return req
}
// NewEnableTopicRuleResponse constructs an empty EnableTopicRule response
// ready for decoding.
func NewEnableTopicRuleResponse() (response *EnableTopicRuleResponse) {
    return &EnableTopicRuleResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// EnableTopicRule
// Enables a rule.
//
// Error codes that may be returned:
//  FAILEDOPERATION_RULEALREADYENABLED = "FailedOperation.RuleAlreadyEnabled"
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_ACTIONNIL = "InvalidParameterValue.ActionNil"
//  INVALIDPARAMETERVALUE_CHECKFORWARDURLFAIL = "InvalidParameterValue.CheckForwardURLFail"
//  INVALIDPARAMETERVALUE_INVALIDSQL = "InvalidParameterValue.InvalidSQL"
//  INVALIDPARAMETERVALUE_RULENUMBERBEYONDLIMIT = "InvalidParameterValue.RuleNumberBeyondLimit"
//  RESOURCENOTFOUND_TOPICRULENOTEXIST = "ResourceNotFound.TopicRuleNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNSUPPORTEDOPERATION = "UnsupportedOperation"
func (c *Client) EnableTopicRule(request *EnableTopicRuleRequest) (response *EnableTopicRuleResponse, err error) {
    if request == nil {
        request = NewEnableTopicRuleRequest()
    }
    resp := NewEnableTopicRuleResponse()
    return resp, c.Send(request, resp)
}
// EnableTopicRule
// Enables a rule.
//
// Error codes that may be returned:
//  FAILEDOPERATION_RULEALREADYENABLED = "FailedOperation.RuleAlreadyEnabled"
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_ACTIONNIL = "InvalidParameterValue.ActionNil"
//  INVALIDPARAMETERVALUE_CHECKFORWARDURLFAIL = "InvalidParameterValue.CheckForwardURLFail"
//  INVALIDPARAMETERVALUE_INVALIDSQL = "InvalidParameterValue.InvalidSQL"
//  INVALIDPARAMETERVALUE_RULENUMBERBEYONDLIMIT = "InvalidParameterValue.RuleNumberBeyondLimit"
//  RESOURCENOTFOUND_TOPICRULENOTEXIST = "ResourceNotFound.TopicRuleNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNSUPPORTEDOPERATION = "UnsupportedOperation"
func (c *Client) EnableTopicRuleWithContext(ctx context.Context, request *EnableTopicRuleRequest) (response *EnableTopicRuleResponse, err error) {
    if request == nil {
        request = NewEnableTopicRuleRequest()
    }
    // Attach the caller's context so cancellation/deadlines propagate.
    request.SetContext(ctx)
    resp := NewEnableTopicRuleResponse()
    return resp, c.Send(request, resp)
}
// NewGenSingleDeviceSignatureOfPublicRequest constructs a
// GenSingleDeviceSignatureOfPublic request with its base request and API
// info initialized.
func NewGenSingleDeviceSignatureOfPublicRequest() (request *GenSingleDeviceSignatureOfPublicRequest) {
    req := &GenSingleDeviceSignatureOfPublicRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "GenSingleDeviceSignatureOfPublic")
    return req
}
// NewGenSingleDeviceSignatureOfPublicResponse constructs an empty
// GenSingleDeviceSignatureOfPublic response ready for decoding.
func NewGenSingleDeviceSignatureOfPublicResponse() (response *GenSingleDeviceSignatureOfPublicResponse) {
    return &GenSingleDeviceSignatureOfPublicResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// GenSingleDeviceSignatureOfPublic
// None.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_PRODUCTORDEVICENOTEXIST = "ResourceNotFound.ProductOrDeviceNotExist"
//  RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOPROJECT = "UnauthorizedOperation.NoPermissionToProject"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) GenSingleDeviceSignatureOfPublic(request *GenSingleDeviceSignatureOfPublicRequest) (response *GenSingleDeviceSignatureOfPublicResponse, err error) {
    if request == nil {
        request = NewGenSingleDeviceSignatureOfPublicRequest()
    }
    resp := NewGenSingleDeviceSignatureOfPublicResponse()
    return resp, c.Send(request, resp)
}
// GenSingleDeviceSignatureOfPublic
// None.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_PRODUCTORDEVICENOTEXIST = "ResourceNotFound.ProductOrDeviceNotExist"
//  RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOPROJECT = "UnauthorizedOperation.NoPermissionToProject"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) GenSingleDeviceSignatureOfPublicWithContext(ctx context.Context, request *GenSingleDeviceSignatureOfPublicRequest) (response *GenSingleDeviceSignatureOfPublicResponse, err error) {
    if request == nil {
        request = NewGenSingleDeviceSignatureOfPublicRequest()
    }
    // Attach the caller's context so cancellation/deadlines propagate.
    request.SetContext(ctx)
    resp := NewGenSingleDeviceSignatureOfPublicResponse()
    return resp, c.Send(request, resp)
}
// NewGetBatchProductionsListRequest constructs a GetBatchProductionsList
// request with its base request and API info initialized.
func NewGetBatchProductionsListRequest() (request *GetBatchProductionsListRequest) {
    req := &GetBatchProductionsListRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "GetBatchProductionsList")
    return req
}
// NewGetBatchProductionsListResponse constructs an empty
// GetBatchProductionsList response ready for decoding.
func NewGetBatchProductionsListResponse() (response *GetBatchProductionsListResponse) {
    return &GetBatchProductionsListResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// GetBatchProductionsList
// Lists batch production data.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INVALIDPARAMETER = "InvalidParameter"
//  RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOPROJECT = "UnauthorizedOperation.NoPermissionToProject"
func (c *Client) GetBatchProductionsList(request *GetBatchProductionsListRequest) (response *GetBatchProductionsListResponse, err error) {
    if request == nil {
        request = NewGetBatchProductionsListRequest()
    }
    resp := NewGetBatchProductionsListResponse()
    return resp, c.Send(request, resp)
}
// GetBatchProductionsList
// Lists batch production data.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INVALIDPARAMETER = "InvalidParameter"
//  RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOPROJECT = "UnauthorizedOperation.NoPermissionToProject"
func (c *Client) GetBatchProductionsListWithContext(ctx context.Context, request *GetBatchProductionsListRequest) (response *GetBatchProductionsListResponse, err error) {
    if request == nil {
        request = NewGetBatchProductionsListRequest()
    }
    // Attach the caller's context so cancellation/deadlines propagate.
    request.SetContext(ctx)
    resp := NewGetBatchProductionsListResponse()
    return resp, c.Send(request, resp)
}
// NewGetCOSURLRequest constructs a GetCOSURL request with its base request
// and API info initialized.
func NewGetCOSURLRequest() (request *GetCOSURLRequest) {
    req := &GetCOSURLRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "GetCOSURL")
    return req
}
// NewGetCOSURLResponse constructs an empty GetCOSURL response ready for
// decoding.
func NewGetCOSURLResponse() (response *GetCOSURLResponse) {
    return &GetCOSURLResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// GetCOSURL
// This API (GetCOSURL) is used to get the COS URL where firmware is stored.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) GetCOSURL(request *GetCOSURLRequest) (response *GetCOSURLResponse, err error) {
    if request == nil {
        request = NewGetCOSURLRequest()
    }
    resp := NewGetCOSURLResponse()
    return resp, c.Send(request, resp)
}
// GetCOSURL
// This API (GetCOSURL) is used to get the COS URL where firmware is stored.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) GetCOSURLWithContext(ctx context.Context, request *GetCOSURLRequest) (response *GetCOSURLResponse, err error) {
    if request == nil {
        request = NewGetCOSURLRequest()
    }
    // Attach the caller's context so cancellation/deadlines propagate.
    request.SetContext(ctx)
    resp := NewGetCOSURLResponse()
    return resp, c.Send(request, resp)
}
// NewGetDeviceListRequest constructs a GetDeviceList request with its base
// request and API info initialized.
func NewGetDeviceListRequest() (request *GetDeviceListRequest) {
    req := &GetDeviceListRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "GetDeviceList")
    return req
}
// NewGetDeviceListResponse constructs an empty GetDeviceList response ready
// for decoding.
func NewGetDeviceListResponse() (response *GetDeviceListResponse) {
    return &GetDeviceListResponse{BaseResponse: &tchttp.BaseResponse{}}
}
// GetDeviceList
// Used to query the list of devices under a product.
//
// Error codes that may be returned:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) GetDeviceList(request *GetDeviceListRequest) (response *GetDeviceListResponse, err error) {
    if request == nil {
        request = NewGetDeviceListRequest()
    }
    resp := NewGetDeviceListResponse()
    return resp, c.Send(request, resp)
}
// GetDeviceList
// 用于查询某个产品下的设备列表
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
// INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
// RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
// UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) GetDeviceListWithContext(ctx context.Context, request *GetDeviceListRequest) (response *GetDeviceListResponse, err error) {
if request == nil {
request = NewGetDeviceListRequest()
}
request.SetContext(ctx)
response = NewGetDeviceListResponse()
err = c.Send(request, response)
return
}
func NewGetDeviceLocationHistoryRequest() (request *GetDeviceLocationHistoryRequest) {
request = &GetDeviceLocationHistoryRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("iotexplorer", APIVersion, "GetDeviceLocationHistory")
return
}
func NewGetDeviceLocationHistoryResponse() (response *GetDeviceLocationHistoryResponse) {
response = &GetDeviceLocationHistoryResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// GetDeviceLocationHistory
// 获取设备历史位置
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// INVALIDPARAMETERVALUE_STARTTIMELATERENDTIME = "InvalidParameterValue.StartTimeLaterEndTime"
// RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
// RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) GetDeviceLocationHistory(request *GetDeviceLocationHistoryRequest) (response *GetDeviceLocationHistoryResponse, err error) {
if request == nil {
request = NewGetDeviceLocationHistoryRequest()
}
response = NewGetDeviceLocationHistoryResponse()
err = c.Send(request, response)
return
}
// GetDeviceLocationHistory
// 获取设备历史位置
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// INVALIDPARAMETERVALUE_STARTTIMELATERENDTIME = "InvalidParameterValue.StartTimeLaterEndTime"
// RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
// RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) GetDeviceLocationHistoryWithContext(ctx context.Context, request *GetDeviceLocationHistoryRequest) (response *GetDeviceLocationHistoryResponse, err error) {
if request == nil {
request = NewGetDeviceLocationHistoryRequest()
}
request.SetContext(ctx)
response = NewGetDeviceLocationHistoryResponse()
err = c.Send(request, response)
return
}
func NewGetFamilyDeviceUserListRequest() (request *GetFamilyDeviceUserListRequest) {
request = &GetFamilyDeviceUserListRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("iotexplorer", APIVersion, "GetFamilyDeviceUserList")
return
}
func NewGetFamilyDeviceUserListResponse() (response *GetFamilyDeviceUserListResponse) {
response = &GetFamilyDeviceUserListResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// GetFamilyDeviceUserList
// 用于获取设备绑定的用户列表
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// INVALIDPARAMETERVALUE_DEVICENAMEINVALID = "InvalidParameterValue.DeviceNameInvalid"
// INVALIDPARAMETERVALUE_DEVICENOTEXIST = "InvalidParameterValue.DeviceNotExist"
// INVALIDPARAMETERVALUE_PRODUCTIDINVALID = "InvalidParameterValue.ProductIDInvalid"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) GetFamilyDeviceUserList(request *GetFamilyDeviceUserListRequest) (response *GetFamilyDeviceUserListResponse, err error) {
if request == nil {
request = NewGetFamilyDeviceUserListRequest()
}
response = NewGetFamilyDeviceUserListResponse()
err = c.Send(request, response)
return
}
// GetFamilyDeviceUserList
// 用于获取设备绑定的用户列表
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// INVALIDPARAMETERVALUE_DEVICENAMEINVALID = "InvalidParameterValue.DeviceNameInvalid"
// INVALIDPARAMETERVALUE_DEVICENOTEXIST = "InvalidParameterValue.DeviceNotExist"
// INVALIDPARAMETERVALUE_PRODUCTIDINVALID = "InvalidParameterValue.ProductIDInvalid"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) GetFamilyDeviceUserListWithContext(ctx context.Context, request *GetFamilyDeviceUserListRequest) (response *GetFamilyDeviceUserListResponse, err error) {
if request == nil {
request = NewGetFamilyDeviceUserListRequest()
}
request.SetContext(ctx)
response = NewGetFamilyDeviceUserListResponse()
err = c.Send(request, response)
return
}
func NewGetGatewaySubDeviceListRequest() (request *GetGatewaySubDeviceListRequest) {
request = &GetGatewaySubDeviceListRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("iotexplorer", APIVersion, "GetGatewaySubDeviceList")
return
}
func NewGetGatewaySubDeviceListResponse() (response *GetGatewaySubDeviceListResponse) {
response = &GetGatewaySubDeviceListResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// GetGatewaySubDeviceList
// 获取指定网关设备的子设备列表
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOINSTANCE = "UnauthorizedOperation.NoPermissionToInstance"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) GetGatewaySubDeviceList(request *GetGatewaySubDeviceListRequest) (response *GetGatewaySubDeviceListResponse, err error) {
if request == nil {
request = NewGetGatewaySubDeviceListRequest()
}
response = NewGetGatewaySubDeviceListResponse()
err = c.Send(request, response)
return
}
// GetGatewaySubDeviceList
// 获取指定网关设备的子设备列表
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOINSTANCE = "UnauthorizedOperation.NoPermissionToInstance"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) GetGatewaySubDeviceListWithContext(ctx context.Context, request *GetGatewaySubDeviceListRequest) (response *GetGatewaySubDeviceListResponse, err error) {
if request == nil {
request = NewGetGatewaySubDeviceListRequest()
}
request.SetContext(ctx)
response = NewGetGatewaySubDeviceListResponse()
err = c.Send(request, response)
return
}
func NewGetLoRaGatewayListRequest() (request *GetLoRaGatewayListRequest) {
request = &GetLoRaGatewayListRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("iotexplorer", APIVersion, "GetLoRaGatewayList")
return
}
func NewGetLoRaGatewayListResponse() (response *GetLoRaGatewayListResponse) {
response = &GetLoRaGatewayListResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// GetLoRaGatewayList
// 获取 LoRa 网关列表接口
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
// INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETER = "InvalidParameter"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
func (c *Client) GetLoRaGatewayList(request *GetLoRaGatewayListRequest) (response *GetLoRaGatewayListResponse, err error) {
if request == nil {
request = NewGetLoRaGatewayListRequest()
}
response = NewGetLoRaGatewayListResponse()
err = c.Send(request, response)
return
}
// GetLoRaGatewayList
// 获取 LoRa 网关列表接口
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
// INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETER = "InvalidParameter"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
func (c *Client) GetLoRaGatewayListWithContext(ctx context.Context, request *GetLoRaGatewayListRequest) (response *GetLoRaGatewayListResponse, err error) {
if request == nil {
request = NewGetLoRaGatewayListRequest()
}
request.SetContext(ctx)
response = NewGetLoRaGatewayListResponse()
err = c.Send(request, response)
return
}
func NewGetPositionSpaceListRequest() (request *GetPositionSpaceListRequest) {
request = &GetPositionSpaceListRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("iotexplorer", APIVersion, "GetPositionSpaceList")
return
}
func NewGetPositionSpaceListResponse() (response *GetPositionSpaceListResponse) {
response = &GetPositionSpaceListResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// GetPositionSpaceList
// 获取位置空间列表
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETER = "InvalidParameter"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
func (c *Client) GetPositionSpaceList(request *GetPositionSpaceListRequest) (response *GetPositionSpaceListResponse, err error) {
if request == nil {
request = NewGetPositionSpaceListRequest()
}
response = NewGetPositionSpaceListResponse()
err = c.Send(request, response)
return
}
// GetPositionSpaceList
// 获取位置空间列表
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETER = "InvalidParameter"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
func (c *Client) GetPositionSpaceListWithContext(ctx context.Context, request *GetPositionSpaceListRequest) (response *GetPositionSpaceListResponse, err error) {
if request == nil {
request = NewGetPositionSpaceListRequest()
}
request.SetContext(ctx)
response = NewGetPositionSpaceListResponse()
err = c.Send(request, response)
return
}
func NewGetProjectListRequest() (request *GetProjectListRequest) {
request = &GetProjectListRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("iotexplorer", APIVersion, "GetProjectList")
return
}
func NewGetProjectListResponse() (response *GetProjectListResponse) {
response = &GetProjectListResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// GetProjectList
// 提供查询用户所创建的项目列表查询功能。
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// RESOURCENOTFOUND_INSTANCENOTEXIST = "ResourceNotFound.InstanceNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOINSTANCE = "UnauthorizedOperation.NoPermissionToInstance"
// UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) GetProjectList(request *GetProjectListRequest) (response *GetProjectListResponse, err error) {
if request == nil {
request = NewGetProjectListRequest()
}
response = NewGetProjectListResponse()
err = c.Send(request, response)
return
}
// GetProjectList
// 提供查询用户所创建的项目列表查询功能。
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// RESOURCENOTFOUND_INSTANCENOTEXIST = "ResourceNotFound.InstanceNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOINSTANCE = "UnauthorizedOperation.NoPermissionToInstance"
// UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) GetProjectListWithContext(ctx context.Context, request *GetProjectListRequest) (response *GetProjectListResponse, err error) {
if request == nil {
request = NewGetProjectListRequest()
}
request.SetContext(ctx)
response = NewGetProjectListResponse()
err = c.Send(request, response)
return
}
func NewGetStudioProductListRequest() (request *GetStudioProductListRequest) {
request = &GetStudioProductListRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("iotexplorer", APIVersion, "GetStudioProductList")
return
}
func NewGetStudioProductListResponse() (response *GetStudioProductListResponse) {
response = &GetStudioProductListResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// GetStudioProductList
// 提供查询某个项目下所有产品信息的能力。
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INVALIDPARAMETERVALUE_PRODUCTPARMSERROR = "InvalidParameterValue.ProductParmsError"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
func (c *Client) GetStudioProductList(request *GetStudioProductListRequest) (response *GetStudioProductListResponse, err error) {
if request == nil {
request = NewGetStudioProductListRequest()
}
response = NewGetStudioProductListResponse()
err = c.Send(request, response)
return
}
// GetStudioProductList
// 提供查询某个项目下所有产品信息的能力。
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INVALIDPARAMETERVALUE_PRODUCTPARMSERROR = "InvalidParameterValue.ProductParmsError"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
func (c *Client) GetStudioProductListWithContext(ctx context.Context, request *GetStudioProductListRequest) (response *GetStudioProductListResponse, err error) {
if request == nil {
request = NewGetStudioProductListRequest()
}
request.SetContext(ctx)
response = NewGetStudioProductListResponse()
err = c.Send(request, response)
return
}
func NewGetTopicRuleListRequest() (request *GetTopicRuleListRequest) {
request = &GetTopicRuleListRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("iotexplorer", APIVersion, "GetTopicRuleList")
return
}
func NewGetTopicRuleListResponse() (response *GetTopicRuleListResponse) {
response = &GetTopicRuleListResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// GetTopicRuleList
// 获取规则列表
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// RESOURCENOTFOUND_TOPICRULENOTEXIST = "ResourceNotFound.TopicRuleNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNSUPPORTEDOPERATION = "UnsupportedOperation"
func (c *Client) GetTopicRuleList(request *GetTopicRuleListRequest) (response *GetTopicRuleListResponse, err error) {
if request == nil {
request = NewGetTopicRuleListRequest()
}
response = NewGetTopicRuleListResponse()
err = c.Send(request, response)
return
}
// GetTopicRuleList
// 获取规则列表
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// RESOURCENOTFOUND_TOPICRULENOTEXIST = "ResourceNotFound.TopicRuleNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNSUPPORTEDOPERATION = "UnsupportedOperation"
func (c *Client) GetTopicRuleListWithContext(ctx context.Context, request *GetTopicRuleListRequest) (response *GetTopicRuleListResponse, err error) {
if request == nil {
request = NewGetTopicRuleListRequest()
}
request.SetContext(ctx)
response = NewGetTopicRuleListResponse()
err = c.Send(request, response)
return
}
func NewListEventHistoryRequest() (request *ListEventHistoryRequest) {
request = &ListEventHistoryRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("iotexplorer", APIVersion, "ListEventHistory")
return
}
func NewListEventHistoryResponse() (response *ListEventHistoryResponse) {
response = &ListEventHistoryResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// ListEventHistory
// 获取设备的历史事件
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
// INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
// INVALIDPARAMETER = "InvalidParameter"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// INVALIDPARAMETERVALUE_PRODUCTALREADYEXIST = "InvalidParameterValue.ProductAlreadyExist"
// INVALIDPARAMETERVALUE_PRODUCTIDINVALID = "InvalidParameterValue.ProductIDInvalid"
// RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
// RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
// RESOURCENOTFOUND_PRODUCTORDEVICENOTEXIST = "ResourceNotFound.ProductOrDeviceNotExist"
// RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOPROJECT = "UnauthorizedOperation.NoPermissionToProject"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
// UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) ListEventHistory(request *ListEventHistoryRequest) (response *ListEventHistoryResponse, err error) {
if request == nil {
request = NewListEventHistoryRequest()
}
response = NewListEventHistoryResponse()
err = c.Send(request, response)
return
}
// ListEventHistory
// 获取设备的历史事件
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
// INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
// INVALIDPARAMETER = "InvalidParameter"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// INVALIDPARAMETERVALUE_PRODUCTALREADYEXIST = "InvalidParameterValue.ProductAlreadyExist"
// INVALIDPARAMETERVALUE_PRODUCTIDINVALID = "InvalidParameterValue.ProductIDInvalid"
// RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
// RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
// RESOURCENOTFOUND_PRODUCTORDEVICENOTEXIST = "ResourceNotFound.ProductOrDeviceNotExist"
// RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOPROJECT = "UnauthorizedOperation.NoPermissionToProject"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
// UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) ListEventHistoryWithContext(ctx context.Context, request *ListEventHistoryRequest) (response *ListEventHistoryResponse, err error) {
if request == nil {
request = NewListEventHistoryRequest()
}
request.SetContext(ctx)
response = NewListEventHistoryResponse()
err = c.Send(request, response)
return
}
func NewListFirmwaresRequest() (request *ListFirmwaresRequest) {
request = &ListFirmwaresRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("iotexplorer", APIVersion, "ListFirmwares")
return
}
func NewListFirmwaresResponse() (response *ListFirmwaresResponse) {
response = &ListFirmwaresResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// ListFirmwares
// 本接口(ListFirmwares)用于获取固件列表
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
// UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) ListFirmwares(request *ListFirmwaresRequest) (response *ListFirmwaresResponse, err error) {
if request == nil {
request = NewListFirmwaresRequest()
}
response = NewListFirmwaresResponse()
err = c.Send(request, response)
return
}
// ListFirmwares
// 本接口(ListFirmwares)用于获取固件列表
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
// UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) ListFirmwaresWithContext(ctx context.Context, request *ListFirmwaresRequest) (response *ListFirmwaresResponse, err error) {
if request == nil {
request = NewListFirmwaresRequest()
}
request.SetContext(ctx)
response = NewListFirmwaresResponse()
err = c.Send(request, response)
return
}
func NewListTopicPolicyRequest() (request *ListTopicPolicyRequest) {
request = &ListTopicPolicyRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("iotexplorer", APIVersion, "ListTopicPolicy")
return
}
func NewListTopicPolicyResponse() (response *ListTopicPolicyResponse) {
response = &ListTopicPolicyResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// ListTopicPolicy
// 本接口(ListTopicPolicy)用于获取Topic列表
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
func (c *Client) ListTopicPolicy(request *ListTopicPolicyRequest) (response *ListTopicPolicyResponse, err error) {
if request == nil {
request = NewListTopicPolicyRequest()
}
response = NewListTopicPolicyResponse()
err = c.Send(request, response)
return
}
// ListTopicPolicy
// 本接口(ListTopicPolicy)用于获取Topic列表
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
func (c *Client) ListTopicPolicyWithContext(ctx context.Context, request *ListTopicPolicyRequest) (response *ListTopicPolicyResponse, err error) {
if request == nil {
request = NewListTopicPolicyRequest()
}
request.SetContext(ctx)
response = NewListTopicPolicyResponse()
err = c.Send(request, response)
return
}
func NewModifyFenceBindRequest() (request *ModifyFenceBindRequest) {
request = &ModifyFenceBindRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("iotexplorer", APIVersion, "ModifyFenceBind")
return
}
func NewModifyFenceBindResponse() (response *ModifyFenceBindResponse) {
response = &ModifyFenceBindResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// ModifyFenceBind
// 更新围栏绑定信息
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOFENCE = "UnauthorizedOperation.NoPermissionToStudioFence"
func (c *Client) ModifyFenceBind(request *ModifyFenceBindRequest) (response *ModifyFenceBindResponse, err error) {
if request == nil {
request = NewModifyFenceBindRequest()
}
response = NewModifyFenceBindResponse()
err = c.Send(request, response)
return
}
// ModifyFenceBind
// 更新围栏绑定信息
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOFENCE = "UnauthorizedOperation.NoPermissionToStudioFence"
func (c *Client) ModifyFenceBindWithContext(ctx context.Context, request *ModifyFenceBindRequest) (response *ModifyFenceBindResponse, err error) {
if request == nil {
request = NewModifyFenceBindRequest()
}
request.SetContext(ctx)
response = NewModifyFenceBindResponse()
err = c.Send(request, response)
return
}
func NewModifyLoRaFrequencyRequest() (request *ModifyLoRaFrequencyRequest) {
request = &ModifyLoRaFrequencyRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("iotexplorer", APIVersion, "ModifyLoRaFrequency")
return
}
func NewModifyLoRaFrequencyResponse() (response *ModifyLoRaFrequencyResponse) {
response = &ModifyLoRaFrequencyResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// ModifyLoRaFrequency
// 修改LoRa自定义频点
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INVALIDPARAMETERVALUE_LORAFREQPARMSERROR = "InvalidParameterValue.LoRaFreqParmsError"
// INVALIDPARAMETERVALUE_VPNPARMSERROR = "InvalidParameterValue.VPNParmsError"
// RESOURCENOTFOUND_STUDIOLORAFREQNOTEXIST = "ResourceNotFound.StudioLoRaFreqNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNSUPPORTEDOPERATION_LORAFREQDUPKEYEXIST = "UnsupportedOperation.LoRaFreqDupKeyExist"
// UNSUPPORTEDOPERATION_VPNDUPKEYEXIST = "UnsupportedOperation.VPNDupKeyExist"
func (c *Client) ModifyLoRaFrequency(request *ModifyLoRaFrequencyRequest) (response *ModifyLoRaFrequencyResponse, err error) {
if request == nil {
request = NewModifyLoRaFrequencyRequest()
}
response = NewModifyLoRaFrequencyResponse()
err = c.Send(request, response)
return
}
// ModifyLoRaFrequency
// 修改LoRa自定义频点
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INVALIDPARAMETERVALUE_LORAFREQPARMSERROR = "InvalidParameterValue.LoRaFreqParmsError"
// INVALIDPARAMETERVALUE_VPNPARMSERROR = "InvalidParameterValue.VPNParmsError"
// RESOURCENOTFOUND_STUDIOLORAFREQNOTEXIST = "ResourceNotFound.StudioLoRaFreqNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNSUPPORTEDOPERATION_LORAFREQDUPKEYEXIST = "UnsupportedOperation.LoRaFreqDupKeyExist"
// UNSUPPORTEDOPERATION_VPNDUPKEYEXIST = "UnsupportedOperation.VPNDupKeyExist"
func (c *Client) ModifyLoRaFrequencyWithContext(ctx context.Context, request *ModifyLoRaFrequencyRequest) (response *ModifyLoRaFrequencyResponse, err error) {
if request == nil {
request = NewModifyLoRaFrequencyRequest()
}
request.SetContext(ctx)
response = NewModifyLoRaFrequencyResponse()
err = c.Send(request, response)
return
}
func NewModifyLoRaGatewayRequest() (request *ModifyLoRaGatewayRequest) {
request = &ModifyLoRaGatewayRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("iotexplorer", APIVersion, "ModifyLoRaGateway")
return
}
func NewModifyLoRaGatewayResponse() (response *ModifyLoRaGatewayResponse) {
response = &ModifyLoRaGatewayResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// ModifyLoRaGateway
// 修改 LoRa 网关信息
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
// INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
// INVALIDPARAMETER = "InvalidParameter"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// RESOURCENOTFOUND_GATEWAYNOTEXIST = "ResourceNotFound.GatewayNotExist"
func (c *Client) ModifyLoRaGateway(request *ModifyLoRaGatewayRequest) (response *ModifyLoRaGatewayResponse, err error) {
if request == nil {
request = NewModifyLoRaGatewayRequest()
}
response = NewModifyLoRaGatewayResponse()
err = c.Send(request, response)
return
}
// ModifyLoRaGateway
// 修改 LoRa 网关信息
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
// INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
// INVALIDPARAMETER = "InvalidParameter"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// RESOURCENOTFOUND_GATEWAYNOTEXIST = "ResourceNotFound.GatewayNotExist"
func (c *Client) ModifyLoRaGatewayWithContext(ctx context.Context, request *ModifyLoRaGatewayRequest) (response *ModifyLoRaGatewayResponse, err error) {
if request == nil {
request = NewModifyLoRaGatewayRequest()
}
request.SetContext(ctx)
response = NewModifyLoRaGatewayResponse()
err = c.Send(request, response)
return
}
func NewModifyModelDefinitionRequest() (request *ModifyModelDefinitionRequest) {
request = &ModifyModelDefinitionRequest{
BaseRequest: &tchttp.BaseRequest{},
}
request.Init().WithApiInfo("iotexplorer", APIVersion, "ModifyModelDefinition")
return
}
func NewModifyModelDefinitionResponse() (response *ModifyModelDefinitionResponse) {
response = &ModifyModelDefinitionResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// ModifyModelDefinition
// 提供修改产品的数据模板的能力
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
// INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
// INVALIDPARAMETER = "InvalidParameter"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// INVALIDPARAMETERVALUE_APPDESCRIPTIONTOOLONG = "InvalidParameterValue.AppDescriptionTooLong"
// INVALIDPARAMETERVALUE_APPEXISTS = "InvalidParameterValue.AppExists"
// INVALIDPARAMETERVALUE_APPNAMETOOLONG = "InvalidParameterValue.AppNameTooLong"
// INVALIDPARAMETERVALUE_APPNOPERMISSION = "InvalidParameterValue.AppNoPermission"
// INVALIDPARAMETERVALUE_APPNOTEXISTS = "InvalidParameterValue.AppNotExists"
// INVALIDPARAMETERVALUE_DEVICENAMEINVALID = "InvalidParameterValue.DeviceNameInvalid"
// INVALIDPARAMETERVALUE_ERRORTASKNOTEXIST = "InvalidParameterValue.ErrorTaskNotExist"
// INVALIDPARAMETERVALUE_MODELDEFINEDONTMATCHTEMPLATE = "InvalidParameterValue.ModelDefineDontMatchTemplate"
// INVALIDPARAMETERVALUE_MODELDEFINEDUPID = "InvalidParameterValue.ModelDefineDupID"
// INVALIDPARAMETERVALUE_MODELDEFINEERRORMODEL = "InvalidParameterValue.ModelDefineErrorModel"
// INVALIDPARAMETERVALUE_MODELDEFINEERRORTYPE = "InvalidParameterValue.ModelDefineErrorType"
// INVALIDPARAMETERVALUE_MODELDEFINEEVENTPARAMSDUPID = "InvalidParameterValue.ModelDefineEventParamsDupID"
// INVALIDPARAMETERVALUE_MODELDEFINEEVENTPARAMSEXCEEDLIMIT = "InvalidParameterValue.ModelDefineEventParamsExceedLimit"
// INVALIDPARAMETERVALUE_MODELDEFINEEVENTPROPERROR = "InvalidParameterValue.ModelDefineEventPropError"
// INVALIDPARAMETERVALUE_MODELDEFINEEVENTPROPNAMEERROR = "InvalidParameterValue.ModelDefineEventPropNameError"
// INVALIDPARAMETERVALUE_MODELDEFINEINVALID = "InvalidParameterValue.ModelDefineInvalid"
// INVALIDPARAMETERVALUE_MODELDEFINENIL = "InvalidParameterValue.ModelDefineNil"
// INVALIDPARAMETERVALUE_MODELDEFINEPROPBOOLMAPPINGERROR = "InvalidParameterValue.ModelDefinePropBoolMappingError"
// INVALIDPARAMETERVALUE_MODELDEFINEPROPENUMMAPPINGERROR = "InvalidParameterValue.ModelDefinePropEnumMappingError"
// INVALIDPARAMETERVALUE_MODELDEFINEPROPRANGEERROR = "InvalidParameterValue.ModelDefinePropRangeError"
// INVALIDPARAMETERVALUE_MODELDEFINEPROPRANGEOVERFLOW = "InvalidParameterValue.ModelDefinePropRangeOverflow"
// INVALIDPARAMETERVALUE_MSGCONTENTINVALID = "InvalidParameterValue.MsgContentInvalid"
// INVALIDPARAMETERVALUE_MSGLEVELINVALID = "InvalidParameterValue.MsgLevelInvalid"
// INVALIDPARAMETERVALUE_MSGTITLEINVALID = "InvalidParameterValue.MsgTitleInvalid"
// INVALIDPARAMETERVALUE_MSGTYPEINVALID = "InvalidParameterValue.MsgTypeInvalid"
// INVALIDPARAMETERVALUE_PRODUCTALREADYEXIST = "InvalidParameterValue.ProductAlreadyExist"
// INVALIDPARAMETERVALUE_PRODUCTIDINVALID = "InvalidParameterValue.ProductIDInvalid"
// INVALIDPARAMETERVALUE_PRODUCTPARMSERROR = "InvalidParameterValue.ProductParmsError"
// INVALIDPARAMETERVALUE_PROJECTPARMSERROR = "InvalidParameterValue.ProjectParmsError"
// INVALIDPARAMETERVALUE_USERIDINVALID = "InvalidParameterValue.UserIDInvalid"
// LIMITEXCEEDED_APPLICATIONEXCEEDLIMIT = "LimitExceeded.ApplicationExceedLimit"
// LIMITEXCEEDED_PROJECTEXCEEDLIMIT = "LimitExceeded.ProjectExceedLimit"
// LIMITEXCEEDED_STUDIOPRODUCTEXCEEDLIMIT = "LimitExceeded.StudioProductExceedLimit"
// LIMITEXCEEDED_THINGMODELEXCEEDLIMIT = "LimitExceeded.ThingModelExceedLimit"
// MISSINGPARAMETER_MODELDEFINEEVENTTYPEERROR = "MissingParameter.ModelDefineEventTypeError"
// RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
// RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_APPNOPERMISSION = "UnauthorizedOperation.AppNoPermission"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOPROJECT = "UnauthorizedOperation.NoPermissionToProject"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
// UNSUPPORTEDOPERATION_PRODUCTEXISTUNDERPROJECT = "UnsupportedOperation.ProductExistUnderProject"
func (c *Client) ModifyModelDefinition(request *ModifyModelDefinitionRequest) (response *ModifyModelDefinitionResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewModifyModelDefinitionRequest()
    }
    response = NewModifyModelDefinitionResponse()
    err = c.Send(request, response)
    return response, err
}
// ModifyModelDefinition
// Provides the ability to modify a product's data template (thing model).
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
// INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
// INVALIDPARAMETER = "InvalidParameter"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// INVALIDPARAMETERVALUE_APPDESCRIPTIONTOOLONG = "InvalidParameterValue.AppDescriptionTooLong"
// INVALIDPARAMETERVALUE_APPEXISTS = "InvalidParameterValue.AppExists"
// INVALIDPARAMETERVALUE_APPNAMETOOLONG = "InvalidParameterValue.AppNameTooLong"
// INVALIDPARAMETERVALUE_APPNOPERMISSION = "InvalidParameterValue.AppNoPermission"
// INVALIDPARAMETERVALUE_APPNOTEXISTS = "InvalidParameterValue.AppNotExists"
// INVALIDPARAMETERVALUE_DEVICENAMEINVALID = "InvalidParameterValue.DeviceNameInvalid"
// INVALIDPARAMETERVALUE_ERRORTASKNOTEXIST = "InvalidParameterValue.ErrorTaskNotExist"
// INVALIDPARAMETERVALUE_MODELDEFINEDONTMATCHTEMPLATE = "InvalidParameterValue.ModelDefineDontMatchTemplate"
// INVALIDPARAMETERVALUE_MODELDEFINEDUPID = "InvalidParameterValue.ModelDefineDupID"
// INVALIDPARAMETERVALUE_MODELDEFINEERRORMODEL = "InvalidParameterValue.ModelDefineErrorModel"
// INVALIDPARAMETERVALUE_MODELDEFINEERRORTYPE = "InvalidParameterValue.ModelDefineErrorType"
// INVALIDPARAMETERVALUE_MODELDEFINEEVENTPARAMSDUPID = "InvalidParameterValue.ModelDefineEventParamsDupID"
// INVALIDPARAMETERVALUE_MODELDEFINEEVENTPARAMSEXCEEDLIMIT = "InvalidParameterValue.ModelDefineEventParamsExceedLimit"
// INVALIDPARAMETERVALUE_MODELDEFINEEVENTPROPERROR = "InvalidParameterValue.ModelDefineEventPropError"
// INVALIDPARAMETERVALUE_MODELDEFINEEVENTPROPNAMEERROR = "InvalidParameterValue.ModelDefineEventPropNameError"
// INVALIDPARAMETERVALUE_MODELDEFINEINVALID = "InvalidParameterValue.ModelDefineInvalid"
// INVALIDPARAMETERVALUE_MODELDEFINENIL = "InvalidParameterValue.ModelDefineNil"
// INVALIDPARAMETERVALUE_MODELDEFINEPROPBOOLMAPPINGERROR = "InvalidParameterValue.ModelDefinePropBoolMappingError"
// INVALIDPARAMETERVALUE_MODELDEFINEPROPENUMMAPPINGERROR = "InvalidParameterValue.ModelDefinePropEnumMappingError"
// INVALIDPARAMETERVALUE_MODELDEFINEPROPRANGEERROR = "InvalidParameterValue.ModelDefinePropRangeError"
// INVALIDPARAMETERVALUE_MODELDEFINEPROPRANGEOVERFLOW = "InvalidParameterValue.ModelDefinePropRangeOverflow"
// INVALIDPARAMETERVALUE_MSGCONTENTINVALID = "InvalidParameterValue.MsgContentInvalid"
// INVALIDPARAMETERVALUE_MSGLEVELINVALID = "InvalidParameterValue.MsgLevelInvalid"
// INVALIDPARAMETERVALUE_MSGTITLEINVALID = "InvalidParameterValue.MsgTitleInvalid"
// INVALIDPARAMETERVALUE_MSGTYPEINVALID = "InvalidParameterValue.MsgTypeInvalid"
// INVALIDPARAMETERVALUE_PRODUCTALREADYEXIST = "InvalidParameterValue.ProductAlreadyExist"
// INVALIDPARAMETERVALUE_PRODUCTIDINVALID = "InvalidParameterValue.ProductIDInvalid"
// INVALIDPARAMETERVALUE_PRODUCTPARMSERROR = "InvalidParameterValue.ProductParmsError"
// INVALIDPARAMETERVALUE_PROJECTPARMSERROR = "InvalidParameterValue.ProjectParmsError"
// INVALIDPARAMETERVALUE_USERIDINVALID = "InvalidParameterValue.UserIDInvalid"
// LIMITEXCEEDED_APPLICATIONEXCEEDLIMIT = "LimitExceeded.ApplicationExceedLimit"
// LIMITEXCEEDED_PROJECTEXCEEDLIMIT = "LimitExceeded.ProjectExceedLimit"
// LIMITEXCEEDED_STUDIOPRODUCTEXCEEDLIMIT = "LimitExceeded.StudioProductExceedLimit"
// LIMITEXCEEDED_THINGMODELEXCEEDLIMIT = "LimitExceeded.ThingModelExceedLimit"
// MISSINGPARAMETER_MODELDEFINEEVENTTYPEERROR = "MissingParameter.ModelDefineEventTypeError"
// RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
// RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_APPNOPERMISSION = "UnauthorizedOperation.AppNoPermission"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOPROJECT = "UnauthorizedOperation.NoPermissionToProject"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
// UNSUPPORTEDOPERATION_PRODUCTEXISTUNDERPROJECT = "UnsupportedOperation.ProductExistUnderProject"
func (c *Client) ModifyModelDefinitionWithContext(ctx context.Context, request *ModifyModelDefinitionRequest) (response *ModifyModelDefinitionResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewModifyModelDefinitionRequest()
    }
    // Attach the caller's context so cancellation and deadlines are honored.
    request.SetContext(ctx)
    response = NewModifyModelDefinitionResponse()
    err = c.Send(request, response)
    return response, err
}
// NewModifyPositionFenceRequest builds a ModifyPositionFenceRequest pre-initialized
// for the iotexplorer "ModifyPositionFence" API action.
func NewModifyPositionFenceRequest() (request *ModifyPositionFenceRequest) {
    req := &ModifyPositionFenceRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "ModifyPositionFence")
    return req
}
// NewModifyPositionFenceResponse builds an empty ModifyPositionFenceResponse
// ready to receive an API reply.
func NewModifyPositionFenceResponse() (response *ModifyPositionFenceResponse) {
    resp := &ModifyPositionFenceResponse{BaseResponse: &tchttp.BaseResponse{}}
    return resp
}
// ModifyPositionFence
// Updates a position fence.
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// RESOURCENOTFOUND_FENCENOTEXIST = "ResourceNotFound.FenceNotExist"
// RESOURCENOTFOUND_SPACENOTEXIST = "ResourceNotFound.SpaceNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOFENCE = "UnauthorizedOperation.NoPermissionToStudioFence"
func (c *Client) ModifyPositionFence(request *ModifyPositionFenceRequest) (response *ModifyPositionFenceResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewModifyPositionFenceRequest()
    }
    response = NewModifyPositionFenceResponse()
    err = c.Send(request, response)
    return response, err
}
// ModifyPositionFence
// Updates a position fence.
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// RESOURCENOTFOUND_FENCENOTEXIST = "ResourceNotFound.FenceNotExist"
// RESOURCENOTFOUND_SPACENOTEXIST = "ResourceNotFound.SpaceNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOFENCE = "UnauthorizedOperation.NoPermissionToStudioFence"
func (c *Client) ModifyPositionFenceWithContext(ctx context.Context, request *ModifyPositionFenceRequest) (response *ModifyPositionFenceResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewModifyPositionFenceRequest()
    }
    // Attach the caller's context so cancellation and deadlines are honored.
    request.SetContext(ctx)
    response = NewModifyPositionFenceResponse()
    err = c.Send(request, response)
    return response, err
}
// NewModifyPositionSpaceRequest builds a ModifyPositionSpaceRequest pre-initialized
// for the iotexplorer "ModifyPositionSpace" API action.
func NewModifyPositionSpaceRequest() (request *ModifyPositionSpaceRequest) {
    req := &ModifyPositionSpaceRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "ModifyPositionSpace")
    return req
}
// NewModifyPositionSpaceResponse builds an empty ModifyPositionSpaceResponse
// ready to receive an API reply.
func NewModifyPositionSpaceResponse() (response *ModifyPositionSpaceResponse) {
    resp := &ModifyPositionSpaceResponse{BaseResponse: &tchttp.BaseResponse{}}
    return resp
}
// ModifyPositionSpace
// Updates a position space.
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// RESOURCENOTFOUND_SPACENOTEXIST = "ResourceNotFound.SpaceNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNSUPPORTEDOPERATION_SPACEDUPKEYEXIST = "UnsupportedOperation.SpaceDupKeyExist"
func (c *Client) ModifyPositionSpace(request *ModifyPositionSpaceRequest) (response *ModifyPositionSpaceResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewModifyPositionSpaceRequest()
    }
    response = NewModifyPositionSpaceResponse()
    err = c.Send(request, response)
    return response, err
}
// ModifyPositionSpace
// Updates a position space.
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// RESOURCENOTFOUND_SPACENOTEXIST = "ResourceNotFound.SpaceNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNSUPPORTEDOPERATION_SPACEDUPKEYEXIST = "UnsupportedOperation.SpaceDupKeyExist"
func (c *Client) ModifyPositionSpaceWithContext(ctx context.Context, request *ModifyPositionSpaceRequest) (response *ModifyPositionSpaceResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewModifyPositionSpaceRequest()
    }
    // Attach the caller's context so cancellation and deadlines are honored.
    request.SetContext(ctx)
    response = NewModifyPositionSpaceResponse()
    err = c.Send(request, response)
    return response, err
}
// NewModifyProjectRequest builds a ModifyProjectRequest pre-initialized
// for the iotexplorer "ModifyProject" API action.
func NewModifyProjectRequest() (request *ModifyProjectRequest) {
    req := &ModifyProjectRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "ModifyProject")
    return req
}
// NewModifyProjectResponse builds an empty ModifyProjectResponse
// ready to receive an API reply.
func NewModifyProjectResponse() (response *ModifyProjectResponse) {
    resp := &ModifyProjectResponse{BaseResponse: &tchttp.BaseResponse{}}
    return resp
}
// ModifyProject
// Modifies a project.
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETERVALUE_PROJECTPARMSERROR = "InvalidParameterValue.ProjectParmsError"
// RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
// UNSUPPORTEDOPERATION_PROJECTDUPKEYEXIST = "UnsupportedOperation.ProjectDupKeyExist"
func (c *Client) ModifyProject(request *ModifyProjectRequest) (response *ModifyProjectResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewModifyProjectRequest()
    }
    response = NewModifyProjectResponse()
    err = c.Send(request, response)
    return response, err
}
// ModifyProject
// Modifies a project.
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETERVALUE_PROJECTPARMSERROR = "InvalidParameterValue.ProjectParmsError"
// RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
// UNSUPPORTEDOPERATION_PROJECTDUPKEYEXIST = "UnsupportedOperation.ProjectDupKeyExist"
func (c *Client) ModifyProjectWithContext(ctx context.Context, request *ModifyProjectRequest) (response *ModifyProjectResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewModifyProjectRequest()
    }
    // Attach the caller's context so cancellation and deadlines are honored.
    request.SetContext(ctx)
    response = NewModifyProjectResponse()
    err = c.Send(request, response)
    return response, err
}
// NewModifySpacePropertyRequest builds a ModifySpacePropertyRequest pre-initialized
// for the iotexplorer "ModifySpaceProperty" API action.
func NewModifySpacePropertyRequest() (request *ModifySpacePropertyRequest) {
    req := &ModifySpacePropertyRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "ModifySpaceProperty")
    return req
}
// NewModifySpacePropertyResponse builds an empty ModifySpacePropertyResponse
// ready to receive an API reply.
func NewModifySpacePropertyResponse() (response *ModifySpacePropertyResponse) {
    resp := &ModifySpacePropertyResponse{BaseResponse: &tchttp.BaseResponse{}}
    return resp
}
// ModifySpaceProperty
// Updates the product properties of a position space.
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// RESOURCENOTFOUND_SPACENOTEXIST = "ResourceNotFound.SpaceNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) ModifySpaceProperty(request *ModifySpacePropertyRequest) (response *ModifySpacePropertyResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewModifySpacePropertyRequest()
    }
    response = NewModifySpacePropertyResponse()
    err = c.Send(request, response)
    return response, err
}
// ModifySpaceProperty
// Updates the product properties of a position space.
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// RESOURCENOTFOUND_SPACENOTEXIST = "ResourceNotFound.SpaceNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) ModifySpacePropertyWithContext(ctx context.Context, request *ModifySpacePropertyRequest) (response *ModifySpacePropertyResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewModifySpacePropertyRequest()
    }
    // Attach the caller's context so cancellation and deadlines are honored.
    request.SetContext(ctx)
    response = NewModifySpacePropertyResponse()
    err = c.Send(request, response)
    return response, err
}
// NewModifyStudioProductRequest builds a ModifyStudioProductRequest pre-initialized
// for the iotexplorer "ModifyStudioProduct" API action.
func NewModifyStudioProductRequest() (request *ModifyStudioProductRequest) {
    req := &ModifyStudioProductRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "ModifyStudioProduct")
    return req
}
// NewModifyStudioProductResponse builds an empty ModifyStudioProductResponse
// ready to receive an API reply.
func NewModifyStudioProductResponse() (response *ModifyStudioProductResponse) {
    resp := &ModifyStudioProductResponse{BaseResponse: &tchttp.BaseResponse{}}
    return resp
}
// ModifyStudioProduct
// Provides the ability to modify product information such as its name and description; released products cannot be modified.
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
// INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETER = "InvalidParameter"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// INVALIDPARAMETERVALUE_MODELDEFINEDONTMATCHTEMPLATE = "InvalidParameterValue.ModelDefineDontMatchTemplate"
// INVALIDPARAMETERVALUE_PRODUCTALREADYEXIST = "InvalidParameterValue.ProductAlreadyExist"
// INVALIDPARAMETERVALUE_PRODUCTIDINVALID = "InvalidParameterValue.ProductIDInvalid"
// INVALIDPARAMETERVALUE_PRODUCTPARMSERROR = "InvalidParameterValue.ProductParmsError"
// LIMITEXCEEDED_STUDIOPRODUCTEXCEEDLIMIT = "LimitExceeded.StudioProductExceedLimit"
// RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
// RESOURCENOTFOUND_PRODUCTORDEVICENOTEXIST = "ResourceNotFound.ProductOrDeviceNotExist"
// RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOPROJECT = "UnauthorizedOperation.NoPermissionToProject"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) ModifyStudioProduct(request *ModifyStudioProductRequest) (response *ModifyStudioProductResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewModifyStudioProductRequest()
    }
    response = NewModifyStudioProductResponse()
    err = c.Send(request, response)
    return response, err
}
// ModifyStudioProduct
// Provides the ability to modify product information such as its name and description; released products cannot be modified.
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
// INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
// INTERNALERROR_INTERNALSERVEREXCEPTIONDB = "InternalError.InternalServerExceptionDB"
// INVALIDPARAMETER = "InvalidParameter"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// INVALIDPARAMETERVALUE_MODELDEFINEDONTMATCHTEMPLATE = "InvalidParameterValue.ModelDefineDontMatchTemplate"
// INVALIDPARAMETERVALUE_PRODUCTALREADYEXIST = "InvalidParameterValue.ProductAlreadyExist"
// INVALIDPARAMETERVALUE_PRODUCTIDINVALID = "InvalidParameterValue.ProductIDInvalid"
// INVALIDPARAMETERVALUE_PRODUCTPARMSERROR = "InvalidParameterValue.ProductParmsError"
// LIMITEXCEEDED_STUDIOPRODUCTEXCEEDLIMIT = "LimitExceeded.StudioProductExceedLimit"
// RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
// RESOURCENOTFOUND_PRODUCTORDEVICENOTEXIST = "ResourceNotFound.ProductOrDeviceNotExist"
// RESOURCENOTFOUND_PROJECTNOTEXIST = "ResourceNotFound.ProjectNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOPROJECT = "UnauthorizedOperation.NoPermissionToProject"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) ModifyStudioProductWithContext(ctx context.Context, request *ModifyStudioProductRequest) (response *ModifyStudioProductResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewModifyStudioProductRequest()
    }
    // Attach the caller's context so cancellation and deadlines are honored.
    request.SetContext(ctx)
    response = NewModifyStudioProductResponse()
    err = c.Send(request, response)
    return response, err
}
// NewModifyTopicPolicyRequest builds a ModifyTopicPolicyRequest pre-initialized
// for the iotexplorer "ModifyTopicPolicy" API action.
func NewModifyTopicPolicyRequest() (request *ModifyTopicPolicyRequest) {
    req := &ModifyTopicPolicyRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "ModifyTopicPolicy")
    return req
}
// NewModifyTopicPolicyResponse builds an empty ModifyTopicPolicyResponse
// ready to receive an API reply.
func NewModifyTopicPolicyResponse() (response *ModifyTopicPolicyResponse) {
    resp := &ModifyTopicPolicyResponse{BaseResponse: &tchttp.BaseResponse{}}
    return resp
}
// ModifyTopicPolicy
// This API is used to update Topic information. (Upstream docs refer to it as UpdateTopicPolicy.)
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// INVALIDPARAMETERVALUE_TOPICPOLICYALREADYEXIST = "InvalidParameterValue.TopicPolicyAlreadyExist"
// RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// RESOURCENOTFOUND_TOPICPOLICYNOTEXIST = "ResourceNotFound.TopicPolicyNotExist"
func (c *Client) ModifyTopicPolicy(request *ModifyTopicPolicyRequest) (response *ModifyTopicPolicyResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewModifyTopicPolicyRequest()
    }
    response = NewModifyTopicPolicyResponse()
    err = c.Send(request, response)
    return response, err
}
// ModifyTopicPolicy
// This API is used to update Topic information. (Upstream docs refer to it as UpdateTopicPolicy.)
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// INVALIDPARAMETERVALUE_TOPICPOLICYALREADYEXIST = "InvalidParameterValue.TopicPolicyAlreadyExist"
// RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// RESOURCENOTFOUND_TOPICPOLICYNOTEXIST = "ResourceNotFound.TopicPolicyNotExist"
func (c *Client) ModifyTopicPolicyWithContext(ctx context.Context, request *ModifyTopicPolicyRequest) (response *ModifyTopicPolicyResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewModifyTopicPolicyRequest()
    }
    // Attach the caller's context so cancellation and deadlines are honored.
    request.SetContext(ctx)
    response = NewModifyTopicPolicyResponse()
    err = c.Send(request, response)
    return response, err
}
// NewModifyTopicRuleRequest builds a ModifyTopicRuleRequest pre-initialized
// for the iotexplorer "ModifyTopicRule" API action.
func NewModifyTopicRuleRequest() (request *ModifyTopicRuleRequest) {
    req := &ModifyTopicRuleRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "ModifyTopicRule")
    return req
}
// NewModifyTopicRuleResponse builds an empty ModifyTopicRuleResponse
// ready to receive an API reply.
func NewModifyTopicRuleResponse() (response *ModifyTopicRuleResponse) {
    resp := &ModifyTopicRuleResponse{BaseResponse: &tchttp.BaseResponse{}}
    return resp
}
// ModifyTopicRule
// Modifies a topic rule.
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// INVALIDPARAMETERVALUE_CHECKFORWARDURLFAIL = "InvalidParameterValue.CheckForwardURLFail"
// INVALIDPARAMETERVALUE_FAILACTIONHASSAMEDEVICE = "InvalidParameterValue.FailActionHasSameDevice"
// INVALIDPARAMETERVALUE_FORWARDREDIRECTDENIED = "InvalidParameterValue.ForwardRedirectDenied"
// INVALIDPARAMETERVALUE_INVALIDSQL = "InvalidParameterValue.InvalidSQL"
// INVALIDPARAMETERVALUE_OPERATIONDENIED = "InvalidParameterValue.OperationDenied"
// INVALIDPARAMETERVALUE_REPUBLISHTOPICFORMATERROR = "InvalidParameterValue.RepublishTopicFormatError"
// INVALIDPARAMETERVALUE_TOPICRULEALREADYEXIST = "InvalidParameterValue.TopicRuleAlreadyExist"
// INVALIDPARAMETERVALUE_UPDATETOPICRULEDBFAIL = "InvalidParameterValue.UpdateTopicRuleDBFail"
// RESOURCENOTFOUND_TOPICRULENOTEXIST = "ResourceNotFound.TopicRuleNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_PERMISSIONDENIED = "UnauthorizedOperation.PermissionDenied"
func (c *Client) ModifyTopicRule(request *ModifyTopicRuleRequest) (response *ModifyTopicRuleResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewModifyTopicRuleRequest()
    }
    response = NewModifyTopicRuleResponse()
    err = c.Send(request, response)
    return response, err
}
// ModifyTopicRule
// Modifies a topic rule.
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// INVALIDPARAMETERVALUE_CHECKFORWARDURLFAIL = "InvalidParameterValue.CheckForwardURLFail"
// INVALIDPARAMETERVALUE_FAILACTIONHASSAMEDEVICE = "InvalidParameterValue.FailActionHasSameDevice"
// INVALIDPARAMETERVALUE_FORWARDREDIRECTDENIED = "InvalidParameterValue.ForwardRedirectDenied"
// INVALIDPARAMETERVALUE_INVALIDSQL = "InvalidParameterValue.InvalidSQL"
// INVALIDPARAMETERVALUE_OPERATIONDENIED = "InvalidParameterValue.OperationDenied"
// INVALIDPARAMETERVALUE_REPUBLISHTOPICFORMATERROR = "InvalidParameterValue.RepublishTopicFormatError"
// INVALIDPARAMETERVALUE_TOPICRULEALREADYEXIST = "InvalidParameterValue.TopicRuleAlreadyExist"
// INVALIDPARAMETERVALUE_UPDATETOPICRULEDBFAIL = "InvalidParameterValue.UpdateTopicRuleDBFail"
// RESOURCENOTFOUND_TOPICRULENOTEXIST = "ResourceNotFound.TopicRuleNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_PERMISSIONDENIED = "UnauthorizedOperation.PermissionDenied"
func (c *Client) ModifyTopicRuleWithContext(ctx context.Context, request *ModifyTopicRuleRequest) (response *ModifyTopicRuleResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewModifyTopicRuleRequest()
    }
    // Attach the caller's context so cancellation and deadlines are honored.
    request.SetContext(ctx)
    response = NewModifyTopicRuleResponse()
    err = c.Send(request, response)
    return response, err
}
// NewPublishMessageRequest builds a PublishMessageRequest pre-initialized
// for the iotexplorer "PublishMessage" API action.
func NewPublishMessageRequest() (request *PublishMessageRequest) {
    req := &PublishMessageRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "PublishMessage")
    return req
}
// NewPublishMessageResponse builds an empty PublishMessageResponse
// ready to receive an API reply.
func NewPublishMessageResponse() (response *PublishMessageResponse) {
    resp := &PublishMessageResponse{BaseResponse: &tchttp.BaseResponse{}}
    return resp
}
// PublishMessage
// This API (PublishMessage) is used to remotely control a device via a custom pass-through protocol.
//
// 可能返回的错误码:
// FAILEDOPERATION = "FailedOperation"
// FAILEDOPERATION_DEVICENOSUBSCRIPTION = "FailedOperation.DeviceNoSubscription"
// FAILEDOPERATION_DEVICEOFFLINE = "FailedOperation.DeviceOffline"
// INTERNALERROR = "InternalError"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// LIMITEXCEEDED_MESSAGESAVED = "LimitExceeded.MessageSaved"
// RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
// RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
// RESOURCENOTFOUND_PRODUCTORDEVICENOTEXIST = "ResourceNotFound.ProductOrDeviceNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
// UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) PublishMessage(request *PublishMessageRequest) (response *PublishMessageResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewPublishMessageRequest()
    }
    response = NewPublishMessageResponse()
    err = c.Send(request, response)
    return response, err
}
// PublishMessage
// This API (PublishMessage) is used to remotely control a device via a custom pass-through protocol.
//
// 可能返回的错误码:
// FAILEDOPERATION = "FailedOperation"
// FAILEDOPERATION_DEVICENOSUBSCRIPTION = "FailedOperation.DeviceNoSubscription"
// FAILEDOPERATION_DEVICEOFFLINE = "FailedOperation.DeviceOffline"
// INTERNALERROR = "InternalError"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// LIMITEXCEEDED_MESSAGESAVED = "LimitExceeded.MessageSaved"
// RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
// RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
// RESOURCENOTFOUND_PRODUCTORDEVICENOTEXIST = "ResourceNotFound.ProductOrDeviceNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
// UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) PublishMessageWithContext(ctx context.Context, request *PublishMessageRequest) (response *PublishMessageResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewPublishMessageRequest()
    }
    // Attach the caller's context so cancellation and deadlines are honored.
    request.SetContext(ctx)
    response = NewPublishMessageResponse()
    err = c.Send(request, response)
    return response, err
}
// NewPublishRRPCMessageRequest builds a PublishRRPCMessageRequest pre-initialized
// for the iotexplorer "PublishRRPCMessage" API action.
func NewPublishRRPCMessageRequest() (request *PublishRRPCMessageRequest) {
    req := &PublishRRPCMessageRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "PublishRRPCMessage")
    return req
}
// NewPublishRRPCMessageResponse builds an empty PublishRRPCMessageResponse
// ready to receive an API reply.
func NewPublishRRPCMessageResponse() (response *PublishRRPCMessageResponse) {
    resp := &PublishRRPCMessageResponse{BaseResponse: &tchttp.BaseResponse{}}
    return resp
}
// PublishRRPCMessage
// Sends an RRPC message to a device.
//
// 可能返回的错误码:
// FAILEDOPERATION_DEVICENOSUBSCRIPTION = "FailedOperation.DeviceNoSubscription"
// FAILEDOPERATION_DEVICEOFFLINE = "FailedOperation.DeviceOffline"
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
func (c *Client) PublishRRPCMessage(request *PublishRRPCMessageRequest) (response *PublishRRPCMessageResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewPublishRRPCMessageRequest()
    }
    response = NewPublishRRPCMessageResponse()
    err = c.Send(request, response)
    return response, err
}
// PublishRRPCMessage
// Sends an RRPC message to a device.
//
// 可能返回的错误码:
// FAILEDOPERATION_DEVICENOSUBSCRIPTION = "FailedOperation.DeviceNoSubscription"
// FAILEDOPERATION_DEVICEOFFLINE = "FailedOperation.DeviceOffline"
// INTERNALERROR = "InternalError"
// INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
func (c *Client) PublishRRPCMessageWithContext(ctx context.Context, request *PublishRRPCMessageRequest) (response *PublishRRPCMessageResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewPublishRRPCMessageRequest()
    }
    // Attach the caller's context so cancellation and deadlines are honored.
    request.SetContext(ctx)
    response = NewPublishRRPCMessageResponse()
    err = c.Send(request, response)
    return response, err
}
// NewReleaseStudioProductRequest builds a ReleaseStudioProductRequest pre-initialized
// for the iotexplorer "ReleaseStudioProduct" API action.
func NewReleaseStudioProductRequest() (request *ReleaseStudioProductRequest) {
    req := &ReleaseStudioProductRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "ReleaseStudioProduct")
    return req
}
// NewReleaseStudioProductResponse builds an empty ReleaseStudioProductResponse
// ready to receive an API reply.
func NewReleaseStudioProductResponse() (response *ReleaseStudioProductResponse) {
    resp := &ReleaseStudioProductResponse{BaseResponse: &tchttp.BaseResponse{}}
    return resp
}
// ReleaseStudioProduct
// After product development and testing are complete, releases the product by setting it to the released state.
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) ReleaseStudioProduct(request *ReleaseStudioProductRequest) (response *ReleaseStudioProductResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewReleaseStudioProductRequest()
    }
    response = NewReleaseStudioProductResponse()
    err = c.Send(request, response)
    return response, err
}
// ReleaseStudioProduct
// After product development and testing are complete, releases the product by setting it to the released state.
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// RESOURCENOTFOUND_PRODUCTNOTEXIST = "ResourceNotFound.ProductNotExist"
// RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
// UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
func (c *Client) ReleaseStudioProductWithContext(ctx context.Context, request *ReleaseStudioProductRequest) (response *ReleaseStudioProductResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewReleaseStudioProductRequest()
    }
    // Attach the caller's context so cancellation and deadlines are honored.
    request.SetContext(ctx)
    response = NewReleaseStudioProductResponse()
    err = c.Send(request, response)
    return response, err
}
// NewSearchPositionSpaceRequest builds a SearchPositionSpaceRequest pre-initialized
// for the iotexplorer "SearchPositionSpace" API action.
func NewSearchPositionSpaceRequest() (request *SearchPositionSpaceRequest) {
    req := &SearchPositionSpaceRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "SearchPositionSpace")
    return req
}
// NewSearchPositionSpaceResponse builds an empty SearchPositionSpaceResponse
// ready to receive an API reply.
func NewSearchPositionSpaceResponse() (response *SearchPositionSpaceResponse) {
    resp := &SearchPositionSpaceResponse{BaseResponse: &tchttp.BaseResponse{}}
    return resp
}
// SearchPositionSpace
// Searches position spaces.
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INVALIDPARAMETER = "InvalidParameter"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
func (c *Client) SearchPositionSpace(request *SearchPositionSpaceRequest) (response *SearchPositionSpaceResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewSearchPositionSpaceRequest()
    }
    response = NewSearchPositionSpaceResponse()
    err = c.Send(request, response)
    return response, err
}
// SearchPositionSpace
// Searches position spaces.
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INVALIDPARAMETER = "InvalidParameter"
// INVALIDPARAMETERVALUE = "InvalidParameterValue"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
func (c *Client) SearchPositionSpaceWithContext(ctx context.Context, request *SearchPositionSpaceRequest) (response *SearchPositionSpaceResponse, err error) {
    // Fall back to a default-initialized request when the caller passes nil.
    if request == nil {
        request = NewSearchPositionSpaceRequest()
    }
    // Attach the caller's context so cancellation and deadlines are honored.
    request.SetContext(ctx)
    response = NewSearchPositionSpaceResponse()
    err = c.Send(request, response)
    return response, err
}
// NewSearchStudioProductRequest builds a SearchStudioProductRequest pre-initialized
// for the iotexplorer "SearchStudioProduct" API action.
func NewSearchStudioProductRequest() (request *SearchStudioProductRequest) {
    req := &SearchStudioProductRequest{BaseRequest: &tchttp.BaseRequest{}}
    req.Init().WithApiInfo("iotexplorer", APIVersion, "SearchStudioProduct")
    return req
}
func NewSearchStudioProductResponse() (response *SearchStudioProductResponse) {
response = &SearchStudioProductResponse{
BaseResponse: &tchttp.BaseResponse{},
}
return
}
// SearchStudioProduct
// 提供根据产品名称查找产品的能力
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
// INVALIDPARAMETERVALUE_PRODUCTPARMSERROR = "InvalidParameterValue.ProductParmsError"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
func (c *Client) SearchStudioProduct(request *SearchStudioProductRequest) (response *SearchStudioProductResponse, err error) {
if request == nil {
request = NewSearchStudioProductRequest()
}
response = NewSearchStudioProductResponse()
err = c.Send(request, response)
return
}
// SearchStudioProduct
// 提供根据产品名称查找产品的能力
//
// 可能返回的错误码:
// INTERNALERROR = "InternalError"
// INTERNALERROR_DBOPERTAIONERROR = "InternalError.DBOpertaionError"
// INVALIDPARAMETERVALUE_PRODUCTPARMSERROR = "InvalidParameterValue.ProductParmsError"
// UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
func (c *Client) SearchStudioProductWithContext(ctx context.Context, request *SearchStudioProductRequest) (response *SearchStudioProductResponse, err error) {
if request == nil {
request = NewSearchStudioProductRequest()
}
request.SetContext(ctx)
response = NewSearchStudioProductResponse()
err = c.Send(request, response)
return
}
// NewSearchTopicRuleRequest returns a SearchTopicRule request with the API
// metadata (service, version, action) pre-populated.
func NewSearchTopicRuleRequest() *SearchTopicRuleRequest {
	req := &SearchTopicRuleRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "SearchTopicRule")
	return req
}

// NewSearchTopicRuleResponse returns an empty SearchTopicRule response.
func NewSearchTopicRuleResponse() *SearchTopicRuleResponse {
	return &SearchTopicRuleResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
}

// SearchTopicRule searches topic rules.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_TOPICRULENOTEXIST = "ResourceNotFound.TopicRuleNotExist"
//  UNSUPPORTEDOPERATION = "UnsupportedOperation"
func (c *Client) SearchTopicRule(request *SearchTopicRuleRequest) (*SearchTopicRuleResponse, error) {
	if request == nil {
		request = NewSearchTopicRuleRequest()
	}
	resp := NewSearchTopicRuleResponse()
	return resp, c.Send(request, resp)
}

// SearchTopicRuleWithContext is like SearchTopicRule but attaches the
// caller-supplied context to the request.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_TOPICRULENOTEXIST = "ResourceNotFound.TopicRuleNotExist"
//  UNSUPPORTEDOPERATION = "UnsupportedOperation"
func (c *Client) SearchTopicRuleWithContext(ctx context.Context, request *SearchTopicRuleRequest) (*SearchTopicRuleResponse, error) {
	if request == nil {
		request = NewSearchTopicRuleRequest()
	}
	request.SetContext(ctx)
	resp := NewSearchTopicRuleResponse()
	return resp, c.Send(request, resp)
}
// NewUnbindDevicesRequest returns an UnbindDevices request with the API
// metadata (service, version, action) pre-populated.
func NewUnbindDevicesRequest() *UnbindDevicesRequest {
	req := &UnbindDevicesRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "UnbindDevices")
	return req
}

// NewUnbindDevicesResponse returns an empty UnbindDevices response.
func NewUnbindDevicesResponse() *UnbindDevicesResponse {
	return &UnbindDevicesResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
}

// UnbindDevices unbinds sub-devices from their gateway in batch.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
func (c *Client) UnbindDevices(request *UnbindDevicesRequest) (*UnbindDevicesResponse, error) {
	if request == nil {
		request = NewUnbindDevicesRequest()
	}
	resp := NewUnbindDevicesResponse()
	return resp, c.Send(request, resp)
}

// UnbindDevicesWithContext is like UnbindDevices but attaches the
// caller-supplied context to the request.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
func (c *Client) UnbindDevicesWithContext(ctx context.Context, request *UnbindDevicesRequest) (*UnbindDevicesResponse, error) {
	if request == nil {
		request = NewUnbindDevicesRequest()
	}
	request.SetContext(ctx)
	resp := NewUnbindDevicesResponse()
	return resp, c.Send(request, resp)
}
// NewUnbindProductsRequest returns an UnbindProducts request with the API
// metadata (service, version, action) pre-populated.
func NewUnbindProductsRequest() *UnbindProductsRequest {
	req := &UnbindProductsRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "UnbindProducts")
	return req
}

// NewUnbindProductsResponse returns an empty UnbindProducts response.
func NewUnbindProductsResponse() *UnbindProductsResponse {
	return &UnbindProductsResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
}

// UnbindProducts unbinds sub-products from their gateway product in batch.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNSUPPORTEDOPERATION_EXISTBINDEDDEVICESUNDERGATEWAYPRODUCT = "UnsupportedOperation.ExistBindedDevicesUnderGatewayProduct"
func (c *Client) UnbindProducts(request *UnbindProductsRequest) (*UnbindProductsResponse, error) {
	if request == nil {
		request = NewUnbindProductsRequest()
	}
	resp := NewUnbindProductsResponse()
	return resp, c.Send(request, resp)
}

// UnbindProductsWithContext is like UnbindProducts but attaches the
// caller-supplied context to the request.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  INVALIDPARAMETER = "InvalidParameter"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNSUPPORTEDOPERATION_EXISTBINDEDDEVICESUNDERGATEWAYPRODUCT = "UnsupportedOperation.ExistBindedDevicesUnderGatewayProduct"
func (c *Client) UnbindProductsWithContext(ctx context.Context, request *UnbindProductsRequest) (*UnbindProductsResponse, error) {
	if request == nil {
		request = NewUnbindProductsRequest()
	}
	request.SetContext(ctx)
	resp := NewUnbindProductsResponse()
	return resp, c.Send(request, resp)
}
// NewUpdateDevicesEnableStateRequest returns an UpdateDevicesEnableState
// request with the API metadata (service, version, action) pre-populated.
func NewUpdateDevicesEnableStateRequest() *UpdateDevicesEnableStateRequest {
	req := &UpdateDevicesEnableStateRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "UpdateDevicesEnableState")
	return req
}

// NewUpdateDevicesEnableStateResponse returns an empty
// UpdateDevicesEnableState response.
func NewUpdateDevicesEnableStateResponse() *UpdateDevicesEnableStateResponse {
	return &UpdateDevicesEnableStateResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
}

// UpdateDevicesEnableState enables or disables devices in batch.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_INSTANCENOTEXIST = "ResourceNotFound.InstanceNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_DEVICEHASALREADYBINDGATEWAY = "UnauthorizedOperation.DeviceHasAlreadyBindGateway"
//  UNAUTHORIZEDOPERATION_GATEWAYHASBINDEDDEVICES = "UnauthorizedOperation.GatewayHasBindedDevices"
//  UNSUPPORTEDOPERATION_DEVICEOTATASKINPROGRESS = "UnsupportedOperation.DeviceOtaTaskInProgress"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) UpdateDevicesEnableState(request *UpdateDevicesEnableStateRequest) (*UpdateDevicesEnableStateResponse, error) {
	if request == nil {
		request = NewUpdateDevicesEnableStateRequest()
	}
	resp := NewUpdateDevicesEnableStateResponse()
	return resp, c.Send(request, resp)
}

// UpdateDevicesEnableStateWithContext is like UpdateDevicesEnableState but
// attaches the caller-supplied context to the request.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INTERNALERROR_INTERNALRPCERROR = "InternalError.InternalRPCError"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_INSTANCENOTEXIST = "ResourceNotFound.InstanceNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION = "UnauthorizedOperation"
//  UNAUTHORIZEDOPERATION_DEVICEHASALREADYBINDGATEWAY = "UnauthorizedOperation.DeviceHasAlreadyBindGateway"
//  UNAUTHORIZEDOPERATION_GATEWAYHASBINDEDDEVICES = "UnauthorizedOperation.GatewayHasBindedDevices"
//  UNSUPPORTEDOPERATION_DEVICEOTATASKINPROGRESS = "UnsupportedOperation.DeviceOtaTaskInProgress"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) UpdateDevicesEnableStateWithContext(ctx context.Context, request *UpdateDevicesEnableStateRequest) (*UpdateDevicesEnableStateResponse, error) {
	if request == nil {
		request = NewUpdateDevicesEnableStateRequest()
	}
	request.SetContext(ctx)
	resp := NewUpdateDevicesEnableStateResponse()
	return resp, c.Send(request, resp)
}
// NewUpdateFirmwareRequest returns an UpdateFirmware request with the API
// metadata (service, version, action) pre-populated.
func NewUpdateFirmwareRequest() *UpdateFirmwareRequest {
	req := &UpdateFirmwareRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "UpdateFirmware")
	return req
}

// NewUpdateFirmwareResponse returns an empty UpdateFirmware response.
func NewUpdateFirmwareResponse() *UpdateFirmwareResponse {
	return &UpdateFirmwareResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
}

// UpdateFirmware initiates a firmware-upgrade request for the given device.
//
// Possible error codes:
//  FAILEDOPERATION_DEVICEFIRMWAREISUPDATED = "FailedOperation.DeviceFirmwareIsUpdated"
//  FAILEDOPERATION_DEVICEINFOOUTDATED = "FailedOperation.DeviceInfoOutdated"
//  FAILEDOPERATION_OTHERUPDATETASKEXIST = "FailedOperation.OtherUpdateTaskExist"
//  INTERNALERROR = "InternalError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_DEVICEHASNOFIRMWARE = "ResourceNotFound.DeviceHasNoFirmware"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_FIRMWARENOTEXIST = "ResourceNotFound.FirmwareNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION_DEVICEISNOTENABLED = "UnauthorizedOperation.DeviceIsNotEnabled"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) UpdateFirmware(request *UpdateFirmwareRequest) (*UpdateFirmwareResponse, error) {
	if request == nil {
		request = NewUpdateFirmwareRequest()
	}
	resp := NewUpdateFirmwareResponse()
	return resp, c.Send(request, resp)
}

// UpdateFirmwareWithContext is like UpdateFirmware but attaches the
// caller-supplied context to the request.
//
// Possible error codes:
//  FAILEDOPERATION_DEVICEFIRMWAREISUPDATED = "FailedOperation.DeviceFirmwareIsUpdated"
//  FAILEDOPERATION_DEVICEINFOOUTDATED = "FailedOperation.DeviceInfoOutdated"
//  FAILEDOPERATION_OTHERUPDATETASKEXIST = "FailedOperation.OtherUpdateTaskExist"
//  INTERNALERROR = "InternalError"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  RESOURCENOTFOUND_DEVICEHASNOFIRMWARE = "ResourceNotFound.DeviceHasNoFirmware"
//  RESOURCENOTFOUND_DEVICENOTEXIST = "ResourceNotFound.DeviceNotExist"
//  RESOURCENOTFOUND_FIRMWARENOTEXIST = "ResourceNotFound.FirmwareNotExist"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNAUTHORIZEDOPERATION_DEVICEISNOTENABLED = "UnauthorizedOperation.DeviceIsNotEnabled"
//  UNAUTHORIZEDOPERATION_NOPERMISSIONTOSTUDIOPRODUCT = "UnauthorizedOperation.NoPermissionToStudioProduct"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) UpdateFirmwareWithContext(ctx context.Context, request *UpdateFirmwareRequest) (*UpdateFirmwareResponse, error) {
	if request == nil {
		request = NewUpdateFirmwareRequest()
	}
	request.SetContext(ctx)
	resp := NewUpdateFirmwareResponse()
	return resp, c.Send(request, resp)
}
// NewUploadFirmwareRequest returns an UploadFirmware request with the API
// metadata (service, version, action) pre-populated.
func NewUploadFirmwareRequest() *UploadFirmwareRequest {
	req := &UploadFirmwareRequest{
		BaseRequest: &tchttp.BaseRequest{},
	}
	req.Init().WithApiInfo("iotexplorer", APIVersion, "UploadFirmware")
	return req
}

// NewUploadFirmwareResponse returns an empty UploadFirmware response.
func NewUploadFirmwareResponse() *UploadFirmwareResponse {
	return &UploadFirmwareResponse{
		BaseResponse: &tchttp.BaseResponse{},
	}
}

// UploadFirmware uploads device firmware to the platform.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INVALIDPARAMETER_FIRMWAREALREADYEXIST = "InvalidParameter.FirmwareAlreadyExist"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_FIRMWAREALREADYEXIST = "InvalidParameterValue.FirmwareAlreadyExist"
//  LIMITEXCEEDED_FIRMWAREEXCEEDLIMIT = "LimitExceeded.FirmwareExceedLimit"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) UploadFirmware(request *UploadFirmwareRequest) (*UploadFirmwareResponse, error) {
	if request == nil {
		request = NewUploadFirmwareRequest()
	}
	resp := NewUploadFirmwareResponse()
	return resp, c.Send(request, resp)
}

// UploadFirmwareWithContext is like UploadFirmware but attaches the
// caller-supplied context to the request.
//
// Possible error codes:
//  INTERNALERROR = "InternalError"
//  INVALIDPARAMETER_FIRMWAREALREADYEXIST = "InvalidParameter.FirmwareAlreadyExist"
//  INVALIDPARAMETERVALUE = "InvalidParameterValue"
//  INVALIDPARAMETERVALUE_FIRMWAREALREADYEXIST = "InvalidParameterValue.FirmwareAlreadyExist"
//  LIMITEXCEEDED_FIRMWAREEXCEEDLIMIT = "LimitExceeded.FirmwareExceedLimit"
//  RESOURCENOTFOUND_STUDIOPRODUCTNOTEXIST = "ResourceNotFound.StudioProductNotExist"
//  UNSUPPORTEDOPERATION_INSTANCEISOLATED = "UnsupportedOperation.InstanceIsolated"
func (c *Client) UploadFirmwareWithContext(ctx context.Context, request *UploadFirmwareRequest) (*UploadFirmwareResponse, error) {
	if request == nil {
		request = NewUploadFirmwareRequest()
	}
	request.SetContext(ctx)
	resp := NewUploadFirmwareResponse()
	return resp, c.Send(request, resp)
}
| [] | [] | [] | [] | [] | go | null | null | null |
test/e2e/storage/sanity.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"bytes"
"context"
"flag"
"fmt"
"os"
"os/exec"
"regexp"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/container-storage-interface/spec/lib/go/csi"
api "github.com/intel/pmem-csi/pkg/apis/pmemcsi/v1beta1"
"github.com/kubernetes-csi/csi-test/v3/pkg/sanity"
sanityutils "github.com/kubernetes-csi/csi-test/v3/utils"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/status"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
clientexec "k8s.io/client-go/util/exec"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/skipper"
testutils "k8s.io/kubernetes/test/utils"
"github.com/intel/pmem-csi/test/e2e/deploy"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Command-line knobs for the sanity stress test below.
var (
	// numSanityWorkers bounds the number of goroutines that create and test
	// volumes in parallel; since each worker handles one volume at a time it
	// is also the maximum number of volumes that exist at any moment.
	numSanityWorkers = flag.Int("pmem.sanity.workers", 10, "number of worker creating volumes in parallel and thus also the maximum number of volumes at any time")
	// 0 = default is overridden below.
	numSanityVolumes = flag.Int("pmem.sanity.volumes", 0, "number of total volumes to create")
	// sanityVolumeSize is parsed as a Kubernetes resource quantity (e.g. "15Mi").
	sanityVolumeSize = flag.String("pmem.sanity.volume-size", "15Mi", "size of each volume")
)
// Run the csi-test sanity tests against a PMEM-CSI driver.
var _ = deploy.DescribeForSome("sanity", func(d *deploy.Deployment) bool {
// This test expects that PMEM-CSI was deployed with
// socat port forwarding enabled (see deploy/kustomize/testing/README.md).
// This is not the case when deployed in production mode.
return d.Testing
}, func(d *deploy.Deployment) {
// Shared configuration for the csi-test sanity suite.
config := sanity.NewTestConfig()
config.TestVolumeSize = 1 * 1024 * 1024
// The actual directories will be created as unique
// temp directories inside these directories.
// We intentionally do not use the real /var/lib/kubelet/pods as
// root for the target path, because kubelet is monitoring it
// and deletes all extra entries that it does not know about.
config.TargetPath = "/var/lib/kubelet/plugins/kubernetes.io/csi/pv/pmem-sanity-target.XXXXXX"
config.StagingPath = "/var/lib/kubelet/plugins/kubernetes.io/csi/pv/pmem-sanity-staging.XXXXXX"
config.ControllerDialOptions = []grpc.DialOption{
	// For our restart tests.
	grpc.WithKeepaliveParams(keepalive.ClientParameters{
		PermitWithoutStream: true,
		// This is the minimum. Specifying it explicitly
		// avoids some log output from gRPC.
		Time: 10 * time.Second,
	}),
	// For plain HTTP.
	grpc.WithInsecure(),
}
f := framework.NewDefaultFramework("pmem")
f.SkipNamespaceCreation = true // We don't need a per-test namespace and skipping it makes the tests run faster.
var execOnTestNode func(args ...string) string
var cleanup func()
var cluster *deploy.Cluster
// Per-test setup: look up the CSI socket addresses of the deployed driver
// and install helpers (execOnTestNode, mkdir/rmdir callbacks) that run
// commands on the test node via the socat helper pod.
BeforeEach(func() {
	cs := f.ClientSet
	var err error
	cluster, err = deploy.NewCluster(cs, f.DynamicClient)
	framework.ExpectNoError(err, "query cluster")
	config.Address, config.ControllerAddress, err = deploy.LookupCSIAddresses(cluster, d.Namespace)
	framework.ExpectNoError(err, "find CSI addresses")
	framework.Logf("sanity: using controller %s and node %s", config.ControllerAddress, config.Address)
	// f.ExecCommandInContainerWithFullOutput assumes that we want a pod in the test's namespace,
	// so we have to set one.
	f.Namespace = &v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: d.Namespace,
		},
	}
	// Avoid looking for the socat pod unless we actually need it.
	// Some tests might just get skipped. This has to be thread-safe
	// for the sanity stress test.
	var socat *v1.Pod
	var mutex sync.Mutex
	getSocatPod := func() *v1.Pod {
		mutex.Lock()
		defer mutex.Unlock()
		if socat != nil {
			return socat
		}
		socat = cluster.WaitForAppInstance(labels.Set{
			"app.kubernetes.io/component": "node-testing",
			"app.kubernetes.io/part-of":   "pmem-csi",
		},
			cluster.NodeIP(1), d.Namespace)
		return socat
	}
	// execOnTestNode runs a command inside the socat container and returns
	// its stdout; any error or stderr output fails the test.
	execOnTestNode = func(args ...string) string {
		// Wait for socat pod on that node. We need it for
		// creating directories. We could use the PMEM-CSI
		// node container, but that then forces us to have
		// mkdir and rmdir in that container, which we might
		// not want long-term.
		socat := getSocatPod()
		for {
			stdout, stderr, err := f.ExecCommandInContainerWithFullOutput(socat.Name, "socat", args...)
			if err != nil {
				exitErr, ok := err.(clientexec.ExitError)
				if ok && exitErr.ExitStatus() == 126 {
					// This doesn't necessarily mean that the actual binary cannot
					// be executed. It also can be an error in the code which
					// prepares for running in the container.
					framework.Logf("126 = 'cannot execute' error, trying again")
					continue
				}
			}
			framework.ExpectNoError(err, "%s in socat container, stderr:\n%s", args, stderr)
			Expect(stderr).To(BeEmpty(), "unexpected stderr from %s in socat container", args)
			By("Exec Output: " + stdout)
			return stdout
		}
	}
	// mkdir creates a unique temporary directory on the test node from the
	// given mktemp template and returns its path.
	mkdir := func(path string) (string, error) {
		path = execOnTestNode("mktemp", "-d", path)
		// Ensure that the path that we created
		// survives a sudden power loss (as during the
		// restart tests below), otherwise rmdir will
		// fail when it's gone.
		execOnTestNode("sync", "-f", path)
		return path, nil
	}
	// rmdir removes a directory previously created with mkdir.
	rmdir := func(path string) error {
		execOnTestNode("rmdir", path)
		return nil
	}
	config.CreateTargetDir = mkdir
	config.CreateStagingDir = mkdir
	config.RemoveTargetPath = rmdir
	config.RemoveStagingPath = rmdir
})
// Per-test teardown: run the cleanup callback registered by a test, if any.
AfterEach(func() {
	if cleanup != nil {
		cleanup()
	}
})
// This adds several tests that just get skipped.
// TODO: static definition of driver capabilities (https://github.com/kubernetes-csi/csi-test/issues/143)
sc := sanity.GinkgoTest(&config)
// The test context caches a connection to the CSI driver.
// When we re-deploy, gRPC will try to reconnect for that connection,
// leading to log messages about connection errors because the node port
// is allocated dynamically and changes when redeploying. Therefore
// we register a hook which clears the connection when PMEM-CSI
// gets re-deployed.
scFinalize := func() {
	sc.Finalize()
	// Not sure why this isn't in Finalize - a bug?
	sc.Conn = nil
	sc.ControllerConn = nil
}
deploy.AddUninstallHook(func(deploymentName string) {
	framework.Logf("sanity: deployment %s is gone, closing test connections to controller %s and node %s.",
		deploymentName,
		config.ControllerAddress,
		config.Address)
	scFinalize()
})
var _ = Describe("PMEM-CSI", func() {
var (
cl *sanity.Cleanup
nc csi.NodeClient
cc, ncc csi.ControllerClient
nodeID string
v volume
cancel func()
rebooted bool
)
// Per-spec setup: connect to the driver, determine the node ID, and
// prepare a volume helper with a default five-minute timeout.
BeforeEach(func() {
	sc.Setup()
	nc = csi.NewNodeClient(sc.Conn)
	cc = csi.NewControllerClient(sc.ControllerConn)
	ncc = csi.NewControllerClient(sc.Conn) // This works because PMEM-CSI exposes the node, controller, and ID server via its csi.sock.
	cl = &sanity.Cleanup{
		Context:                    sc,
		NodeClient:                 nc,
		ControllerClient:           cc,
		ControllerPublishSupported: true,
		NodeStageSupported:         true,
	}
	rebooted = false
	nid, err := nc.NodeGetInfo(
		context.Background(),
		&csi.NodeGetInfoRequest{})
	framework.ExpectNoError(err, "get node ID")
	nodeID = nid.GetNodeId()
	// Default timeout for tests.
	ctx, c := context.WithTimeout(context.Background(), 5*time.Minute)
	cancel = c
	v = volume{
		namePrefix: "unset",
		ctx:        ctx,
		sc:         sc,
		cc:         cc,
		nc:         nc,
		cl:         cl,
	}
})
// Per-spec teardown: delete leftover volumes, tear down the sanity
// context and, after a reboot test, reset all cached connections and pods.
AfterEach(func() {
	cl.DeleteVolumes()
	cancel()
	sc.Teardown()
	if rebooted {
		// Remove all cached connections, too.
		scFinalize()
		// Rebooting a node increases the restart counter of
		// the containers. This is normal in that case, but
		// for the next test triggers the check that
		// containers shouldn't restart. To get around that,
		// we delete all PMEM-CSI pods after a reboot test.
		By("stopping all PMEM-CSI pods after rebooting some node(s)")
		d.DeleteAllPods(cluster)
	}
})
// Verifies that the node driver's volume metadata survives a node reboot:
// the volume list reported after the reboot must match the list before it.
It("stores state across reboots for single volume", func() {
	canRestartNode(nodeID)
	execOnTestNode("sync")
	v.namePrefix = "state-volume"

	// We intentionally check the state of the controller on the node here.
	// The master caches volumes and does not get rebooted.
	initialVolumes, err := ncc.ListVolumes(context.Background(), &csi.ListVolumesRequest{})
	framework.ExpectNoError(err, "list volumes")

	volName, vol := v.create(11*1024*1024, nodeID)

	createdVolumes, err := ncc.ListVolumes(context.Background(), &csi.ListVolumesRequest{})
	// Fixed failure message: this list happens after creating the volume and
	// before the reboot; the old text claimed "after reboot".
	framework.ExpectNoError(err, "list volumes after volume creation")
	Expect(createdVolumes.Entries).To(HaveLen(len(initialVolumes.Entries)+1), "one more volume on : %s", nodeID)

	// Restart.
	rebooted = true
	restartNode(f.ClientSet, nodeID, sc)

	// Once we get an answer, it is expected to be the same as before.
	By("checking volumes")
	restartedVolumes, err := ncc.ListVolumes(context.Background(), &csi.ListVolumesRequest{})
	framework.ExpectNoError(err, "list volumes after reboot")
	Expect(restartedVolumes.Entries).To(ConsistOf(createdVolumes.Entries), "same volumes as before node reboot")

	v.remove(vol, volName)
})
// Verifies that a volume that was published before a node reboot can be
// unpublished and published again afterwards.
It("can mount again after reboot", func() {
	canRestartNode(nodeID)
	execOnTestNode("sync")
	v.namePrefix = "mount-volume"

	name, vol := v.create(22*1024*1024, nodeID)
	// Publish for the first time.
	nodeID := v.publish(name, vol)

	// Restart.
	rebooted = true
	restartNode(f.ClientSet, nodeID, sc)
	_, err := ncc.ListVolumes(context.Background(), &csi.ListVolumesRequest{})
	framework.ExpectNoError(err, "Failed to list volumes after reboot")

	// No failure expected although rebooting already unmounted the volume.
	// We still need to remove the target path, if it has survived
	// the hard power off.
	v.unpublish(vol, nodeID)

	// Publish for the second time.
	v.publish(name, vol)

	v.unpublish(vol, nodeID)
	v.remove(vol, name)
})
// Verifies that a volume can still be published after the node driver pod
// on the volume's node has been deleted and recreated.
It("can publish volume after a node driver restart", func() {
	restartPod := ""
	v.namePrefix = "mount-volume"
	pods, err := WaitForPodsWithLabelRunningReady(f.ClientSet, d.Namespace,
		labels.Set{"app.kubernetes.io/name": "pmem-csi-node"}.AsSelector(), cluster.NumNodes()-1, time.Minute)
	framework.ExpectNoError(err, "All node drivers are not ready")
	name, vol := v.create(22*1024*1024, nodeID)
	defer v.remove(vol, name)
	nodeID := v.publish(name, vol)
	defer v.unpublish(vol, nodeID)
	// Find the driver pod that runs on the volume's node.
	for _, p := range pods.Items {
		if p.Spec.NodeName == nodeID {
			restartPod = p.Name
		}
	}
	// Delete the driver pod on that node; the DaemonSet recreates it.
	err = e2epod.DeletePodWithWaitByName(f.ClientSet, d.Namespace, restartPod)
	Expect(err).ShouldNot(HaveOccurred(), "Failed to stop driver pod %s", restartPod)
	// Wait till the driver pod gets restarted.
	//
	// Bug fix: the original passed time.Now().Minute() (the wall-clock
	// minute of the hour, 0-59) as minReadySeconds, which made the wait
	// nondeterministic and time-of-day dependent. Use 0 so the pod merely
	// has to report Ready.
	err = e2epod.WaitForPodsReady(f.ClientSet, d.Namespace, restartPod, 0)
	framework.ExpectNoError(err, "Node driver '%s' pod is not ready", restartPod)
	_, err = ncc.ListVolumes(context.Background(), &csi.ListVolumesRequest{})
	framework.ExpectNoError(err, "list volumes after node driver restart")
	// Try republishing.
	v.publish(name, vol)
})
// After rebooting the node that runs the controller, the reported
// available capacity must eventually return to the pre-reboot value.
It("capacity is restored after controller restart", func() {
	By("Fetching pmem-csi-controller pod name")
	pods, err := WaitForPodsWithLabelRunningReady(f.ClientSet, d.Namespace,
		labels.Set{"app.kubernetes.io/name": "pmem-csi-controller"}.AsSelector(), 1 /* one replica */, time.Minute)
	framework.ExpectNoError(err, "PMEM-CSI controller running with one replica")
	controllerNode := pods.Items[0].Spec.NodeName
	canRestartNode(controllerNode)

	execOnTestNode("sync")
	// Remember the capacity before the reboot.
	capacity, err := cc.GetCapacity(context.Background(), &csi.GetCapacityRequest{})
	framework.ExpectNoError(err, "get capacity before restart")

	rebooted = true
	restartNode(f.ClientSet, controllerNode, sc)

	_, err = WaitForPodsWithLabelRunningReady(f.ClientSet, d.Namespace,
		labels.Set{"app.kubernetes.io/name": "pmem-csi-controller"}.AsSelector(), 1 /* one replica */, 5*time.Minute)
	framework.ExpectNoError(err, "PMEM-CSI controller running again with one replica")

	By("waiting for full capacity")
	Eventually(func() int64 {
		currentCapacity, err := cc.GetCapacity(context.Background(), &csi.GetCapacityRequest{})
		if err != nil {
			// Probably not running again yet.
			return 0
		}
		return currentCapacity.AvailableCapacity
	}, "3m", "5s").Should(Equal(capacity.AvailableCapacity), "total capacity after controller restart")
})
// Creates volumes on the node until the node's reported capacity changes,
// then checks that the controller reports the same capacity for that node.
It("should return right capacity", func() {
	resp, err := ncc.GetCapacity(context.Background(), &csi.GetCapacityRequest{})
	Expect(err).Should(BeNil(), "Failed to get node initial capacity")
	nodeCapacity := resp.AvailableCapacity
	i := 0
	volSize := int64(1) * 1024 * 1024 * 1024

	// Keep creating volumes till there is change in node capacity.
	//
	// Bug fix: the original called Eventually(...) without chaining
	// .Should(...). A Gomega AsyncAssertion does nothing until Should is
	// invoked, so the polling function never ran, no volume was created,
	// and the final comparison passed trivially.
	Eventually(func() bool {
		volName := fmt.Sprintf("get-capacity-check-%d", i)
		i++
		By(fmt.Sprintf("creating volume '%s' on node '%s'", volName, nodeID))
		req := &csi.CreateVolumeRequest{
			Name: volName,
			VolumeCapabilities: []*csi.VolumeCapability{
				{
					AccessType: &csi.VolumeCapability_Mount{
						Mount: &csi.VolumeCapability_MountVolume{},
					},
					AccessMode: &csi.VolumeCapability_AccessMode{
						Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
					},
				},
			},
			CapacityRange: &csi.CapacityRange{
				RequiredBytes: volSize,
			},
		}
		volResp, err := ncc.CreateVolume(context.Background(), req)
		Expect(err).Should(BeNil(), "Failed to create volume on node")
		cl.MaybeRegisterVolume(volName, volResp, err)

		resp, err := ncc.GetCapacity(context.Background(), &csi.GetCapacityRequest{})
		Expect(err).Should(BeNil(), "Failed to get node capacity after volume creation")

		// find if change in capacity
		ret := nodeCapacity != resp.AvailableCapacity
		// capture the new capacity
		nodeCapacity = resp.AvailableCapacity
		return ret
	}, "5m", "1s").Should(BeTrue(), "node capacity should change after creating volumes")

	By("Getting controller capacity")
	resp, err = cc.GetCapacity(context.Background(), &csi.GetCapacityRequest{
		AccessibleTopology: &csi.Topology{
			Segments: map[string]string{
				"pmem-csi.intel.com/node": nodeID,
			},
		},
	})
	Expect(err).Should(BeNil(), "Failed to get capacity of controller")
	Expect(resp.AvailableCapacity).To(Equal(nodeCapacity), "capacity mismatch")
})
// A request whose serialized size exceeds the gRPC maximum must be
// rejected by the server with a "message larger than max" status.
It("excessive message sizes should be rejected", func() {
	// Build a topology with enough segments to blow the message size limit.
	segments := map[string]string{}
	for i := 0; i < 100000; i++ {
		key := fmt.Sprintf("pmem-csi.intel.com/node%d", i)
		segments[key] = nodeID
	}
	req := &csi.GetCapacityRequest{
		AccessibleTopology: &csi.Topology{
			Segments: segments,
		},
	}
	_, err := cc.GetCapacity(context.Background(), req)
	Expect(err).ShouldNot(BeNil(), "unexpected success for too large request")
	// Named "st" to avoid shadowing the grpc/status package.
	st, ok := status.FromError(err)
	Expect(ok).Should(BeTrue(), "expected status in error, got: %v", err)
	Expect(st.Message()).Should(ContainSubstring("grpc: received message larger than max"))
})
// Deleting a volume that is still published must fail with
// FailedPrecondition ("device busy").
It("delete volume should fail with appropriate error", func() {
	v.namePrefix = "delete-volume"
	name, vol := v.create(2*1024*1024, nodeID)
	// Publish the volume so that it is in use.
	nodeID := v.publish(name, vol)

	_, err := v.cc.DeleteVolume(v.ctx, &csi.DeleteVolumeRequest{
		VolumeId: vol.GetVolumeId(),
	})
	Expect(err).ShouldNot(BeNil(), fmt.Sprintf("Volume(%s) in use cannot be deleted", name))
	s, ok := status.FromError(err)
	Expect(ok).Should(BeTrue(), "Expected a status error")
	Expect(s.Code()).Should(BeEquivalentTo(codes.FailedPrecondition), "Expected device busy error")

	v.unpublish(vol, nodeID)
	v.remove(vol, name)
})
// Requesting an absurdly large volume (1 PiB) must fail with
// ResourceExhausted; v.create asserts the expected error code.
It("CreateVolume should return ResourceExhausted", func() {
	v.namePrefix = "resource-exhausted"
	v.create(1024*1024*1024*1024*1024, nodeID, codes.ResourceExhausted)
})
// Stress test: several workers create/publish/unpublish/remove volumes in
// parallel until the configured total number of volumes has been handled.
It("stress test", func() {
	// The load here consists of n workers which
	// create and test volumes in parallel until
	// we've created m volumes.
	wg := sync.WaitGroup{}
	// volumes counts issued volume numbers; incremented atomically by workers.
	volumes := int64(0)
	volSize, err := resource.ParseQuantity(*sanityVolumeSize)
	framework.ExpectNoError(err, "parsing pmem.sanity.volume-size parameter value %s", *sanityVolumeSize)
	wg.Add(*numSanityWorkers)

	// Constant time plus variable component for shredding.
	// When using multiple workers, they either share IO bandwidth (parallel shredding)
	// or do it sequentially, therefore we have to multiply by the maximum number
	// of shredding operations.
	secondsPerGigabyte := 10 * time.Second // 2s/GB measured for direct mode in a VM on a fast machine, probably slower elsewhere
	timeout := 300*time.Second + time.Duration(int64(*numSanityWorkers)*volSize.Value()/1024/1024/1024)*secondsPerGigabyte

	// The default depends on the driver deployment and thus has to be calculated here.
	sanityVolumes := *numSanityVolumes
	if sanityVolumes == 0 {
		switch d.Mode {
		case api.DeviceModeDirect:
			// The minimum volume size in direct mode is 2GB, which makes
			// testing a lot slower than in LVM mode. Therefore we create less
			// volumes.
			sanityVolumes = 20
		default:
			sanityVolumes = 100
		}
	}

	// Also adapt the overall test timeout.
	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(int64(timeout)*int64(sanityVolumes)))
	defer cancel()
	v.ctx = ctx

	By(fmt.Sprintf("creating %d volumes of size %s in %d workers, with a timeout per volume of %s", sanityVolumes, volSize.String(), *numSanityWorkers, timeout))
	for i := 0; i < *numSanityWorkers; i++ {
		i := i
		go func() {
			// Order is relevant (first-in-last-out): when returning,
			// we first let GinkgoRecover do its job, which
			// may include marking the test as failed, then tell the
			// parent goroutine that it can proceed.
			defer func() {
				By(fmt.Sprintf("worker-%d terminating", i))
				wg.Done()
			}()
			defer GinkgoRecover() // must be invoked directly by defer, otherwise it doesn't see the panic

			// Each worker must use its own pair of directories.
			targetPath := fmt.Sprintf("%s/worker-%d", sc.TargetPath, i)
			stagingPath := fmt.Sprintf("%s/worker-%d", sc.StagingPath, i)
			execOnTestNode("mkdir", targetPath)
			defer execOnTestNode("rmdir", targetPath)
			execOnTestNode("mkdir", stagingPath)
			defer execOnTestNode("rmdir", stagingPath)

			for {
				volume := atomic.AddInt64(&volumes, 1)
				if volume > int64(sanityVolumes) {
					return
				}

				lv := v
				lv.namePrefix = fmt.Sprintf("worker-%d-volume-%d", i, volume)
				lv.targetPath = targetPath
				lv.stagingPath = stagingPath
				func() {
					// Per-volume timeout; the closure ensures cancel runs
					// after each volume, not only at test end.
					ctx, cancel := context.WithTimeout(v.ctx, timeout)
					start := time.Now()
					success := false
					defer func() {
						cancel()
						if !success {
							duration := time.Since(start)
							By(fmt.Sprintf("%s: failed after %s", lv.namePrefix, duration))
							// Stop testing.
							atomic.AddInt64(&volumes, int64(sanityVolumes))
						}
					}()
					lv.ctx = ctx
					volName, vol := lv.create(volSize.Value(), nodeID)
					lv.publish(volName, vol)
					lv.unpublish(vol, nodeID)
					lv.remove(vol, volName)

					// Success!
					duration := time.Since(start)
					success = true
					By(fmt.Sprintf("%s: done, in %s", lv.namePrefix, duration))
				}()
			}
		}()
	}
	wg.Wait()
})
Context("cluster", func() {
type nodeClient struct {
host string
conn *grpc.ClientConn
nc csi.NodeClient
cc csi.ControllerClient
volumes []*csi.ListVolumesResponse_Entry
}
var (
nodes map[string]nodeClient
)
BeforeEach(func() {
// Worker nodes with PMEM.
nodes = make(map[string]nodeClient)
for i := 1; i < cluster.NumNodes(); i++ {
addr := cluster.NodeServiceAddress(i, deploy.SocatPort)
conn, err := sanityutils.Connect(addr, grpc.WithInsecure())
framework.ExpectNoError(err, "connect to socat instance on node #%d via %s", i, addr)
node := nodeClient{
host: addr,
conn: conn,
nc: csi.NewNodeClient(conn),
cc: csi.NewControllerClient(conn),
}
info, err := node.nc.NodeGetInfo(context.Background(), &csi.NodeGetInfoRequest{})
framework.ExpectNoError(err, "node name #%d", i+1)
initialVolumes, err := node.cc.ListVolumes(context.Background(), &csi.ListVolumesRequest{})
framework.ExpectNoError(err, "list volumes on node #%d", i)
node.volumes = initialVolumes.Entries
//nodes = append(nodes, node)
nodes[info.NodeId] = node
}
})
AfterEach(func() {
for _, node := range nodes {
node.conn.Close()
}
})
It("supports persistent volumes", func() {
sizeInBytes := int64(33 * 1024 * 1024)
volName, vol := v.create(sizeInBytes, "")
Expect(len(vol.AccessibleTopology)).To(Equal(1), "accessible topology mismatch")
volNodeName := vol.AccessibleTopology[0].Segments["pmem-csi.intel.com/node"]
Expect(volNodeName).NotTo(BeNil(), "wrong topology")
// Node now should have one additional volume only one node,
// and its size should match the requested one.
for nodeName, node := range nodes {
currentVolumes, err := node.cc.ListVolumes(context.Background(), &csi.ListVolumesRequest{})
framework.ExpectNoError(err, "list volumes on node %s", nodeName)
if nodeName == volNodeName {
Expect(len(currentVolumes.Entries)).To(Equal(len(node.volumes)+1), "one additional volume on node %s", nodeName)
for _, e := range currentVolumes.Entries {
if e.Volume.VolumeId == vol.VolumeId {
Expect(e.Volume.CapacityBytes).To(Equal(sizeInBytes), "additional volume size on node #%s(%s)", nodeName, node.host)
break
}
}
} else {
// ensure that no new volume on other nodes
Expect(len(currentVolumes.Entries)).To(Equal(len(node.volumes)), "volume count mismatch on node %s", nodeName)
}
}
v.remove(vol, volName)
})
It("persistent volume retains data", func() {
sizeInBytes := int64(33 * 1024 * 1024)
volName, vol := v.create(sizeInBytes, nodeID)
Expect(len(vol.AccessibleTopology)).To(Equal(1), "accessible topology mismatch")
Expect(vol.AccessibleTopology[0].Segments["pmem-csi.intel.com/node"]).To(Equal(nodeID), "unexpected node")
// Node now should have one additional volume only one node,
// and its size should match the requested one.
node := nodes[nodeID]
currentVolumes, err := node.cc.ListVolumes(context.Background(), &csi.ListVolumesRequest{})
framework.ExpectNoError(err, "list volumes on node %s", nodeID)
Expect(len(currentVolumes.Entries)).To(Equal(len(node.volumes)+1), "one additional volume on node %s", nodeID)
for _, e := range currentVolumes.Entries {
if e.Volume.VolumeId == vol.VolumeId {
Expect(e.Volume.CapacityBytes).To(Equal(sizeInBytes), "additional volume size on node #%s(%s)", nodeID, node.host)
break
}
}
v.publish(volName, vol)
i := strings.Split(nodeID, "worker")[1]
sshcmd := fmt.Sprintf("%s/_work/%s/ssh.%s", os.Getenv("REPO_ROOT"), os.Getenv("CLUSTER"), i)
// write some data to mounted volume
cmd := "sudo sh -c 'echo -n hello > " + v.getTargetPath() + "/target/test-file'"
ssh := exec.Command(sshcmd, cmd)
out, err := ssh.CombinedOutput()
framework.ExpectNoError(err, "write failure:\n%s", string(out))
// unmount volume
v.unpublish(vol, nodeID)
// republish volume
v.publish(volName, vol)
// ensure the data retained
cmd = "sudo cat " + v.getTargetPath() + "/target/test-file"
ssh = exec.Command(sshcmd, cmd)
out, err = ssh.CombinedOutput()
framework.ExpectNoError(err, "read failure:\n%s", string(out))
Expect(string(out)).To(Equal("hello"), "read failure")
// end of test cleanup
v.unpublish(vol, nodeID)
v.remove(vol, volName)
})
Context("ephemeral volumes", func() {
doit := func(withFlag bool, repeatCalls int) {
targetPath := sc.TargetPath + "/ephemeral"
params := map[string]string{
"size": "1Mi",
}
if withFlag {
params["csi.storage.k8s.io/ephemeral"] = "true"
}
req := csi.NodePublishVolumeRequest{
VolumeId: "fake-ephemeral-volume-id",
VolumeContext: params,
VolumeCapability: &csi.VolumeCapability{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
},
},
TargetPath: targetPath,
}
published := false
var failedPublish, failedUnpublish error
for i := 0; i < repeatCalls; i++ {
_, err := nc.NodePublishVolume(context.Background(), &req)
if err == nil {
published = true
break
} else if failedPublish == nil {
failedPublish = fmt.Errorf("NodePublishVolume for ephemeral volume, attempt #%d: %v", i, err)
}
}
if published {
req := csi.NodeUnpublishVolumeRequest{
VolumeId: "fake-ephemeral-volume-id",
TargetPath: targetPath,
}
for i := 0; i < repeatCalls; i++ {
_, err := nc.NodeUnpublishVolume(context.Background(), &req)
if err != nil && failedUnpublish == nil {
failedUnpublish = fmt.Errorf("NodeUnpublishVolume for ephemeral volume, attempt #%d: %v", i, err)
}
}
}
framework.ExpectNoError(failedPublish)
framework.ExpectNoError(failedUnpublish)
}
doall := func(withFlag bool) {
It("work", func() {
doit(withFlag, 1)
})
It("are idempotent", func() {
doit(withFlag, 10)
})
}
Context("with csi.storage.k8s.io/ephemeral", func() {
doall(true)
})
Context("without csi.storage.k8s.io/ephemeral", func() {
doall(false)
})
})
})
})
})
type volume struct {
namePrefix string
ctx context.Context
sc *sanity.TestContext
cc csi.ControllerClient
nc csi.NodeClient
cl *sanity.Cleanup
stagingPath string
targetPath string
}
func (v volume) getStagingPath() string {
if v.stagingPath != "" {
return v.stagingPath
}
return v.sc.StagingPath
}
func (v volume) getTargetPath() string {
if v.targetPath != "" {
return v.targetPath
}
return v.sc.TargetPath
}
func (v volume) create(sizeInBytes int64, nodeID string, expectedStatus ...codes.Code) (string, *csi.Volume) {
var err error
name := sanity.UniqueString(v.namePrefix)
// Create Volume First
create := fmt.Sprintf("%s: creating a single node writer volume", v.namePrefix)
By(create)
req := &csi.CreateVolumeRequest{
Name: name,
VolumeCapabilities: []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
},
},
},
CapacityRange: &csi.CapacityRange{
RequiredBytes: sizeInBytes,
},
Parameters: v.sc.Config.TestVolumeParameters,
}
if nodeID != "" {
req.AccessibilityRequirements = &csi.TopologyRequirement{
Requisite: []*csi.Topology{
{
Segments: map[string]string{
"pmem-csi.intel.com/node": nodeID,
},
},
},
Preferred: []*csi.Topology{
{
Segments: map[string]string{
"pmem-csi.intel.com/node": nodeID,
},
},
},
}
}
var vol *csi.CreateVolumeResponse
if len(expectedStatus) > 0 {
// Expected to fail, no retries.
vol, err = v.cc.CreateVolume(v.ctx, req)
} else {
// With retries.
err = v.retry(func() error {
vol, err = v.cc.CreateVolume(
v.ctx, req,
)
return err
}, "CreateVolume")
}
v.cl.MaybeRegisterVolume(name, vol, err)
if len(expectedStatus) > 0 {
framework.ExpectError(err, create)
status, ok := status.FromError(err)
Expect(ok).To(BeTrue(), "have gRPC status error")
Expect(status.Code()).To(Equal(expectedStatus[0]), "expected gRPC status code")
return name, nil
}
framework.ExpectNoError(err, create)
Expect(vol).NotTo(BeNil())
Expect(vol.GetVolume()).NotTo(BeNil())
Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty())
Expect(vol.GetVolume().GetCapacityBytes()).To(Equal(sizeInBytes), "volume capacity")
return name, vol.GetVolume()
}
func (v volume) publish(name string, vol *csi.Volume) string {
var err error
By(fmt.Sprintf("%s: getting a node id", v.namePrefix))
nid, err := v.nc.NodeGetInfo(
v.ctx,
&csi.NodeGetInfoRequest{})
framework.ExpectNoError(err, "get node ID")
Expect(nid).NotTo(BeNil())
Expect(nid.GetNodeId()).NotTo(BeEmpty())
var conpubvol *csi.ControllerPublishVolumeResponse
stage := fmt.Sprintf("%s: node staging volume", v.namePrefix)
By(stage)
var nodestagevol interface{}
err = v.retry(func() error {
nodestagevol, err = v.nc.NodeStageVolume(
v.ctx,
&csi.NodeStageVolumeRequest{
VolumeId: vol.GetVolumeId(),
VolumeCapability: &csi.VolumeCapability{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
},
},
StagingTargetPath: v.getStagingPath(),
VolumeContext: vol.GetVolumeContext(),
PublishContext: conpubvol.GetPublishContext(),
},
)
return err
}, "NodeStageVolume")
framework.ExpectNoError(err, stage)
Expect(nodestagevol).NotTo(BeNil())
// NodePublishVolume
publish := fmt.Sprintf("%s: publishing the volume on a node", v.namePrefix)
By(publish)
var nodepubvol interface{}
v.retry(func() error {
nodepubvol, err = v.nc.NodePublishVolume(
v.ctx,
&csi.NodePublishVolumeRequest{
VolumeId: vol.GetVolumeId(),
TargetPath: v.getTargetPath() + "/target",
StagingTargetPath: v.getStagingPath(),
VolumeCapability: &csi.VolumeCapability{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
},
},
VolumeContext: vol.GetVolumeContext(),
PublishContext: conpubvol.GetPublishContext(),
},
)
return err
}, "NodePublishVolume")
framework.ExpectNoError(err, publish)
Expect(nodepubvol).NotTo(BeNil())
return nid.GetNodeId()
}
func (v volume) unpublish(vol *csi.Volume, nodeID string) {
var err error
unpublish := fmt.Sprintf("%s: cleaning up calling nodeunpublish", v.namePrefix)
By(unpublish)
var nodeunpubvol interface{}
err = v.retry(func() error {
nodeunpubvol, err = v.nc.NodeUnpublishVolume(
v.ctx,
&csi.NodeUnpublishVolumeRequest{
VolumeId: vol.GetVolumeId(),
TargetPath: v.getTargetPath() + "/target",
})
return err
}, "NodeUnpublishVolume")
framework.ExpectNoError(err, unpublish)
Expect(nodeunpubvol).NotTo(BeNil())
unstage := fmt.Sprintf("%s: cleaning up calling nodeunstage", v.namePrefix)
By(unstage)
var nodeunstagevol interface{}
err = v.retry(func() error {
nodeunstagevol, err = v.nc.NodeUnstageVolume(
v.ctx,
&csi.NodeUnstageVolumeRequest{
VolumeId: vol.GetVolumeId(),
StagingTargetPath: v.getStagingPath(),
},
)
return err
}, "NodeUnstageVolume")
framework.ExpectNoError(err, unstage)
Expect(nodeunstagevol).NotTo(BeNil())
}
func (v volume) remove(vol *csi.Volume, volName string) {
var err error
delete := fmt.Sprintf("%s: deleting the volume %s", v.namePrefix, vol.GetVolumeId())
By(delete)
var deletevol interface{}
err = v.retry(func() error {
deletevol, err = v.cc.DeleteVolume(
v.ctx,
&csi.DeleteVolumeRequest{
VolumeId: vol.GetVolumeId(),
},
)
return err
}, "DeleteVolume")
framework.ExpectNoError(err, delete)
Expect(deletevol).NotTo(BeNil())
v.cl.UnregisterVolume(volName)
}
// retry will execute the operation (rapidly initially, then with
// exponential backoff) until it succeeds or the context times
// out. Each failure gets logged.
func (v volume) retry(operation func() error, what string) error {
if v.ctx.Err() != nil {
return fmt.Errorf("%s: not calling %s, the deadline has been reached already", v.namePrefix, what)
}
// Something failed. Retry with exponential backoff.
// TODO: use wait.NewExponentialBackoffManager once we use K8S v1.18.
backoff := NewExponentialBackoffManager(
time.Second, // initial backoff
10*time.Second, // maximum backoff
30*time.Second, // reset duration
2, // backoff factor
0, // no jitter
clock.RealClock{})
for i := 0; ; i++ {
err := operation()
if err == nil {
return nil
}
framework.Logf("%s: %s failed at attempt %#d: %v", v.namePrefix, what, i, err)
select {
case <-v.ctx.Done():
framework.Logf("%s: %s failed %d times and deadline exceeded, giving up after error: %v", v.namePrefix, what, i+1, err)
return err
case <-backoff.Backoff().C():
}
}
}
func canRestartNode(nodeID string) {
if !regexp.MustCompile(`worker\d+$`).MatchString(nodeID) {
skipper.Skipf("node %q not one of the expected QEMU nodes (worker<number>))", nodeID)
}
}
// restartNode works only for one of the nodes in the QEMU virtual cluster.
// It does a hard poweroff via SysRq and relies on Docker to restart the
// "failed" node.
func restartNode(cs clientset.Interface, nodeID string, sc *sanity.TestContext) {
cc := csi.NewControllerClient(sc.ControllerConn)
capacity, err := cc.GetCapacity(context.Background(), &csi.GetCapacityRequest{})
framework.ExpectNoError(err, "get capacity before restart")
node := strings.Split(nodeID, "worker")[1]
ssh := fmt.Sprintf("%s/_work/%s/ssh.%s",
os.Getenv("REPO_ROOT"),
os.Getenv("CLUSTER"),
node)
// We detect a successful reboot because a temporary file in the
// tmpfs /run will be gone after the reboot.
out, err := exec.Command(ssh, "sudo", "touch", "/run/delete-me").CombinedOutput()
framework.ExpectNoError(err, "%s touch /run/delete-me:\n%s", ssh, string(out))
// Shutdown via SysRq b (https://major.io/2009/01/29/linux-emergency-reboot-or-shutdown-with-magic-commands/).
// TCPKeepAlive is necessary because otherwise ssh can hang for a long time when the remote end dies without
// closing the TCP connection.
By(fmt.Sprintf("shutting down node %s", nodeID))
shutdown := exec.Command(ssh)
shutdown.Stdin = bytes.NewBufferString(`sudo sh -c 'echo 1 > /proc/sys/kernel/sysrq'
sudo sh -c 'echo b > /proc/sysrq-trigger'`)
out, _ = shutdown.CombinedOutput()
// Wait for node to reboot.
By("waiting for node to restart")
Eventually(func() bool {
test := exec.Command(ssh)
test.Stdin = bytes.NewBufferString("test ! -e /run/delete-me")
out, err := test.CombinedOutput()
if err == nil {
return true
}
framework.Logf("test for /run/delete-me with %s:\n%s\n%s", ssh, err, out)
return false
}, "5m", "1s").Should(Equal(true), "node up again")
By("Node reboot success! Waiting for driver restore connections")
Eventually(func() int64 {
currentCapacity, err := cc.GetCapacity(context.Background(), &csi.GetCapacityRequest{})
if err != nil {
// Probably not running again yet.
return 0
}
return currentCapacity.AvailableCapacity
}, "3m", "2s").Should(Equal(capacity.AvailableCapacity), "total capacity after node restart")
By("Probing node")
Eventually(func() bool {
if _, err := csi.NewIdentityClient(sc.Conn).Probe(context.Background(), &csi.ProbeRequest{}); err != nil {
return false
}
By("Node driver: Probe success")
return true
}, "5m", "2s").Should(Equal(true), "node driver not ready")
}
// This is a copy from framework/utils.go with the fix from https://github.com/kubernetes/kubernetes/pull/78687
// TODO: update to Kubernetes 1.15 (assuming that PR gets merged in time for that) and remove this function.
func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label labels.Selector, num int, timeout time.Duration) (pods *v1.PodList, err error) {
var current int
err = wait.Poll(2*time.Second, timeout,
func() (bool, error) {
pods, err = e2epod.WaitForPodsWithLabel(c, ns, label)
if err != nil {
framework.Logf("Failed to list pods: %v", err)
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
current = 0
for _, pod := range pods.Items {
if flag, err := testutils.PodRunningReady(&pod); err == nil && flag == true {
current++
}
}
if current != num {
framework.Logf("Got %v pods running and ready, expect: %v", current, num)
return false, nil
}
return true, nil
})
return pods, err
}
| [
"\"REPO_ROOT\"",
"\"CLUSTER\"",
"\"REPO_ROOT\"",
"\"CLUSTER\""
] | [] | [
"CLUSTER",
"REPO_ROOT"
] | [] | ["CLUSTER", "REPO_ROOT"] | go | 2 | 0 | |
src/python/importer.py | # Copyright (c) 2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import importlib
import importlib.abc
import importlib.util
import os
class ByteCodeLoader(importlib.abc.Loader):
def __init__(self, code):
super().__init__()
self.code = code
def exec_module(self, module):
exec(self.code, module.__dict__)
# Simple importer that allows python to import data from a dict of
# code objects. The keys are the module path, and the items are the
# filename and bytecode of the file.
class CodeImporter(object):
def __init__(self):
self.modules = {}
override_var = os.environ.get('M5_OVERRIDE_PY_SOURCE', 'false')
self.override = (override_var.lower() in ('true', 'yes'))
def add_module(self, abspath, modpath, code):
if modpath in self.modules:
raise AttributeError("%s already found in importer" % modpath)
self.modules[modpath] = (abspath, code)
def find_spec(self, fullname, path, target=None):
if fullname not in self.modules:
return None
abspath, code = self.modules[fullname]
if self.override and os.path.exists(abspath):
src = open(abspath, 'r').read()
code = compile(src, abspath, 'exec')
is_package = (os.path.basename(abspath) == '__init__.py')
spec = importlib.util.spec_from_loader(
name=fullname, loader=ByteCodeLoader(code),
is_package=is_package)
spec.loader_state = self.modules.keys()
return spec
# Create an importer and add it to the meta_path so future imports can
# use it. There's currently nothing in the importer, but calls to
# add_module can be used to add code.
def install():
importer = CodeImporter()
global add_module
add_module = importer.add_module
import sys
sys.meta_path.insert(0, importer)
# Injected into this module's namespace by the c++ code that loads it.
_init_all_embedded()
| [] | [] | [
"M5_OVERRIDE_PY_SOURCE"
] | [] | ["M5_OVERRIDE_PY_SOURCE"] | python | 1 | 0 | |
main.go | package main
//all made to work even if there is no db
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
)
type HTTPHandler struct {
token string
}
func (h *HTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, h.token)
}
func main() {
log.Println("v 1.0")
log.Println("If you run this in production, you are opening the gates")
weakprotection := os.Getenv("WEAKPROTECTION")
if weakprotection == "" {
weakprotection = "unsecure"
}
debug := os.Getenv("DEBUG")
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
if strings.Contains(r.RequestURI, "favicon.ico") {
return
}
if err := r.ParseForm(); err != nil {
log.Printf("ParseForm() err: %v", err)
fmt.Fprintf(w, "Internal error, check logs")
return
}
if debug == "on" {
log.Printf("%v", r.PostForm)
}
value := r.FormValue("WEAKPROTECTION")
if value == "" || r.FormValue("WEAKPROTECTION") != weakprotection {
log.Println("Got unvalidated request")
if debug == "on" {
log.Printf("%s (<<or empty) != %s", value, weakprotection)
}
fmt.Fprint(w, `<!doctype html>
<html lang="en">
<head>
<!-- Required meta tags -->
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- Bootstrap CSS -->
<link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">
<title>Get your unsecure token here</title>
</head>
<body onLoad="document.getElementById('token').select()">
<div class="container">
<div class="row">
<div class="col">
<h1>Don't run me in ANY Production Environment</h1>
</div>
</div>
<div class="row">
<div class="col">
<form method="post">
<div class="mb-3">
<label for="weaktoken" class="form-label">Weak Password:</label>
<input type="password" class="form-control" id="WEAKPROTECTION" name="WEAKPROTECTION" placeholder="password">
</div>
<button type="submit" class="btn btn-primary">Submit</button>
</form>
</div>
</div>
</div>
</body>
</html>
`)
} else {
token := "couldn't read token, make sure to inject a service account"
tokenpath := "/var/run/secrets/kubernetes.io/serviceaccount/token"
content, err := ioutil.ReadFile(tokenpath)
if err == nil {
token = string(content)
log.Println("Serving token")
} else {
log.Printf("Couldn't read token on path %s", tokenpath)
log.Println(err)
}
fmt.Fprintf(w, `<!doctype html>
<html lang="en">
<head>
<!-- Required meta tags -->
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- Bootstrap CSS -->
<link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">
<title>Get your unsecure token here</title>
</head>
<body onLoad="document.getElementById('token').select()">
<div class="container">
<div class="row">
<div class="col">
<h1>Don't run me in ANY Production Environment</h1>
</div>
</div>
<div class="row">
<div class="col">
<div class="mb-3">
<label for="token" class="form-label">Get your unsecure token here</label>
<textarea onClick="this.select();" class="form-control" id="token" rows="10">%s</textarea>
</div>
</div>
</div>
</div>
</body>
</html>`, token)
}
})
log.Println("Starting on port 8080")
log.Fatal(http.ListenAndServe(":8080", nil))
}
| [
"\"WEAKPROTECTION\"",
"\"DEBUG\""
] | [] | [
"WEAKPROTECTION",
"DEBUG"
] | [] | ["WEAKPROTECTION", "DEBUG"] | go | 2 | 0 | |
tests/unit/gapic/compute_v1/test_firewalls.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests.sessions import Session
from google import auth
from google.api_core import client_options
from google.api_core import exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.firewalls import FirewallsClient
from google.cloud.compute_v1.services.firewalls import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert FirewallsClient._get_default_mtls_endpoint(None) is None
assert FirewallsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert (
FirewallsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
FirewallsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
FirewallsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert FirewallsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
def test_firewalls_client_from_service_account_info():
creds = credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = FirewallsClient.from_service_account_info(info)
assert client.transport._credentials == creds
assert client.transport._host == "compute.googleapis.com:443"
@pytest.mark.parametrize("client_class", [FirewallsClient,])
def test_firewalls_client_from_service_account_file(client_class):
creds = credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert client.transport._host == "compute.googleapis.com:443"
def test_firewalls_client_get_transport_class():
transport = FirewallsClient.get_transport_class()
available_transports = [
transports.FirewallsRestTransport,
]
assert transport in available_transports
transport = FirewallsClient.get_transport_class("rest")
assert transport == transports.FirewallsRestTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(FirewallsClient, transports.FirewallsRestTransport, "rest"),],
)
@mock.patch.object(
FirewallsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FirewallsClient)
)
def test_firewalls_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(FirewallsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(FirewallsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (FirewallsClient, transports.FirewallsRestTransport, "rest", "true"),
        (FirewallsClient, transports.FirewallsRestTransport, "rest", "false"),
    ],
)
@mock.patch.object(
    FirewallsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FirewallsClient)
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_firewalls_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """Verify endpoint autoswitch under GOOGLE_API_USE_MTLS_ENDPOINT=auto.

    Three scenarios are covered, in order: an explicit client_cert_source in
    client_options, an ADC-provided default client cert, and no cert at all.
    In each, the mTLS endpoint is expected only when
    GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" AND a cert is available.
    """
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.

    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)

            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )

    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    # NOTE(review): `client` here is still the instance bound in
                    # the first scenario above; only the class-level
                    # DEFAULT_*ENDPOINT attributes are read, so the value is
                    # the same — but a fresh reference would be clearer.
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class()
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                    )

    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class()
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [(FirewallsClient, transports.FirewallsRestTransport, "rest")],
)
def test_firewalls_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes supplied via client_options are forwarded to the transport."""
    requested_scopes = ["1", "2"]
    opts = client_options.ClientOptions(scopes=requested_scopes)
    with mock.patch.object(transport_class, "__init__") as transport_init:
        transport_init.return_value = None
        client = client_class(client_options=opts)
        transport_init.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=requested_scopes,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [(FirewallsClient, transports.FirewallsRestTransport, "rest")],
)
def test_firewalls_client_client_options_credentials_file(
    client_class, transport_class, transport_name
):
    """A credentials_file in client_options is forwarded to the transport."""
    creds_path = "credentials.json"
    opts = client_options.ClientOptions(credentials_file=creds_path)
    with mock.patch.object(transport_class, "__init__") as transport_init:
        transport_init.return_value = None
        client = client_class(client_options=opts)
        transport_init.assert_called_once_with(
            credentials=None,
            credentials_file=creds_path,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
def test_delete_rest(
    transport: str = "rest", request_type=compute.DeleteFirewallRequest
):
    """delete() over REST deserializes a fully-populated compute.Operation.

    The requests.Session is mocked, so only client-side request plumbing and
    JSON response parsing are exercised — no real API traffic occurs.
    """
    client = FirewallsClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id="client_operation_id_value",
            creation_timestamp="creation_timestamp_value",
            description="description_value",
            end_time="end_time_value",
            error=compute.Error(errors=[compute.Errors(code="code_value")]),
            http_error_message="http_error_message_value",
            http_error_status_code=2374,
            id="id_value",
            insert_time="insert_time_value",
            kind="kind_value",
            name="name_value",
            operation_type="operation_type_value",
            progress=885,
            region="region_value",
            self_link="self_link_value",
            start_time="start_time_value",
            status=compute.Operation.Status.DONE,
            status_message="status_message_value",
            target_id="target_id_value",
            target_link="target_link_value",
            user="user_value",
            warnings=[compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)],
            zone="zone_value",
        )

        # Wrap the value into a proper Response obj
        json_return_value = compute.Operation.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value

        response = client.delete(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == "client_operation_id_value"
    assert response.creation_timestamp == "creation_timestamp_value"
    assert response.description == "description_value"
    assert response.end_time == "end_time_value"
    assert response.error == compute.Error(errors=[compute.Errors(code="code_value")])
    assert response.http_error_message == "http_error_message_value"
    assert response.http_error_status_code == 2374
    assert response.id == "id_value"
    assert response.insert_time == "insert_time_value"
    assert response.kind == "kind_value"
    assert response.name == "name_value"
    assert response.operation_type == "operation_type_value"
    assert response.progress == 885
    assert response.region == "region_value"
    assert response.self_link == "self_link_value"
    assert response.start_time == "start_time_value"
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == "status_message_value"
    assert response.target_id == "target_id_value"
    assert response.target_link == "target_link_value"
    assert response.user == "user_value"
    assert response.warnings == [
        compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)
    ]
    assert response.zone == "zone_value"
def test_delete_rest_from_dict():
    """delete() must also accept the request as a plain dict."""
    test_delete_rest(request_type=dict)
def test_delete_rest_flattened():
    """delete() built from flattened kwargs carries both fields in the call."""
    client = FirewallsClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        json_return_value = compute.Operation.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete(
            project="project_value", firewall="firewall_value",
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, http_call, http_params = req.mock_calls[0]
        body = http_params.get("json")
        # Flattened values may land in either the URL or the JSON body.
        assert "project_value" in http_call[1] + str(body)
        assert "firewall_value" in http_call[1] + str(body)
def test_delete_rest_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    firewalls_client = FirewallsClient(
        credentials=credentials.AnonymousCredentials(),
    )
    request = compute.DeleteFirewallRequest()
    with pytest.raises(ValueError):
        firewalls_client.delete(
            request, project="project_value", firewall="firewall_value",
        )
def test_get_rest(transport: str = "rest", request_type=compute.GetFirewallRequest):
    """get() over REST deserializes a fully-populated compute.Firewall.

    The requests.Session is mocked, so only client-side request plumbing and
    JSON response parsing are exercised — no real API traffic occurs.
    """
    client = FirewallsClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Firewall(
            allowed=[compute.Allowed(i_p_protocol="i_p_protocol_value")],
            creation_timestamp="creation_timestamp_value",
            denied=[compute.Denied(i_p_protocol="i_p_protocol_value")],
            description="description_value",
            destination_ranges=["destination_ranges_value"],
            direction=compute.Firewall.Direction.EGRESS,
            disabled=True,
            id="id_value",
            kind="kind_value",
            log_config=compute.FirewallLogConfig(enable=True),
            name="name_value",
            network="network_value",
            priority=898,
            self_link="self_link_value",
            source_ranges=["source_ranges_value"],
            source_service_accounts=["source_service_accounts_value"],
            source_tags=["source_tags_value"],
            target_service_accounts=["target_service_accounts_value"],
            target_tags=["target_tags_value"],
        )

        # Wrap the value into a proper Response obj
        json_return_value = compute.Firewall.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value

        response = client.get(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Firewall)
    assert response.allowed == [compute.Allowed(i_p_protocol="i_p_protocol_value")]
    assert response.creation_timestamp == "creation_timestamp_value"
    assert response.denied == [compute.Denied(i_p_protocol="i_p_protocol_value")]
    assert response.description == "description_value"
    assert response.destination_ranges == ["destination_ranges_value"]
    assert response.direction == compute.Firewall.Direction.EGRESS
    assert response.disabled is True
    assert response.id == "id_value"
    assert response.kind == "kind_value"
    assert response.log_config == compute.FirewallLogConfig(enable=True)
    assert response.name == "name_value"
    assert response.network == "network_value"
    assert response.priority == 898
    assert response.self_link == "self_link_value"
    assert response.source_ranges == ["source_ranges_value"]
    assert response.source_service_accounts == ["source_service_accounts_value"]
    assert response.source_tags == ["source_tags_value"]
    assert response.target_service_accounts == ["target_service_accounts_value"]
    assert response.target_tags == ["target_tags_value"]
def test_get_rest_from_dict():
    """get() must also accept the request as a plain dict."""
    test_get_rest(request_type=dict)
def test_get_rest_flattened():
    """get() built from flattened kwargs carries both fields in the call."""
    client = FirewallsClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Firewall()

        # Wrap the value into a proper Response obj
        json_return_value = compute.Firewall.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get(
            project="project_value", firewall="firewall_value",
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, http_call, http_params = req.mock_calls[0]
        body = http_params.get("json")
        # Flattened values may land in either the URL or the JSON body.
        assert "project_value" in http_call[1] + str(body)
        assert "firewall_value" in http_call[1] + str(body)
def test_get_rest_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    firewalls_client = FirewallsClient(
        credentials=credentials.AnonymousCredentials(),
    )
    request = compute.GetFirewallRequest()
    with pytest.raises(ValueError):
        firewalls_client.get(
            request, project="project_value", firewall="firewall_value",
        )
def test_insert_rest(
    transport: str = "rest", request_type=compute.InsertFirewallRequest
):
    """insert() over REST deserializes a fully-populated compute.Operation.

    The requests.Session is mocked, so only client-side request plumbing and
    JSON response parsing are exercised — no real API traffic occurs.
    """
    client = FirewallsClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id="client_operation_id_value",
            creation_timestamp="creation_timestamp_value",
            description="description_value",
            end_time="end_time_value",
            error=compute.Error(errors=[compute.Errors(code="code_value")]),
            http_error_message="http_error_message_value",
            http_error_status_code=2374,
            id="id_value",
            insert_time="insert_time_value",
            kind="kind_value",
            name="name_value",
            operation_type="operation_type_value",
            progress=885,
            region="region_value",
            self_link="self_link_value",
            start_time="start_time_value",
            status=compute.Operation.Status.DONE,
            status_message="status_message_value",
            target_id="target_id_value",
            target_link="target_link_value",
            user="user_value",
            warnings=[compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)],
            zone="zone_value",
        )

        # Wrap the value into a proper Response obj
        json_return_value = compute.Operation.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value

        response = client.insert(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == "client_operation_id_value"
    assert response.creation_timestamp == "creation_timestamp_value"
    assert response.description == "description_value"
    assert response.end_time == "end_time_value"
    assert response.error == compute.Error(errors=[compute.Errors(code="code_value")])
    assert response.http_error_message == "http_error_message_value"
    assert response.http_error_status_code == 2374
    assert response.id == "id_value"
    assert response.insert_time == "insert_time_value"
    assert response.kind == "kind_value"
    assert response.name == "name_value"
    assert response.operation_type == "operation_type_value"
    assert response.progress == 885
    assert response.region == "region_value"
    assert response.self_link == "self_link_value"
    assert response.start_time == "start_time_value"
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == "status_message_value"
    assert response.target_id == "target_id_value"
    assert response.target_link == "target_link_value"
    assert response.user == "user_value"
    assert response.warnings == [
        compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)
    ]
    assert response.zone == "zone_value"
def test_insert_rest_from_dict():
    """insert() must also accept the request as a plain dict."""
    test_insert_rest(request_type=dict)
def test_insert_rest_flattened():
    """insert() from flattened kwargs serializes the firewall resource into the call."""
    client = FirewallsClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        json_return_value = compute.Operation.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        firewall_resource = compute.Firewall(
            allowed=[compute.Allowed(i_p_protocol="i_p_protocol_value")]
        )
        client.insert(
            project="project_value", firewall_resource=firewall_resource,
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, http_call, http_params = req.mock_calls[0]
        body = http_params.get("json")
        # Flattened values may land in either the URL or the JSON body.
        assert "project_value" in http_call[1] + str(body)
        assert compute.Firewall.to_json(
            firewall_resource, including_default_value_fields=False
        ) in http_call[1] + str(body)
def test_insert_rest_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    firewalls_client = FirewallsClient(
        credentials=credentials.AnonymousCredentials(),
    )
    request = compute.InsertFirewallRequest()
    resource = compute.Firewall(
        allowed=[compute.Allowed(i_p_protocol="i_p_protocol_value")]
    )
    with pytest.raises(ValueError):
        firewalls_client.insert(
            request, project="project_value", firewall_resource=resource,
        )
def test_list_rest(transport: str = "rest", request_type=compute.ListFirewallsRequest):
    """list() over REST deserializes a populated compute.FirewallList page.

    The requests.Session is mocked, so only client-side request plumbing and
    JSON response parsing are exercised — no real API traffic occurs.
    """
    client = FirewallsClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.FirewallList(
            id="id_value",
            items=[
                compute.Firewall(
                    allowed=[compute.Allowed(i_p_protocol="i_p_protocol_value")]
                )
            ],
            kind="kind_value",
            next_page_token="next_page_token_value",
            self_link="self_link_value",
            warning=compute.Warning(code=compute.Warning.Code.CLEANUP_FAILED),
        )

        # Wrap the value into a proper Response obj
        json_return_value = compute.FirewallList.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value

        response = client.list(request)

    # The raw page of a list response is the response object itself.
    assert response.raw_page is response

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.FirewallList)
    assert response.id == "id_value"
    assert response.items == [
        compute.Firewall(allowed=[compute.Allowed(i_p_protocol="i_p_protocol_value")])
    ]
    assert response.kind == "kind_value"
    assert response.next_page_token == "next_page_token_value"
    assert response.self_link == "self_link_value"
    assert response.warning == compute.Warning(code=compute.Warning.Code.CLEANUP_FAILED)
def test_list_rest_from_dict():
    """list() must also accept the request as a plain dict."""
    test_list_rest(request_type=dict)
def test_list_rest_flattened():
    """list() built from a flattened project kwarg carries it in the call."""
    firewalls_client = FirewallsClient(credentials=credentials.AnonymousCredentials())

    with mock.patch.object(Session, "request") as mocked_request:
        # Fake a successful, empty FirewallList payload.
        canned_response = Response()
        canned_response.status_code = 200
        canned_response._content = compute.FirewallList.to_json(
            compute.FirewallList()
        ).encode("UTF-8")
        mocked_request.return_value = canned_response

        # Invoke via the flattened keyword argument.
        firewalls_client.list(project="project_value")

        # Exactly one HTTP call, carrying the flattened field in the URL
        # or the JSON body.
        assert len(mocked_request.mock_calls) == 1
        _, call_args, call_kwargs = mocked_request.mock_calls[0]
        payload = call_kwargs.get("json")
        assert "project_value" in call_args[1] + str(payload)
def test_list_rest_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    firewalls_client = FirewallsClient(
        credentials=credentials.AnonymousCredentials(),
    )
    request = compute.ListFirewallsRequest()
    with pytest.raises(ValueError):
        firewalls_client.list(request, project="project_value")
def test_patch_rest(transport: str = "rest", request_type=compute.PatchFirewallRequest):
    """patch() over REST deserializes a fully-populated compute.Operation.

    The requests.Session is mocked, so only client-side request plumbing and
    JSON response parsing are exercised — no real API traffic occurs.
    """
    client = FirewallsClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id="client_operation_id_value",
            creation_timestamp="creation_timestamp_value",
            description="description_value",
            end_time="end_time_value",
            error=compute.Error(errors=[compute.Errors(code="code_value")]),
            http_error_message="http_error_message_value",
            http_error_status_code=2374,
            id="id_value",
            insert_time="insert_time_value",
            kind="kind_value",
            name="name_value",
            operation_type="operation_type_value",
            progress=885,
            region="region_value",
            self_link="self_link_value",
            start_time="start_time_value",
            status=compute.Operation.Status.DONE,
            status_message="status_message_value",
            target_id="target_id_value",
            target_link="target_link_value",
            user="user_value",
            warnings=[compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)],
            zone="zone_value",
        )

        # Wrap the value into a proper Response obj
        json_return_value = compute.Operation.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value

        response = client.patch(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == "client_operation_id_value"
    assert response.creation_timestamp == "creation_timestamp_value"
    assert response.description == "description_value"
    assert response.end_time == "end_time_value"
    assert response.error == compute.Error(errors=[compute.Errors(code="code_value")])
    assert response.http_error_message == "http_error_message_value"
    assert response.http_error_status_code == 2374
    assert response.id == "id_value"
    assert response.insert_time == "insert_time_value"
    assert response.kind == "kind_value"
    assert response.name == "name_value"
    assert response.operation_type == "operation_type_value"
    assert response.progress == 885
    assert response.region == "region_value"
    assert response.self_link == "self_link_value"
    assert response.start_time == "start_time_value"
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == "status_message_value"
    assert response.target_id == "target_id_value"
    assert response.target_link == "target_link_value"
    assert response.user == "user_value"
    assert response.warnings == [
        compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)
    ]
    assert response.zone == "zone_value"
def test_patch_rest_from_dict():
    """patch() must also accept the request as a plain dict."""
    test_patch_rest(request_type=dict)
def test_patch_rest_flattened():
    """patch() from flattened kwargs serializes all three fields into the call."""
    client = FirewallsClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        json_return_value = compute.Operation.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        firewall_resource = compute.Firewall(
            allowed=[compute.Allowed(i_p_protocol="i_p_protocol_value")]
        )
        client.patch(
            project="project_value",
            firewall="firewall_value",
            firewall_resource=firewall_resource,
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, http_call, http_params = req.mock_calls[0]
        body = http_params.get("json")
        # Flattened values may land in either the URL or the JSON body.
        assert "project_value" in http_call[1] + str(body)
        assert "firewall_value" in http_call[1] + str(body)
        assert compute.Firewall.to_json(
            firewall_resource, including_default_value_fields=False
        ) in http_call[1] + str(body)
def test_patch_rest_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    firewalls_client = FirewallsClient(
        credentials=credentials.AnonymousCredentials(),
    )
    request = compute.PatchFirewallRequest()
    resource = compute.Firewall(
        allowed=[compute.Allowed(i_p_protocol="i_p_protocol_value")]
    )
    with pytest.raises(ValueError):
        firewalls_client.patch(
            request,
            project="project_value",
            firewall="firewall_value",
            firewall_resource=resource,
        )
def test_update_rest(
    transport: str = "rest", request_type=compute.UpdateFirewallRequest
):
    """update() over REST deserializes a fully-populated compute.Operation.

    The requests.Session is mocked, so only client-side request plumbing and
    JSON response parsing are exercised — no real API traffic occurs.
    """
    client = FirewallsClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id="client_operation_id_value",
            creation_timestamp="creation_timestamp_value",
            description="description_value",
            end_time="end_time_value",
            error=compute.Error(errors=[compute.Errors(code="code_value")]),
            http_error_message="http_error_message_value",
            http_error_status_code=2374,
            id="id_value",
            insert_time="insert_time_value",
            kind="kind_value",
            name="name_value",
            operation_type="operation_type_value",
            progress=885,
            region="region_value",
            self_link="self_link_value",
            start_time="start_time_value",
            status=compute.Operation.Status.DONE,
            status_message="status_message_value",
            target_id="target_id_value",
            target_link="target_link_value",
            user="user_value",
            warnings=[compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)],
            zone="zone_value",
        )

        # Wrap the value into a proper Response obj
        json_return_value = compute.Operation.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value

        response = client.update(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == "client_operation_id_value"
    assert response.creation_timestamp == "creation_timestamp_value"
    assert response.description == "description_value"
    assert response.end_time == "end_time_value"
    assert response.error == compute.Error(errors=[compute.Errors(code="code_value")])
    assert response.http_error_message == "http_error_message_value"
    assert response.http_error_status_code == 2374
    assert response.id == "id_value"
    assert response.insert_time == "insert_time_value"
    assert response.kind == "kind_value"
    assert response.name == "name_value"
    assert response.operation_type == "operation_type_value"
    assert response.progress == 885
    assert response.region == "region_value"
    assert response.self_link == "self_link_value"
    assert response.start_time == "start_time_value"
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == "status_message_value"
    assert response.target_id == "target_id_value"
    assert response.target_link == "target_link_value"
    assert response.user == "user_value"
    assert response.warnings == [
        compute.Warnings(code=compute.Warnings.Code.CLEANUP_FAILED)
    ]
    assert response.zone == "zone_value"
def test_update_rest_from_dict():
    """update() must also accept the request as a plain dict."""
    test_update_rest(request_type=dict)
def test_update_rest_flattened():
    """update() from flattened kwargs serializes all three fields into the call."""
    client = FirewallsClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        json_return_value = compute.Operation.to_json(return_value)
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        firewall_resource = compute.Firewall(
            allowed=[compute.Allowed(i_p_protocol="i_p_protocol_value")]
        )
        client.update(
            project="project_value",
            firewall="firewall_value",
            firewall_resource=firewall_resource,
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, http_call, http_params = req.mock_calls[0]
        body = http_params.get("json")
        # Flattened values may land in either the URL or the JSON body.
        assert "project_value" in http_call[1] + str(body)
        assert "firewall_value" in http_call[1] + str(body)
        assert compute.Firewall.to_json(
            firewall_resource, including_default_value_fields=False
        ) in http_call[1] + str(body)
def test_update_rest_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    firewalls_client = FirewallsClient(
        credentials=credentials.AnonymousCredentials(),
    )
    request = compute.UpdateFirewallRequest()
    resource = compute.Firewall(
        allowed=[compute.Allowed(i_p_protocol="i_p_protocol_value")]
    )
    with pytest.raises(ValueError):
        firewalls_client.update(
            request,
            project="project_value",
            firewall="firewall_value",
            firewall_resource=resource,
        )
def test_credentials_transport_error():
    """A transport instance may not be combined with credentials, a
    credentials file, or scopes — each combination must raise ValueError."""
    # Credentials object + transport instance: invalid.
    rest_transport = transports.FirewallsRestTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        FirewallsClient(
            credentials=credentials.AnonymousCredentials(), transport=rest_transport,
        )

    # Credentials file + transport instance: invalid.
    rest_transport = transports.FirewallsRestTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        FirewallsClient(
            client_options={"credentials_file": "credentials.json"},
            transport=rest_transport,
        )

    # Scopes + transport instance: invalid.
    rest_transport = transports.FirewallsRestTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        FirewallsClient(
            client_options={"scopes": ["1", "2"]}, transport=rest_transport,
        )
def test_transport_instance():
    """A client built around a custom transport exposes that same instance."""
    custom_transport = transports.FirewallsRestTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    assert FirewallsClient(transport=custom_transport).transport is custom_transport
@pytest.mark.parametrize("transport_class", [transports.FirewallsRestTransport])
def test_transport_adc(transport_class):
    """With no explicit credentials, the transport falls back to ADC."""
    with mock.patch.object(auth, "default") as adc:
        adc.return_value = (credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_firewalls_base_transport_error():
    """Supplying both a credentials object and a credentials file is rejected."""
    with pytest.raises(exceptions.DuplicateCredentialArgs):
        transports.FirewallsTransport(
            credentials=credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_firewalls_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.compute_v1.services.firewalls.transports.FirewallsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.FirewallsTransport(
credentials=credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"delete",
"get",
"insert",
"list",
"patch",
"update",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
def test_firewalls_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
auth, "load_credentials_from_file"
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.firewalls.transports.FirewallsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (credentials.AnonymousCredentials(), None)
transport = transports.FirewallsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
def test_firewalls_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(auth, "default") as adc, mock.patch(
"google.cloud.compute_v1.services.firewalls.transports.FirewallsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (credentials.AnonymousCredentials(), None)
transport = transports.FirewallsTransport()
adc.assert_called_once()
def test_firewalls_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(auth, "default") as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
FirewallsClient()
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
def test_firewalls_http_transport_client_cert_source_for_mtls():
cred = credentials.AnonymousCredentials()
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
) as mock_configure_mtls_channel:
transports.FirewallsRestTransport(
credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
)
mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
def test_firewalls_host_no_port():
client = FirewallsClient(
credentials=credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com"
),
)
assert client.transport._host == "compute.googleapis.com:443"
def test_firewalls_host_with_port():
client = FirewallsClient(
credentials=credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com:8000"
),
)
assert client.transport._host == "compute.googleapis.com:8000"
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = FirewallsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = FirewallsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = FirewallsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = FirewallsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = FirewallsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = FirewallsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = FirewallsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = FirewallsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = FirewallsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = FirewallsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = FirewallsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = FirewallsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = FirewallsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = FirewallsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = FirewallsClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.FirewallsTransport, "_prep_wrapped_messages"
) as prep:
client = FirewallsClient(
credentials=credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.FirewallsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = FirewallsClient.get_transport_class()
transport = transport_class(
credentials=credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
python/ray/tests/test_multi_node.py | import os
import pytest
import psutil
import sys
import time
import ray
from ray import ray_constants
from ray._private.test_utils import (
RayTestTimeoutException, run_string_as_driver,
run_string_as_driver_nonblocking, init_error_pubsub, get_error_message,
object_memory_usage, wait_for_condition)
@pytest.mark.parametrize(
"call_ray_start", [
"ray start --head --num-cpus=1 --min-worker-port=0 "
"--max-worker-port=0 --port 0",
],
indirect=True)
def test_cleanup_on_driver_exit(call_ray_start):
# This test will create a driver that creates a bunch of objects and then
# exits. The entries in the object table should be cleaned up.
address = call_ray_start
ray.init(address=address)
# Define a driver that creates a bunch of objects and exits.
driver_script = """
import time
import ray
import numpy as np
from ray._private.test_utils import object_memory_usage
import os
ray.init(address="{}")
object_refs = [ray.put(np.zeros(200 * 1024, dtype=np.uint8))
for i in range(1000)]
start_time = time.time()
while time.time() - start_time < 30:
if object_memory_usage() > 0:
break
else:
raise Exception("Objects did not appear in object table.")
@ray.remote
def f():
time.sleep(1)
print("success")
# Submit some tasks without waiting for them to finish. Their workers should
# still get cleaned up eventually, even if they get started after the driver
# exits.
[f.remote() for _ in range(10)]
""".format(address)
out = run_string_as_driver(driver_script)
assert "success" in out
# Make sure the objects are removed from the object table.
start_time = time.time()
while time.time() - start_time < 30:
if object_memory_usage() == 0:
break
else:
raise Exception("Objects were not all removed from object table.")
def all_workers_exited():
result = True
print("list of idle workers:")
for proc in psutil.process_iter():
if ray_constants.WORKER_PROCESS_TYPE_IDLE_WORKER in proc.name():
print(f"{proc}")
result = False
return result
# Check that workers are eventually cleaned up.
wait_for_condition(all_workers_exited, timeout=15, retry_interval_ms=1000)
def test_error_isolation(call_ray_start):
address = call_ray_start
# Connect a driver to the Ray cluster.
ray.init(address=address)
# If a GRPC call exceeds timeout, the calls is cancelled at client side but
# server may still reply to it, leading to missed message. Using a sequence
# number to ensure no message is dropped can be the long term solution,
# but its complexity and the fact the Ray subscribers do not use deadline
# in production makes it less preferred.
# Therefore, a simpler workaround is used instead: a different subscriber
# is used for each get_error_message() call.
subscribers = [init_error_pubsub() for _ in range(3)]
# There shouldn't be any errors yet.
errors = get_error_message(subscribers[0], 1, timeout=2)
assert len(errors) == 0
error_string1 = "error_string1"
error_string2 = "error_string2"
@ray.remote
def f():
raise Exception(error_string1)
# Run a remote function that throws an error.
with pytest.raises(Exception):
ray.get(f.remote())
# Wait for the error to appear in Redis.
errors = get_error_message(subscribers[1], 1)
# Make sure we got the error.
assert len(errors) == 1
assert error_string1 in errors[0].error_message
# Start another driver and make sure that it does not receive this
# error. Make the other driver throw an error, and make sure it
# receives that error.
driver_script = """
import ray
import time
from ray._private.test_utils import init_error_pubsub, get_error_message
ray.init(address="{}")
subscribers = [init_error_pubsub() for _ in range(2)]
time.sleep(1)
errors = get_error_message(subscribers[0], 1, timeout=2)
assert len(errors) == 0
@ray.remote
def f():
raise Exception("{}")
try:
ray.get(f.remote())
except Exception as e:
pass
errors = get_error_message(subscribers[1], 1)
assert len(errors) == 1
assert "{}" in errors[0].error_message
print("success")
""".format(address, error_string2, error_string2)
out = run_string_as_driver(driver_script)
# Make sure the other driver succeeded.
assert "success" in out
# Make sure that the other error message doesn't show up for this
# driver.
errors = get_error_message(subscribers[2], 1)
assert len(errors) == 1
def test_remote_function_isolation(call_ray_start):
# This test will run multiple remote functions with the same names in
# two different drivers. Connect a driver to the Ray cluster.
address = call_ray_start
ray.init(address=address)
# Start another driver and make sure that it can define and call its
# own commands with the same names.
driver_script = """
import ray
import time
ray.init(address="{}")
@ray.remote
def f():
return 3
@ray.remote
def g(x, y):
return 4
for _ in range(10000):
result = ray.get([f.remote(), g.remote(0, 0)])
assert result == [3, 4]
print("success")
""".format(address)
out = run_string_as_driver(driver_script)
@ray.remote
def f():
return 1
@ray.remote
def g(x):
return 2
for _ in range(10000):
result = ray.get([f.remote(), g.remote(0)])
assert result == [1, 2]
# Make sure the other driver succeeded.
assert "success" in out
def test_driver_exiting_quickly(call_ray_start):
# This test will create some drivers that submit some tasks and then
# exit without waiting for the tasks to complete.
address = call_ray_start
ray.init(address=address)
# Define a driver that creates an actor and exits.
driver_script1 = """
import ray
ray.init(address="{}")
@ray.remote
class Foo:
def __init__(self):
pass
Foo.remote()
print("success")
""".format(address)
# Define a driver that creates some tasks and exits.
driver_script2 = """
import ray
ray.init(address="{}")
@ray.remote
def f():
return 1
f.remote()
print("success")
""".format(address)
# Create some drivers and let them exit and make sure everything is
# still alive.
for _ in range(3):
out = run_string_as_driver(driver_script1)
# Make sure the first driver ran to completion.
assert "success" in out
out = run_string_as_driver(driver_script2)
# Make sure the first driver ran to completion.
assert "success" in out
def test_drivers_named_actors(call_ray_start):
# This test will create some drivers that submit some tasks to the same
# named actor.
address = call_ray_start
ray.init(address=address, namespace="test")
# Define a driver that creates a named actor then sleeps for a while.
driver_script1 = """
import ray
import time
ray.init(address="{}", namespace="test")
@ray.remote
class Counter:
def __init__(self):
self.count = 0
def increment(self):
self.count += 1
return self.count
counter = Counter.options(name="Counter").remote()
time.sleep(100)
""".format(address)
# Define a driver that submits to the named actor and exits.
driver_script2 = """
import ray
import time
ray.init(address="{}", namespace="test")
while True:
try:
counter = ray.get_actor("Counter")
break
except ValueError:
time.sleep(1)
assert ray.get(counter.increment.remote()) == {}
print("success")
""".format(address, "{}")
process_handle = run_string_as_driver_nonblocking(driver_script1)
for i in range(3):
driver_script = driver_script2.format(i + 1)
out = run_string_as_driver(driver_script)
assert "success" in out
process_handle.kill()
def test_receive_late_worker_logs():
# Make sure that log messages from tasks appear in the stdout even if the
# script exits quickly.
log_message = "some helpful debugging message"
# Define a driver that creates a task that prints something, ensures that
# the task runs, and then exits.
driver_script = """
import ray
import random
import time
log_message = "{}"
@ray.remote
class Actor:
def log(self):
print(log_message)
@ray.remote
def f():
print(log_message)
ray.init(num_cpus=2)
a = Actor.remote()
ray.get([a.log.remote(), f.remote()])
ray.get([a.log.remote(), f.remote()])
""".format(log_message)
for _ in range(2):
out = run_string_as_driver(driver_script)
assert out.count(log_message) == 4
@pytest.mark.parametrize(
"call_ray_start", [
"ray start --head --num-cpus=1 --num-gpus=1 " +
"--min-worker-port=0 --max-worker-port=0 --port 0"
],
indirect=True)
def test_drivers_release_resources(call_ray_start):
address = call_ray_start
# Define a driver that creates an actor and exits.
driver_script1 = """
import time
import ray
ray.init(address="{}")
@ray.remote
def f(duration):
time.sleep(duration)
@ray.remote(num_gpus=1)
def g(duration):
time.sleep(duration)
@ray.remote(num_gpus=1)
class Foo:
def __init__(self):
pass
# Make sure some resources are available for us to run tasks.
ray.get(f.remote(0))
ray.get(g.remote(0))
# Start a bunch of actors and tasks that use resources. These should all be
# cleaned up when this driver exits.
foos = [Foo.remote() for _ in range(100)]
[f.remote(10 ** 6) for _ in range(100)]
print("success")
""".format(address)
driver_script2 = (driver_script1 +
"import sys\nsys.stdout.flush()\ntime.sleep(10 ** 6)\n")
def wait_for_success_output(process_handle, timeout=10):
# Wait until the process prints "success" and then return.
start_time = time.time()
while time.time() - start_time < timeout:
output_line = ray._private.utils.decode(
process_handle.stdout.readline()).strip()
print(output_line)
if output_line == "success":
return
time.sleep(1)
raise RayTestTimeoutException(
"Timed out waiting for process to print success.")
# Make sure we can run this driver repeatedly, which means that resources
# are getting released in between.
for _ in range(5):
out = run_string_as_driver(driver_script1)
# Make sure the first driver ran to completion.
assert "success" in out
# Also make sure that this works when the driver exits ungracefully.
process_handle = run_string_as_driver_nonblocking(driver_script2)
wait_for_success_output(process_handle)
# Kill the process ungracefully.
process_handle.kill()
if __name__ == "__main__":
import pytest
# Make subprocess happy in bazel.
os.environ["LC_ALL"] = "en_US.UTF-8"
os.environ["LANG"] = "en_US.UTF-8"
sys.exit(pytest.main(["-v", __file__]))
| [] | [] | [
"LC_ALL",
"LANG"
] | [] | ["LC_ALL", "LANG"] | python | 2 | 0 | |
staging/src/k8s.io/client-go/examples/out-of-cluster-client-configuration/main.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Note: the example only works with the code within the same release/branch.
package main
import (
"context"
"flag"
"fmt"
"os"
"path/filepath"
"time"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
//
// Uncomment to load all auth plugins
// _ "k8s.io/client-go/plugin/pkg/client/auth"
//
// Or uncomment to load specific auth plugins
// _ "k8s.io/client-go/plugin/pkg/client/auth/azure"
// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// _ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
// _ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
)
func main() {
var kubeconfig *string
if home := homeDir(); home != "" {
kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
} else {
kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
}
flag.Parse()
// use the current context in kubeconfig
config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
if err != nil {
panic(err.Error())
}
// create the clientset
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
panic(err.Error())
}
for {
pods, err := clientset.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{})
if err != nil {
panic(err.Error())
}
fmt.Printf("There are %d pods in the cluster\n", len(pods.Items))
// Examples for error handling:
// - Use helper functions like e.g. errors.IsNotFound()
// - And/or cast to StatusError and use its properties like e.g. ErrStatus.Message
namespace := "default"
pod := "example-xxxxx"
_, err = clientset.CoreV1().Pods(namespace).Get(context.TODO(), pod, metav1.GetOptions{})
if errors.IsNotFound(err) {
fmt.Printf("Pod %s in namespace %s not found\n", pod, namespace)
} else if statusError, isStatus := err.(*errors.StatusError); isStatus {
fmt.Printf("Error getting pod %s in namespace %s: %v\n",
pod, namespace, statusError.ErrStatus.Message)
} else if err != nil {
panic(err.Error())
} else {
fmt.Printf("Found pod %s in namespace %s\n", pod, namespace)
}
time.Sleep(10 * time.Second)
}
}
func homeDir() string {
if h := os.Getenv("HOME"); h != "" {
return h
}
return os.Getenv("USERPROFILE") // windows
}
| [
"\"HOME\"",
"\"USERPROFILE\""
] | [] | [
"HOME",
"USERPROFILE"
] | [] | ["HOME", "USERPROFILE"] | go | 2 | 0 | |
python/ray/tune/tests/test_trial_runner_3.py | import time
from collections import Counter
import os
import pickle
import shutil
import sys
import tempfile
import unittest
from unittest.mock import patch
import ray
from ray.rllib import _register_all
from ray.tune import TuneError
from ray.tune.ray_trial_executor import RayTrialExecutor
from ray.tune.result import TRAINING_ITERATION
from ray.tune.schedulers import TrialScheduler, FIFOScheduler
from ray.tune.experiment import Experiment
from ray.tune.suggest import BasicVariantGenerator
from ray.tune.trial import Trial
from ray.tune.trial_runner import TrialRunner
from ray.tune.resources import Resources, json_to_resources, resources_to_json
from ray.tune.suggest.repeater import Repeater
from ray.tune.suggest._mock import _MockSuggestionAlgorithm
from ray.tune.suggest.suggestion import Searcher, ConcurrencyLimiter
from ray.tune.suggest.search_generator import SearchGenerator
from ray.tune.syncer import SyncConfig
class TrialRunnerTest3(unittest.TestCase):
def setUp(self):
# Wait up to five seconds for placement groups when starting a trial
os.environ["TUNE_PLACEMENT_GROUP_WAIT_S"] = "5"
# Block for results even when placement groups are pending
os.environ["TUNE_TRIAL_STARTUP_GRACE_PERIOD"] = "0"
os.environ["TUNE_TRIAL_RESULT_WAIT_TIME_S"] = "99999"
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "auto" # Reset default
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
ray.shutdown()
_register_all() # re-register the evicted objects
if "CUDA_VISIBLE_DEVICES" in os.environ:
del os.environ["CUDA_VISIBLE_DEVICES"]
shutil.rmtree(self.tmpdir)
def testStepHook(self):
ray.init(num_cpus=4, num_gpus=2)
runner = TrialRunner()
def on_step_begin(self, trialrunner):
self._update_avail_resources()
cnt = self.pre_step if hasattr(self, "pre_step") else 0
self.pre_step = cnt + 1
def on_step_end(self, trialrunner):
cnt = self.pre_step if hasattr(self, "post_step") else 0
self.post_step = 1 + cnt
import types
runner.trial_executor.on_step_begin = types.MethodType(
on_step_begin, runner.trial_executor)
runner.trial_executor.on_step_end = types.MethodType(
on_step_end, runner.trial_executor)
kwargs = {
"stopping_criterion": {
"training_iteration": 5
},
"resources": Resources(cpu=1, gpu=1),
}
runner.add_trial(Trial("__fake", **kwargs))
runner.step()
self.assertEqual(runner.trial_executor.pre_step, 1)
self.assertEqual(runner.trial_executor.post_step, 1)
def testStopTrial(self):
ray.init(num_cpus=4, num_gpus=2)
runner = TrialRunner()
kwargs = {
"stopping_criterion": {
"training_iteration": 5
},
"resources": Resources(cpu=1, gpu=1),
}
trials = [
Trial("__fake", **kwargs),
Trial("__fake", **kwargs),
Trial("__fake", **kwargs),
Trial("__fake", **kwargs)
]
for t in trials:
runner.add_trial(t)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(trials[1].status, Trial.PENDING)
# Stop trial while running
runner.stop_trial(trials[0])
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(trials[1].status, Trial.PENDING)
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(trials[1].status, Trial.RUNNING)
self.assertEqual(trials[-1].status, Trial.PENDING)
# Stop trial while pending
runner.stop_trial(trials[-1])
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(trials[1].status, Trial.RUNNING)
self.assertEqual(trials[-1].status, Trial.TERMINATED)
time.sleep(2) # Wait for stopped placement group to free resources
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(trials[1].status, Trial.RUNNING)
self.assertEqual(trials[2].status, Trial.RUNNING)
self.assertEqual(trials[-1].status, Trial.TERMINATED)
def testSearchAlgNotification(self):
"""Checks notification of trial to the Search Algorithm."""
os.environ["TUNE_RESULT_BUFFER_LENGTH"] = "1" # Don't finish early
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
ray.init(num_cpus=4, num_gpus=2)
experiment_spec = {"run": "__fake", "stop": {"training_iteration": 2}}
experiments = [Experiment.from_json("test", experiment_spec)]
search_alg = _MockSuggestionAlgorithm()
searcher = search_alg.searcher
search_alg.add_configurations(experiments)
runner = TrialRunner(search_alg=search_alg)
runner.step()
trials = runner.get_trials()
self.assertEqual(trials[0].status, Trial.RUNNING)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(searcher.counter["result"], 1)
self.assertEqual(searcher.counter["complete"], 1)
def testSearchAlgFinished(self):
"""Checks that SearchAlg is Finished before all trials are done."""
ray.init(num_cpus=4, local_mode=True, include_dashboard=False)
experiment_spec = {"run": "__fake", "stop": {"training_iteration": 1}}
experiments = [Experiment.from_json("test", experiment_spec)]
searcher = _MockSuggestionAlgorithm()
searcher.add_configurations(experiments)
runner = TrialRunner(search_alg=searcher)
runner.step()
trials = runner.get_trials()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertTrue(searcher.is_finished())
self.assertFalse(runner.is_finished())
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(len(searcher.live_trials), 0)
self.assertTrue(searcher.is_finished())
self.assertTrue(runner.is_finished())
def testSearchAlgSchedulerInteraction(self):
"""Checks that TrialScheduler killing trial will notify SearchAlg."""
class _MockScheduler(FIFOScheduler):
def on_trial_result(self, *args, **kwargs):
return TrialScheduler.STOP
ray.init(num_cpus=4, local_mode=True, include_dashboard=False)
experiment_spec = {"run": "__fake", "stop": {"training_iteration": 2}}
experiments = [Experiment.from_json("test", experiment_spec)]
searcher = _MockSuggestionAlgorithm()
searcher.add_configurations(experiments)
runner = TrialRunner(search_alg=searcher, scheduler=_MockScheduler())
runner.step()
trials = runner.get_trials()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertTrue(searcher.is_finished())
self.assertFalse(runner.is_finished())
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(len(searcher.live_trials), 0)
self.assertTrue(searcher.is_finished())
self.assertTrue(runner.is_finished())
def testSearchAlgStalled(self):
"""Checks that runner and searcher state is maintained when stalled."""
ray.init(num_cpus=4, num_gpus=2)
experiment_spec = {
"run": "__fake",
"num_samples": 3,
"stop": {
"training_iteration": 1
}
}
experiments = [Experiment.from_json("test", experiment_spec)]
search_alg = _MockSuggestionAlgorithm(max_concurrent=1)
search_alg.add_configurations(experiments)
searcher = search_alg.searcher
runner = TrialRunner(search_alg=search_alg)
runner.step()
trials = runner.get_trials()
self.assertEqual(trials[0].status, Trial.RUNNING)
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
trials = runner.get_trials()
runner.step()
self.assertEqual(trials[1].status, Trial.RUNNING)
self.assertEqual(len(searcher.live_trials), 1)
searcher.stall = True
runner.step()
self.assertEqual(trials[1].status, Trial.TERMINATED)
self.assertEqual(len(searcher.live_trials), 0)
self.assertTrue(all(trial.is_finished() for trial in trials))
self.assertFalse(search_alg.is_finished())
self.assertFalse(runner.is_finished())
searcher.stall = False
runner.step()
trials = runner.get_trials()
self.assertEqual(trials[2].status, Trial.RUNNING)
self.assertEqual(len(searcher.live_trials), 1)
runner.step()
self.assertEqual(trials[2].status, Trial.TERMINATED)
self.assertEqual(len(searcher.live_trials), 0)
self.assertTrue(search_alg.is_finished())
self.assertTrue(runner.is_finished())
def testSearchAlgFinishes(self):
"""Empty SearchAlg changing state in `next_trials` does not crash."""
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
class FinishFastAlg(_MockSuggestionAlgorithm):
_index = 0
def next_trial(self):
spec = self._experiment.spec
trial = None
if self._index < spec["num_samples"]:
trial = Trial(
spec.get("run"), stopping_criterion=spec.get("stop"))
self._index += 1
if self._index > 4:
self.set_finished()
return trial
def suggest(self, trial_id):
return {}
ray.init(num_cpus=2, local_mode=True, include_dashboard=False)
experiment_spec = {
"run": "__fake",
"num_samples": 2,
"stop": {
"training_iteration": 1
}
}
searcher = FinishFastAlg()
experiments = [Experiment.from_json("test", experiment_spec)]
searcher.add_configurations(experiments)
runner = TrialRunner(search_alg=searcher)
self.assertFalse(runner.is_finished())
runner.step() # This launches a new run
runner.step() # This launches a 2nd run
self.assertFalse(searcher.is_finished())
self.assertFalse(runner.is_finished())
runner.step() # This kills the first run
self.assertFalse(searcher.is_finished())
self.assertFalse(runner.is_finished())
runner.step() # This kills the 2nd run
self.assertFalse(searcher.is_finished())
self.assertFalse(runner.is_finished())
runner.step() # this converts self._finished to True
self.assertTrue(searcher.is_finished())
self.assertRaises(TuneError, runner.step)
def testSearcherSaveRestore(self):
ray.init(num_cpus=8, local_mode=True)
def create_searcher():
class TestSuggestion(Searcher):
def __init__(self, index):
self.index = index
self.returned_result = []
super().__init__(metric="episode_reward_mean", mode="max")
def suggest(self, trial_id):
self.index += 1
return {"test_variable": self.index}
def on_trial_complete(self, trial_id, result=None, **kwargs):
self.returned_result.append(result)
def save(self, checkpoint_path):
with open(checkpoint_path, "wb") as f:
pickle.dump(self.__dict__, f)
def restore(self, checkpoint_path):
with open(checkpoint_path, "rb") as f:
self.__dict__.update(pickle.load(f))
searcher = TestSuggestion(0)
searcher = ConcurrencyLimiter(searcher, max_concurrent=2)
searcher = Repeater(searcher, repeat=3, set_index=False)
search_alg = SearchGenerator(searcher)
experiment_spec = {
"run": "__fake",
"num_samples": 20,
"stop": {
"training_iteration": 2
}
}
experiments = [Experiment.from_json("test", experiment_spec)]
search_alg.add_configurations(experiments)
return search_alg
searcher = create_searcher()
runner = TrialRunner(
search_alg=searcher,
local_checkpoint_dir=self.tmpdir,
checkpoint_period=-1)
for i in range(6):
runner.step()
assert len(
runner.get_trials()) == 6, [t.config for t in runner.get_trials()]
runner.checkpoint()
trials = runner.get_trials()
[
runner.trial_executor.stop_trial(t) for t in trials
if t.status is not Trial.ERROR
]
del runner
# stop_all(runner.get_trials())
searcher = create_searcher()
runner2 = TrialRunner(
search_alg=searcher,
local_checkpoint_dir=self.tmpdir,
resume="LOCAL")
assert len(runner2.get_trials()) == 6, [
t.config for t in runner2.get_trials()
]
def trial_statuses():
return [t.status for t in runner2.get_trials()]
def num_running_trials():
return sum(t.status == Trial.RUNNING for t in runner2.get_trials())
for i in range(6):
runner2.step()
assert len(set(trial_statuses())) == 1
assert Trial.RUNNING in trial_statuses()
for i in range(20):
runner2.step()
assert 1 <= num_running_trials() <= 6
evaluated = [
t.evaluated_params["test_variable"] for t in runner2.get_trials()
]
count = Counter(evaluated)
assert all(v <= 3 for v in count.values())
def testTrialErrorResumeFalse(self):
ray.init(num_cpus=3, local_mode=True, include_dashboard=False)
runner = TrialRunner(local_checkpoint_dir=self.tmpdir)
kwargs = {
"stopping_criterion": {
"training_iteration": 4
},
"resources": Resources(cpu=1, gpu=0),
}
trials = [
Trial("__fake", config={"mock_error": True}, **kwargs),
Trial("__fake", **kwargs),
Trial("__fake", **kwargs),
]
for t in trials:
runner.add_trial(t)
while not runner.is_finished():
runner.step()
runner.checkpoint(force=True)
assert trials[0].status == Trial.ERROR
del runner
new_runner = TrialRunner(resume=True, local_checkpoint_dir=self.tmpdir)
assert len(new_runner.get_trials()) == 3
assert Trial.ERROR in (t.status for t in new_runner.get_trials())
def testTrialErrorResumeTrue(self):
ray.init(num_cpus=3, local_mode=True, include_dashboard=False)
runner = TrialRunner(local_checkpoint_dir=self.tmpdir)
kwargs = {
"stopping_criterion": {
"training_iteration": 4
},
"resources": Resources(cpu=1, gpu=0),
}
trials = [
Trial("__fake", config={"mock_error": True}, **kwargs),
Trial("__fake", **kwargs),
Trial("__fake", **kwargs),
]
for t in trials:
runner.add_trial(t)
while not runner.is_finished():
runner.step()
runner.checkpoint(force=True)
assert trials[0].status == Trial.ERROR
del runner
new_runner = TrialRunner(
resume="ERRORED_ONLY", local_checkpoint_dir=self.tmpdir)
assert len(new_runner.get_trials()) == 3
assert Trial.ERROR not in (t.status for t in new_runner.get_trials())
# The below is just a check for standard behavior.
disable_error = False
for t in new_runner.get_trials():
if t.config.get("mock_error"):
t.config["mock_error"] = False
disable_error = True
assert disable_error
while not new_runner.is_finished():
new_runner.step()
assert Trial.ERROR not in (t.status for t in new_runner.get_trials())
def testTrialSaveRestore(self):
"""Creates different trials to test runner.checkpoint/restore."""
ray.init(num_cpus=3)
runner = TrialRunner(
local_checkpoint_dir=self.tmpdir, checkpoint_period=0)
trials = [
Trial(
"__fake",
trial_id="trial_terminate",
stopping_criterion={"training_iteration": 1},
checkpoint_freq=1)
]
runner.add_trial(trials[0])
runner.step() # Start trial
runner.step() # Process result, dispatch save
runner.step() # Process save
self.assertEqual(trials[0].status, Trial.TERMINATED)
trials += [
Trial(
"__fake",
trial_id="trial_fail",
stopping_criterion={"training_iteration": 3},
checkpoint_freq=1,
config={"mock_error": True})
]
runner.add_trial(trials[1])
runner.step() # Start trial
runner.step() # Process result, dispatch save
runner.step() # Process save
runner.step() # Error
self.assertEqual(trials[1].status, Trial.ERROR)
trials += [
Trial(
"__fake",
trial_id="trial_succ",
stopping_criterion={"training_iteration": 2},
checkpoint_freq=1)
]
runner.add_trial(trials[2])
runner.step() # Start trial
self.assertEqual(len(runner.trial_executor.get_checkpoints()), 3)
self.assertEqual(trials[2].status, Trial.RUNNING)
runner2 = TrialRunner(resume="LOCAL", local_checkpoint_dir=self.tmpdir)
for tid in ["trial_terminate", "trial_fail"]:
original_trial = runner.get_trial(tid)
restored_trial = runner2.get_trial(tid)
self.assertEqual(original_trial.status, restored_trial.status)
restored_trial = runner2.get_trial("trial_succ")
self.assertEqual(Trial.PENDING, restored_trial.status)
runner2.step() # Start trial
runner2.step() # Process result, dispatch save
runner2.step() # Process save
runner2.step() # Process result, dispatch save
runner2.step() # Process save
self.assertRaises(TuneError, runner2.step)
def testTrialNoSave(self):
"""Check that non-checkpointing trials are not saved."""
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
ray.init(num_cpus=3)
runner = TrialRunner(
local_checkpoint_dir=self.tmpdir, checkpoint_period=0)
runner.add_trial(
Trial(
"__fake",
trial_id="non_checkpoint",
stopping_criterion={"training_iteration": 2}))
while not all(t.status == Trial.TERMINATED
for t in runner.get_trials()):
runner.step()
runner.add_trial(
Trial(
"__fake",
trial_id="checkpoint",
checkpoint_at_end=True,
stopping_criterion={"training_iteration": 2}))
while not all(t.status == Trial.TERMINATED
for t in runner.get_trials()):
runner.step()
runner.add_trial(
Trial(
"__fake",
trial_id="pending",
stopping_criterion={"training_iteration": 2}))
runner.step()
runner.step()
runner2 = TrialRunner(resume="LOCAL", local_checkpoint_dir=self.tmpdir)
new_trials = runner2.get_trials()
self.assertEqual(len(new_trials), 3)
self.assertTrue(
runner2.get_trial("non_checkpoint").status == Trial.TERMINATED)
self.assertTrue(
runner2.get_trial("checkpoint").status == Trial.TERMINATED)
self.assertTrue(runner2.get_trial("pending").status == Trial.PENDING)
self.assertTrue(
not runner2.get_trial("pending").has_reported_at_least_once)
runner2.step()
def testCheckpointWithFunction(self):
ray.init(num_cpus=2)
trial = Trial(
"__fake",
config={"callbacks": {
"on_episode_start": lambda i: i,
}},
checkpoint_freq=1)
runner = TrialRunner(
local_checkpoint_dir=self.tmpdir, checkpoint_period=0)
runner.add_trial(trial)
for _ in range(5):
runner.step()
# force checkpoint
runner.checkpoint()
runner2 = TrialRunner(resume="LOCAL", local_checkpoint_dir=self.tmpdir)
new_trial = runner2.get_trials()[0]
self.assertTrue("callbacks" in new_trial.config)
self.assertTrue("on_episode_start" in new_trial.config["callbacks"])
def testCheckpointOverwrite(self):
def count_checkpoints(cdir):
return sum((fname.startswith("experiment_state")
and fname.endswith(".json"))
for fname in os.listdir(cdir))
ray.init(num_cpus=2)
trial = Trial("__fake", checkpoint_freq=1)
tmpdir = tempfile.mkdtemp()
runner = TrialRunner(local_checkpoint_dir=tmpdir, checkpoint_period=0)
runner.add_trial(trial)
for _ in range(5):
runner.step()
# force checkpoint
runner.checkpoint()
self.assertEqual(count_checkpoints(tmpdir), 1)
runner2 = TrialRunner(resume="LOCAL", local_checkpoint_dir=tmpdir)
for _ in range(5):
runner2.step()
self.assertEqual(count_checkpoints(tmpdir), 2)
runner2.checkpoint()
self.assertEqual(count_checkpoints(tmpdir), 2)
shutil.rmtree(tmpdir)
def testCheckpointFreqBuffered(self):
os.environ["TUNE_RESULT_BUFFER_LENGTH"] = "7"
os.environ["TUNE_RESULT_BUFFER_MIN_TIME_S"] = "1"
def num_checkpoints(trial):
return sum(
item.startswith("checkpoint_")
for item in os.listdir(trial.logdir))
ray.init(num_cpus=2)
trial = Trial("__fake", checkpoint_freq=3)
runner = TrialRunner(
local_checkpoint_dir=self.tmpdir, checkpoint_period=0)
runner.add_trial(trial)
runner.step() # start trial
runner.step() # run iteration 1-3
runner.step() # process save
self.assertEqual(trial.last_result[TRAINING_ITERATION], 3)
self.assertEqual(num_checkpoints(trial), 1)
runner.step() # run iteration 4-6
runner.step() # process save
self.assertEqual(trial.last_result[TRAINING_ITERATION], 6)
self.assertEqual(num_checkpoints(trial), 2)
runner.step() # run iteration 7-9
runner.step() # process save
self.assertEqual(trial.last_result[TRAINING_ITERATION], 9)
self.assertEqual(num_checkpoints(trial), 3)
def testCheckpointAtEndNotBuffered(self):
os.environ["TUNE_RESULT_BUFFER_LENGTH"] = "7"
os.environ["TUNE_RESULT_BUFFER_MIN_TIME_S"] = "0.5"
def num_checkpoints(trial):
return sum(
item.startswith("checkpoint_")
for item in os.listdir(trial.logdir))
ray.init(num_cpus=2)
trial = Trial(
"__fake",
checkpoint_at_end=True,
stopping_criterion={"training_iteration": 4})
runner = TrialRunner(
local_checkpoint_dir=self.tmpdir, checkpoint_period=0)
runner.add_trial(trial)
runner.step() # start trial
runner.step() # run iteration 1
self.assertEqual(trial.last_result[TRAINING_ITERATION], 1)
self.assertEqual(num_checkpoints(trial), 0)
runner.step() # run iteration 2
self.assertEqual(trial.last_result[TRAINING_ITERATION], 2)
self.assertEqual(num_checkpoints(trial), 0)
runner.step() # run iteration 3
self.assertEqual(trial.last_result[TRAINING_ITERATION], 3)
self.assertEqual(num_checkpoints(trial), 0)
runner.step() # run iteration 4
self.assertEqual(trial.last_result[TRAINING_ITERATION], 4)
self.assertEqual(num_checkpoints(trial), 1)
def testCheckpointAtEndBuffered(self):
os.environ["TUNE_RESULT_BUFFER_LENGTH"] = "7"
os.environ["TUNE_RESULT_BUFFER_MIN_TIME_S"] = "0.5"
def num_checkpoints(trial):
return sum(
item.startswith("checkpoint_")
for item in os.listdir(trial.logdir))
ray.init(num_cpus=2)
trial = Trial(
"__fake",
checkpoint_at_end=True,
stopping_criterion={"training_iteration": 4})
# Force result buffer length
runner = TrialRunner(
local_checkpoint_dir=self.tmpdir,
checkpoint_period=0,
trial_executor=RayTrialExecutor(result_buffer_length=7))
runner.add_trial(trial)
runner.step() # start trial
self.assertEqual(num_checkpoints(trial), 0)
runner.step() # run iterations 1-7
self.assertEqual(trial.last_result[TRAINING_ITERATION], 7)
self.assertEqual(num_checkpoints(trial), 1)
def testUserCheckpoint(self):
os.environ["TUNE_RESULT_BUFFER_LENGTH"] = "1" # Don't finish early
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
ray.init(num_cpus=3)
runner = TrialRunner(
local_checkpoint_dir=self.tmpdir, checkpoint_period=0)
runner.add_trial(Trial("__fake", config={"user_checkpoint_freq": 2}))
trials = runner.get_trials()
runner.step() # Start trial
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(ray.get(trials[0].runner.set_info.remote(1)), 1)
runner.step() # Process result
self.assertFalse(trials[0].has_checkpoint())
runner.step() # Process result
self.assertFalse(trials[0].has_checkpoint())
runner.step() # Process result, dispatch save
runner.step() # Process save
self.assertTrue(trials[0].has_checkpoint())
runner2 = TrialRunner(resume="LOCAL", local_checkpoint_dir=self.tmpdir)
runner2.step() # 5: Start trial and dispatch restore
trials2 = runner2.get_trials()
self.assertEqual(ray.get(trials2[0].runner.get_info.remote()), 1)
def testUserCheckpointBuffered(self):
os.environ["TUNE_RESULT_BUFFER_LENGTH"] = "8"
os.environ["TUNE_RESULT_BUFFER_MIN_TIME_S"] = "1"
def num_checkpoints(trial):
return sum(
item.startswith("checkpoint_")
for item in os.listdir(trial.logdir))
ray.init(num_cpus=3)
runner = TrialRunner(
local_checkpoint_dir=self.tmpdir, checkpoint_period=0)
runner.add_trial(Trial("__fake", config={"user_checkpoint_freq": 10}))
trials = runner.get_trials()
runner.step() # Start trial, schedule 1-8
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(ray.get(trials[0].runner.set_info.remote(1)), 1)
self.assertEqual(num_checkpoints(trials[0]), 0)
runner.step() # Process results 0-8, schedule 9-11 (CP)
self.assertEqual(trials[0].last_result.get(TRAINING_ITERATION), 8)
self.assertFalse(trials[0].has_checkpoint())
self.assertEqual(num_checkpoints(trials[0]), 0)
runner.step() # Process results 9-11
runner.step() # handle CP, schedule 12-19
self.assertEqual(trials[0].last_result.get(TRAINING_ITERATION), 11)
self.assertTrue(trials[0].has_checkpoint())
self.assertEqual(num_checkpoints(trials[0]), 1)
runner.step() # Process results 12-19, schedule 20-21
self.assertEqual(trials[0].last_result.get(TRAINING_ITERATION), 19)
self.assertTrue(trials[0].has_checkpoint())
self.assertEqual(num_checkpoints(trials[0]), 1)
runner.step() # Process results 20-21
runner.step() # handle CP, schedule 21-29
self.assertEqual(trials[0].last_result.get(TRAINING_ITERATION), 21)
self.assertTrue(trials[0].has_checkpoint())
self.assertEqual(num_checkpoints(trials[0]), 2)
runner.step() # Process results 21-29, schedule 30-31
self.assertEqual(trials[0].last_result.get(TRAINING_ITERATION), 29)
self.assertTrue(trials[0].has_checkpoint())
self.assertTrue(trials[0].has_checkpoint())
self.assertEqual(num_checkpoints(trials[0]), 2)
@patch("ray.tune.syncer.SYNC_PERIOD", 0)
def testCheckpointAutoPeriod(self):
ray.init(num_cpus=3)
# This makes checkpointing take 2 seconds.
def sync_up(source, target):
time.sleep(2)
return True
runner = TrialRunner(
local_checkpoint_dir=self.tmpdir,
checkpoint_period="auto",
sync_config=SyncConfig(upload_dir="fake", syncer=sync_up),
remote_checkpoint_dir="fake")
runner.add_trial(Trial("__fake", config={"user_checkpoint_freq": 1}))
runner.step() # Run one step, this will trigger checkpointing
self.assertGreaterEqual(runner._checkpoint_manager._checkpoint_period,
38.)
class SearchAlgorithmTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init(
num_cpus=4, num_gpus=0, local_mode=True, include_dashboard=False)
@classmethod
def tearDownClass(cls):
ray.shutdown()
_register_all()
def testNestedSuggestion(self):
class TestSuggestion(Searcher):
def suggest(self, trial_id):
return {"a": {"b": {"c": {"d": 4, "e": 5}}}}
searcher = TestSuggestion()
alg = SearchGenerator(searcher)
alg.add_configurations({"test": {"run": "__fake"}})
trial = alg.next_trial()
self.assertTrue("e=5" in trial.experiment_tag)
self.assertTrue("d=4" in trial.experiment_tag)
def _test_repeater(self, num_samples, repeat):
class TestSuggestion(Searcher):
index = 0
def suggest(self, trial_id):
self.index += 1
return {"test_variable": 5 + self.index}
def on_trial_complete(self, *args, **kwargs):
return
searcher = TestSuggestion(metric="episode_reward_mean")
repeat_searcher = Repeater(searcher, repeat=repeat, set_index=False)
alg = SearchGenerator(repeat_searcher)
experiment_spec = {
"run": "__fake",
"num_samples": num_samples,
"stop": {
"training_iteration": 1
}
}
alg.add_configurations({"test": experiment_spec})
runner = TrialRunner(search_alg=alg)
while not runner.is_finished():
runner.step()
return runner.get_trials()
def testRepeat1(self):
trials = self._test_repeater(num_samples=2, repeat=1)
self.assertEqual(len(trials), 2)
parameter_set = {t.evaluated_params["test_variable"] for t in trials}
self.assertEqual(len(parameter_set), 2)
def testRepeat4(self):
trials = self._test_repeater(num_samples=12, repeat=4)
self.assertEqual(len(trials), 12)
parameter_set = {t.evaluated_params["test_variable"] for t in trials}
self.assertEqual(len(parameter_set), 3)
def testOddRepeat(self):
trials = self._test_repeater(num_samples=11, repeat=5)
self.assertEqual(len(trials), 11)
parameter_set = {t.evaluated_params["test_variable"] for t in trials}
self.assertEqual(len(parameter_set), 3)
def testSetGetRepeater(self):
class TestSuggestion(Searcher):
def __init__(self, index):
self.index = index
self.returned_result = []
super().__init__(metric="result", mode="max")
def suggest(self, trial_id):
self.index += 1
return {"score": self.index}
def on_trial_complete(self, trial_id, result=None, **kwargs):
self.returned_result.append(result)
searcher = TestSuggestion(0)
repeater1 = Repeater(searcher, repeat=3, set_index=False)
for i in range(3):
assert repeater1.suggest(f"test_{i}")["score"] == 1
for i in range(2): # An incomplete set of results
assert repeater1.suggest(f"test_{i}_2")["score"] == 2
# Restore a new one
state = repeater1.get_state()
del repeater1
new_repeater = Repeater(searcher, repeat=1, set_index=True)
new_repeater.set_state(state)
assert new_repeater.repeat == 3
assert new_repeater.suggest("test_2_2")["score"] == 2
assert new_repeater.suggest("test_x")["score"] == 3
# Report results
for i in range(3):
new_repeater.on_trial_complete(f"test_{i}", {"result": 2})
for i in range(3):
new_repeater.on_trial_complete(f"test_{i}_2", {"result": -i * 10})
assert len(new_repeater.searcher.returned_result) == 2
assert new_repeater.searcher.returned_result[-1] == {"result": -10}
# Finish the rest of the last trial group
new_repeater.on_trial_complete("test_x", {"result": 3})
assert new_repeater.suggest("test_y")["score"] == 3
new_repeater.on_trial_complete("test_y", {"result": 3})
assert len(new_repeater.searcher.returned_result) == 2
assert new_repeater.suggest("test_z")["score"] == 3
new_repeater.on_trial_complete("test_z", {"result": 3})
assert len(new_repeater.searcher.returned_result) == 3
assert new_repeater.searcher.returned_result[-1] == {"result": 3}
def testSetGetLimiter(self):
class TestSuggestion(Searcher):
def __init__(self, index):
self.index = index
self.returned_result = []
super().__init__(metric="result", mode="max")
def suggest(self, trial_id):
self.index += 1
return {"score": self.index}
def on_trial_complete(self, trial_id, result=None, **kwargs):
self.returned_result.append(result)
searcher = TestSuggestion(0)
limiter = ConcurrencyLimiter(searcher, max_concurrent=2)
assert limiter.suggest("test_1")["score"] == 1
assert limiter.suggest("test_2")["score"] == 2
assert limiter.suggest("test_3") is None
state = limiter.get_state()
del limiter
limiter2 = ConcurrencyLimiter(searcher, max_concurrent=3)
limiter2.set_state(state)
assert limiter2.suggest("test_4") is None
assert limiter2.suggest("test_5") is None
limiter2.on_trial_complete("test_1", {"result": 3})
limiter2.on_trial_complete("test_2", {"result": 3})
assert limiter2.suggest("test_3")["score"] == 3
def testBasicVariantLimiter(self):
search_alg = BasicVariantGenerator(max_concurrent=2)
experiment_spec = {
"run": "__fake",
"num_samples": 5,
"stop": {
"training_iteration": 1
}
}
search_alg.add_configurations({"test": experiment_spec})
trial1 = search_alg.next_trial()
self.assertTrue(trial1)
trial2 = search_alg.next_trial()
self.assertTrue(trial2)
# Returns None because of limiting
trial3 = search_alg.next_trial()
self.assertFalse(trial3)
# Finish trial, now trial 3 should be created
search_alg.on_trial_complete(trial1.trial_id, None, False)
trial3 = search_alg.next_trial()
self.assertTrue(trial3)
trial4 = search_alg.next_trial()
self.assertFalse(trial4)
search_alg.on_trial_complete(trial2.trial_id, None, False)
search_alg.on_trial_complete(trial3.trial_id, None, False)
trial4 = search_alg.next_trial()
self.assertTrue(trial4)
trial5 = search_alg.next_trial()
self.assertTrue(trial5)
search_alg.on_trial_complete(trial4.trial_id, None, False)
# Should also be None because search is finished
trial6 = search_alg.next_trial()
self.assertFalse(trial6)
def testBatchLimiter(self):
class TestSuggestion(Searcher):
def __init__(self, index):
self.index = index
self.returned_result = []
super().__init__(metric="result", mode="max")
def suggest(self, trial_id):
self.index += 1
return {"score": self.index}
def on_trial_complete(self, trial_id, result=None, **kwargs):
self.returned_result.append(result)
searcher = TestSuggestion(0)
limiter = ConcurrencyLimiter(searcher, max_concurrent=2, batch=True)
assert limiter.suggest("test_1")["score"] == 1
assert limiter.suggest("test_2")["score"] == 2
assert limiter.suggest("test_3") is None
limiter.on_trial_complete("test_1", {"result": 3})
assert limiter.suggest("test_3") is None
limiter.on_trial_complete("test_2", {"result": 3})
assert limiter.suggest("test_3") is not None
def testBatchLimiterInfiniteLoop(self):
"""Check whether an infinite loop when less than max_concurrent trials
are suggested with batch mode is avoided.
"""
class TestSuggestion(Searcher):
def __init__(self, index, max_suggestions=10):
self.index = index
self.max_suggestions = max_suggestions
self.returned_result = []
super().__init__(metric="result", mode="max")
def suggest(self, trial_id):
self.index += 1
if self.index > self.max_suggestions:
return None
return {"score": self.index}
def on_trial_complete(self, trial_id, result=None, **kwargs):
self.returned_result.append(result)
self.index = 0
searcher = TestSuggestion(0, 2)
limiter = ConcurrencyLimiter(searcher, max_concurrent=5, batch=True)
limiter.suggest("test_1")
limiter.suggest("test_2")
limiter.suggest("test_3") # TestSuggestion return None
limiter.on_trial_complete("test_1", {"result": 3})
limiter.on_trial_complete("test_2", {"result": 3})
assert limiter.searcher.returned_result
searcher = TestSuggestion(0, 10)
limiter = ConcurrencyLimiter(searcher, max_concurrent=5, batch=True)
limiter.suggest("test_1")
limiter.suggest("test_2")
limiter.suggest("test_3")
limiter.on_trial_complete("test_1", {"result": 3})
limiter.on_trial_complete("test_2", {"result": 3})
assert not limiter.searcher.returned_result
class ResourcesTest(unittest.TestCase):
def testSubtraction(self):
resource_1 = Resources(
1,
0,
0,
1,
custom_resources={
"a": 1,
"b": 2
},
extra_custom_resources={
"a": 1,
"b": 1
})
resource_2 = Resources(
1,
0,
0,
1,
custom_resources={
"a": 1,
"b": 2
},
extra_custom_resources={
"a": 1,
"b": 1
})
new_res = Resources.subtract(resource_1, resource_2)
self.assertTrue(new_res.cpu == 0)
self.assertTrue(new_res.gpu == 0)
self.assertTrue(new_res.extra_cpu == 0)
self.assertTrue(new_res.extra_gpu == 0)
self.assertTrue(all(k == 0 for k in new_res.custom_resources.values()))
self.assertTrue(
all(k == 0 for k in new_res.extra_custom_resources.values()))
def testDifferentResources(self):
resource_1 = Resources(1, 0, 0, 1, custom_resources={"a": 1, "b": 2})
resource_2 = Resources(1, 0, 0, 1, custom_resources={"a": 1, "c": 2})
new_res = Resources.subtract(resource_1, resource_2)
assert "c" in new_res.custom_resources
assert "b" in new_res.custom_resources
self.assertTrue(new_res.cpu == 0)
self.assertTrue(new_res.gpu == 0)
self.assertTrue(new_res.extra_cpu == 0)
self.assertTrue(new_res.extra_gpu == 0)
self.assertTrue(new_res.get("a") == 0)
def testSerialization(self):
original = Resources(1, 0, 0, 1, custom_resources={"a": 1, "b": 2})
jsoned = resources_to_json(original)
new_resource = json_to_resources(jsoned)
self.assertEqual(original, new_resource)
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
| [] | [] | [
"TUNE_PLACEMENT_GROUP_WAIT_S",
"TUNE_TRIAL_STARTUP_GRACE_PERIOD",
"TUNE_RESULT_BUFFER_LENGTH",
"TUNE_RESULT_BUFFER_MIN_TIME_S",
"TUNE_TRIAL_RESULT_WAIT_TIME_S",
"TUNE_MAX_PENDING_TRIALS_PG",
"CUDA_VISIBLE_DEVICES"
] | [] | ["TUNE_PLACEMENT_GROUP_WAIT_S", "TUNE_TRIAL_STARTUP_GRACE_PERIOD", "TUNE_RESULT_BUFFER_LENGTH", "TUNE_RESULT_BUFFER_MIN_TIME_S", "TUNE_TRIAL_RESULT_WAIT_TIME_S", "TUNE_MAX_PENDING_TRIALS_PG", "CUDA_VISIBLE_DEVICES"] | python | 7 | 0 | |
utils.py | import os
def get_token(args):
"""
Parse command line arguments
Args:
args: object : Parsed arguments
Returns:
Tellus API token
"""
if args.token is not None:
return args.token
if "TELLUS_API_TOKEN" in os.environ:
return os.environ["TELLUS_API_TOKEN"]
print(
"Please set your API token in option '-t <API_TOKEN>' "
+ "or environment variable TELLUS_API_TOKEN"
)
return ""
| [] | [] | [
"TELLUS_API_TOKEN"
] | [] | ["TELLUS_API_TOKEN"] | python | 1 | 0 | |
sdk/identity/azure-identity/tests/test_authn_client_async.py | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import asyncio
from unittest.mock import Mock, patch
from urllib.parse import urlparse
import pytest
from azure.identity._constants import EnvironmentVariables
from azure.identity.aio._authn_client import AsyncAuthnClient
from helpers import mock_response
from helpers_async import wrap_in_future
@pytest.mark.asyncio
async def test_request_url():
authority = "authority.com"
tenant = "expected_tenant"
def mock_send(request, **kwargs):
scheme, netloc, path, _, _, _ = urlparse(request.url)
assert scheme == "https"
assert netloc == authority
assert path.startswith("/" + tenant)
return mock_response(json_payload={"token_type": "Bearer", "expires_in": 42, "access_token": "***"})
client = AsyncAuthnClient(tenant=tenant, transport=Mock(send=wrap_in_future(mock_send)), authority=authority)
await client.request_token(("scope",))
# authority can be configured via environment variable
with patch.dict("os.environ", {EnvironmentVariables.AZURE_AUTHORITY_HOST: authority}, clear=True):
client = AsyncAuthnClient(tenant=tenant, transport=Mock(send=wrap_in_future(mock_send)))
await client.request_token(("scope",))
| [] | [] | [] | [] | [] | python | 0 | 0 | |
DjangoBasic/DjangoBasic/asgi.py | """
ASGI config for DjangoBasic project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoBasic.settings')
application = get_asgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
tools/proto-gae/proto_gae.go | // Copyright 2015 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bufio"
"flag"
"fmt"
"io"
"os"
"sort"
"strings"
"text/template"
"go.chromium.org/luci/common/errors"
"go.chromium.org/luci/common/flag/stringsetflag"
)
type app struct {
out io.Writer
packageName string
typeNames stringsetflag.Flag
outFile string
header string
}
const help = `Usage of %s:
%s is a go-generator program that generates PropertyConverter implementations
for types produced by protoc. It can be used in a go generation file like:
//go:generate <protoc command>
//go:generate proto-gae -type MessageType -type OtherMessageType
This will produce a new file which implements the ToProperty and FromProperty
methods for the named types.
Options:
`
const copyright = `// Copyright 2017 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
`
func (a *app) parseArgs(fs *flag.FlagSet, args []string) error {
fs.SetOutput(a.out)
fs.Usage = func() {
fmt.Fprintf(a.out, help, args[0], args[0])
fs.PrintDefaults()
}
fs.Var(&a.typeNames, "type",
"A generated proto.Message type to generate stubs for (required, repeatable)")
fs.StringVar(&a.outFile, "out", "proto_gae.gen.go",
"The name of the output file")
fs.StringVar(&a.header, "header", copyright, "Header text to put at the top of "+
"the generated file. Defaults to the LUCI Authors copyright.")
if err := fs.Parse(args[1:]); err != nil {
return err
}
fail := errors.MultiError(nil)
if a.typeNames.Data == nil || a.typeNames.Data.Len() == 0 {
fail = append(fail, errors.New("must specify one or more -type"))
}
if !strings.HasSuffix(a.outFile, ".go") {
fail = append(fail, errors.New("-output must end with '.go'"))
}
if len(fail) > 0 {
for _, e := range fail {
fmt.Fprintln(a.out, "error:", e)
}
fmt.Fprintln(a.out)
fs.Usage()
return fail
}
return nil
}
var tmpl = template.Must(
template.New("main").Parse(`{{if index . "header"}}{{index . "header"}}
{{end}}// AUTOGENERATED: Do not edit
package {{index . "package"}}
import (
"github.com/golang/protobuf/proto"
"github.com/conchoid/gae/service/datastore"
){{range index . "types"}}
var _ datastore.PropertyConverter = (*{{.}})(nil)
// ToProperty implements datastore.PropertyConverter. It causes an embedded
// '{{.}}' to serialize to an unindexed '[]byte' when used with the
// "github.com/conchoid/gae" library.
func (p *{{.}}) ToProperty() (prop datastore.Property, err error) {
data, err := proto.Marshal(p)
if err == nil {
prop.SetValue(data, datastore.NoIndex)
}
return
}
// FromProperty implements datastore.PropertyConverter. It parses a '[]byte'
// into an embedded '{{.}}' when used with the "github.com/conchoid/gae" library.
func (p *{{.}}) FromProperty(prop datastore.Property) error {
data, err := prop.Project(datastore.PTBytes)
if err != nil {
return err
}
return proto.Unmarshal(data.([]byte), p)
}{{end}}
`))
func (a *app) writeTo(w io.Writer) error {
typeNames := a.typeNames.Data.ToSlice()
sort.Strings(typeNames)
return tmpl.Execute(w, map[string]interface{}{
"package": a.packageName,
"types": typeNames,
"header": a.header,
})
}
func (a *app) main() {
if err := a.parseArgs(flag.NewFlagSet(os.Args[0], flag.ContinueOnError), os.Args); err != nil {
os.Exit(1)
}
ofile, err := os.Create(a.outFile)
if err != nil {
fmt.Fprintf(a.out, "error: %s", err)
os.Exit(2)
}
closeFn := func(delete bool) {
if ofile != nil {
if err := ofile.Close(); err != nil {
fmt.Fprintf(a.out, "error while closing file: %s", err)
}
if delete {
if err := os.Remove(a.outFile); err != nil {
fmt.Fprintf(a.out, "failed to remove file!")
}
}
}
ofile = nil
}
defer closeFn(false)
buf := bufio.NewWriter(ofile)
err = a.writeTo(buf)
if err != nil {
fmt.Fprintf(a.out, "error while writing: %s", err)
closeFn(true)
os.Exit(3)
}
if err := buf.Flush(); err != nil {
fmt.Fprintf(a.out, "error while writing: %s", err)
closeFn(true)
os.Exit(4)
}
}
func main() {
(&app{out: os.Stderr, packageName: os.Getenv("GOPACKAGE")}).main()
}
| [
"\"GOPACKAGE\""
] | [] | [
"GOPACKAGE"
] | [] | ["GOPACKAGE"] | go | 1 | 0 | |
dragonchain/lib/error_reporter.py | # Copyright 2019 Dragonchain, Inc.
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
# You may obtain a copy of the Apache License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
import os
import json
import time
import traceback
from dragonchain import logger
from dragonchain.lib.dto import schema
_log = logger.get_logger()
REPORT_ERRORS = False
_reporting_type = (os.environ.get("ERROR_REPORTING_TYPE") or "").lower()
if _reporting_type == "sns":
REPORT_ERRORS = True
from dragonchain.lib.interfaces.aws.sns import send_error_message as reporter
elif _reporting_type == "storage":
REPORT_ERRORS = True
from dragonchain.lib.interfaces.storage import save_error_message as reporter # noqa: T484 alternative import is expected
def get_exception_message(exception: Exception) -> str:
if len(exception.args) > 0:
return str(exception.args[0])
return ""
def report_exception(exception: Exception, message: str) -> None:
_log.exception("Error:")
if REPORT_ERRORS:
reporter(
json.dumps(
{
"dcrn": schema.DCRN.Error_InTransit_Template.value.format(os.environ["LEVEL"]),
"version": "1",
"app": "Dragonchain",
"timestamp": int(time.time()),
"service": os.environ.get("SERVICE"),
"stack_trace": "".join(traceback.format_tb(exception.__traceback__)),
"message": json.dumps({"internal_id": os.environ.get("INTERNAL_ID")}),
},
separators=(",", ":"),
)
)
| [] | [] | [
"ERROR_REPORTING_TYPE",
"LEVEL",
"INTERNAL_ID",
"SERVICE"
] | [] | ["ERROR_REPORTING_TYPE", "LEVEL", "INTERNAL_ID", "SERVICE"] | python | 4 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'site_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
internal/dao/helm.go | package dao
import (
"context"
"fmt"
"os"
"github.com/derailed/k9s/internal/client"
"github.com/derailed/k9s/internal/render"
"github.com/rs/zerolog/log"
"helm.sh/helm/v3/pkg/action"
"k8s.io/apimachinery/pkg/runtime"
)
var (
_ Accessor = (*Helm)(nil)
_ Nuker = (*Helm)(nil)
_ Describer = (*Helm)(nil)
)
// Helm represents a helm chart.
type Helm struct {
NonResource
}
// List returns a collection of resources.
func (c *Helm) List(ctx context.Context, ns string) ([]runtime.Object, error) {
cfg, err := c.EnsureHelmConfig(ns)
if err != nil {
return nil, err
}
rr, err := action.NewList(cfg).Run()
if err != nil {
return nil, err
}
oo := make([]runtime.Object, 0, len(rr))
for _, r := range rr {
oo = append(oo, render.HelmRes{Release: r})
}
return oo, nil
}
// Get returns a resource.
func (c *Helm) Get(_ context.Context, path string) (runtime.Object, error) {
ns, n := client.Namespaced(path)
cfg, err := c.EnsureHelmConfig(ns)
if err != nil {
return nil, err
}
resp, err := action.NewGet(cfg).Run(n)
if err != nil {
return nil, err
}
return render.HelmRes{Release: resp}, nil
}
// Describe returns the chart notes.
func (c *Helm) Describe(path string) (string, error) {
ns, n := client.Namespaced(path)
cfg, err := c.EnsureHelmConfig(ns)
if err != nil {
return "", err
}
resp, err := action.NewGet(cfg).Run(n)
if err != nil {
return "", err
}
return resp.Info.Notes, nil
}
// ToYAML returns the chart manifest.
func (c *Helm) ToYAML(path string, showManaged bool) (string, error) {
ns, n := client.Namespaced(path)
cfg, err := c.EnsureHelmConfig(ns)
if err != nil {
return "", err
}
resp, err := action.NewGet(cfg).Run(n)
if err != nil {
return "", err
}
return resp.Manifest, nil
}
// Delete uninstall a Helm.
func (c *Helm) Delete(path string, cascade, force bool) error {
ns, n := client.Namespaced(path)
cfg, err := c.EnsureHelmConfig(ns)
if err != nil {
return err
}
res, err := action.NewUninstall(cfg).Run(n)
if err != nil {
return err
}
if res != nil && res.Info != "" {
return fmt.Errorf("%s", res.Info)
}
return nil
}
// EnsureHelmConfig return a new configuration.
func (c *Helm) EnsureHelmConfig(ns string) (*action.Configuration, error) {
cfg := new(action.Configuration)
if err := cfg.Init(c.Client().Config().Flags(), ns, os.Getenv("HELM_DRIVER"), helmLogger); err != nil {
return nil, err
}
return cfg, nil
}
func helmLogger(s string, args ...interface{}) {
log.Debug().Msgf("%s %v", s, args)
}
| [
"\"HELM_DRIVER\""
] | [] | [
"HELM_DRIVER"
] | [] | ["HELM_DRIVER"] | go | 1 | 0 | |
tests/unit/gapic/documentai_v1beta2/test_document_understanding_service.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google import auth
from google.api_core import client_options
from google.api_core import exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.auth import credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.documentai_v1beta2.services.document_understanding_service import (
DocumentUnderstandingServiceAsyncClient,
)
from google.cloud.documentai_v1beta2.services.document_understanding_service import (
DocumentUnderstandingServiceClient,
)
from google.cloud.documentai_v1beta2.services.document_understanding_service import (
transports,
)
from google.cloud.documentai_v1beta2.types import document
from google.cloud.documentai_v1beta2.types import document_understanding
from google.cloud.documentai_v1beta2.types import geometry
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.rpc import status_pb2 as status # type: ignore
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert DocumentUnderstandingServiceClient._get_default_mtls_endpoint(None) is None
assert (
DocumentUnderstandingServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
DocumentUnderstandingServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
DocumentUnderstandingServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
DocumentUnderstandingServiceClient._get_default_mtls_endpoint(
sandbox_mtls_endpoint
)
== sandbox_mtls_endpoint
)
assert (
DocumentUnderstandingServiceClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize(
"client_class",
[DocumentUnderstandingServiceClient, DocumentUnderstandingServiceAsyncClient,],
)
def test_document_understanding_service_client_from_service_account_info(client_class):
creds = credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "us-documentai.googleapis.com:443"
@pytest.mark.parametrize(
"client_class",
[DocumentUnderstandingServiceClient, DocumentUnderstandingServiceAsyncClient,],
)
def test_document_understanding_service_client_from_service_account_file(client_class):
creds = credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "us-documentai.googleapis.com:443"
def test_document_understanding_service_client_get_transport_class():
transport = DocumentUnderstandingServiceClient.get_transport_class()
available_transports = [
transports.DocumentUnderstandingServiceGrpcTransport,
]
assert transport in available_transports
transport = DocumentUnderstandingServiceClient.get_transport_class("grpc")
assert transport == transports.DocumentUnderstandingServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
DocumentUnderstandingServiceClient,
transports.DocumentUnderstandingServiceGrpcTransport,
"grpc",
),
(
DocumentUnderstandingServiceAsyncClient,
transports.DocumentUnderstandingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
DocumentUnderstandingServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DocumentUnderstandingServiceClient),
)
@mock.patch.object(
DocumentUnderstandingServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DocumentUnderstandingServiceAsyncClient),
)
def test_document_understanding_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(
DocumentUnderstandingServiceClient, "get_transport_class"
) as gtc:
transport = transport_class(credentials=credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(
DocumentUnderstandingServiceClient, "get_transport_class"
) as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
DocumentUnderstandingServiceClient,
transports.DocumentUnderstandingServiceGrpcTransport,
"grpc",
"true",
),
(
DocumentUnderstandingServiceAsyncClient,
transports.DocumentUnderstandingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
DocumentUnderstandingServiceClient,
transports.DocumentUnderstandingServiceGrpcTransport,
"grpc",
"false",
),
(
DocumentUnderstandingServiceAsyncClient,
transports.DocumentUnderstandingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
DocumentUnderstandingServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DocumentUnderstandingServiceClient),
)
@mock.patch.object(
DocumentUnderstandingServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DocumentUnderstandingServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_document_understanding_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
DocumentUnderstandingServiceClient,
transports.DocumentUnderstandingServiceGrpcTransport,
"grpc",
),
(
DocumentUnderstandingServiceAsyncClient,
transports.DocumentUnderstandingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_document_understanding_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
DocumentUnderstandingServiceClient,
transports.DocumentUnderstandingServiceGrpcTransport,
"grpc",
),
(
DocumentUnderstandingServiceAsyncClient,
transports.DocumentUnderstandingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_document_understanding_service_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_document_understanding_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.documentai_v1beta2.services.document_understanding_service.transports.DocumentUnderstandingServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = DocumentUnderstandingServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_batch_process_documents(
transport: str = "grpc",
request_type=document_understanding.BatchProcessDocumentsRequest,
):
client = DocumentUnderstandingServiceClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_process_documents), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.batch_process_documents(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == document_understanding.BatchProcessDocumentsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_batch_process_documents_from_dict():
test_batch_process_documents(request_type=dict)
def test_batch_process_documents_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DocumentUnderstandingServiceClient(
credentials=credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_process_documents), "__call__"
) as call:
client.batch_process_documents()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == document_understanding.BatchProcessDocumentsRequest()
@pytest.mark.asyncio
async def test_batch_process_documents_async(
transport: str = "grpc_asyncio",
request_type=document_understanding.BatchProcessDocumentsRequest,
):
client = DocumentUnderstandingServiceAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_process_documents), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.batch_process_documents(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == document_understanding.BatchProcessDocumentsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_batch_process_documents_async_from_dict():
await test_batch_process_documents_async(request_type=dict)
def test_batch_process_documents_field_headers():
client = DocumentUnderstandingServiceClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_understanding.BatchProcessDocumentsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_process_documents), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.batch_process_documents(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_batch_process_documents_field_headers_async():
client = DocumentUnderstandingServiceAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_understanding.BatchProcessDocumentsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_process_documents), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.batch_process_documents(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_batch_process_documents_flattened():
client = DocumentUnderstandingServiceClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_process_documents), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.batch_process_documents(
requests=[
document_understanding.ProcessDocumentRequest(parent="parent_value")
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].requests == [
document_understanding.ProcessDocumentRequest(parent="parent_value")
]
def test_batch_process_documents_flattened_error():
client = DocumentUnderstandingServiceClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.batch_process_documents(
document_understanding.BatchProcessDocumentsRequest(),
requests=[
document_understanding.ProcessDocumentRequest(parent="parent_value")
],
)
@pytest.mark.asyncio
async def test_batch_process_documents_flattened_async():
client = DocumentUnderstandingServiceAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_process_documents), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.batch_process_documents(
requests=[
document_understanding.ProcessDocumentRequest(parent="parent_value")
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].requests == [
document_understanding.ProcessDocumentRequest(parent="parent_value")
]
@pytest.mark.asyncio
async def test_batch_process_documents_flattened_error_async():
client = DocumentUnderstandingServiceAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.batch_process_documents(
document_understanding.BatchProcessDocumentsRequest(),
requests=[
document_understanding.ProcessDocumentRequest(parent="parent_value")
],
)
def test_process_document(
transport: str = "grpc", request_type=document_understanding.ProcessDocumentRequest
):
client = DocumentUnderstandingServiceClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.process_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = document.Document(
mime_type="mime_type_value", text="text_value", uri="uri_value",
)
response = client.process_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == document_understanding.ProcessDocumentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, document.Document)
assert response.mime_type == "mime_type_value"
assert response.text == "text_value"
def test_process_document_from_dict():
test_process_document(request_type=dict)
def test_process_document_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DocumentUnderstandingServiceClient(
credentials=credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.process_document), "__call__") as call:
client.process_document()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == document_understanding.ProcessDocumentRequest()
@pytest.mark.asyncio
async def test_process_document_async(
transport: str = "grpc_asyncio",
request_type=document_understanding.ProcessDocumentRequest,
):
client = DocumentUnderstandingServiceAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.process_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
document.Document(mime_type="mime_type_value", text="text_value",)
)
response = await client.process_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == document_understanding.ProcessDocumentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, document.Document)
assert response.mime_type == "mime_type_value"
assert response.text == "text_value"
@pytest.mark.asyncio
async def test_process_document_async_from_dict():
await test_process_document_async(request_type=dict)
def test_process_document_field_headers():
client = DocumentUnderstandingServiceClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_understanding.ProcessDocumentRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.process_document), "__call__") as call:
call.return_value = document.Document()
client.process_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_process_document_field_headers_async():
client = DocumentUnderstandingServiceAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_understanding.ProcessDocumentRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.process_document), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(document.Document())
await client.process_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.DocumentUnderstandingServiceGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DocumentUnderstandingServiceClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.DocumentUnderstandingServiceGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DocumentUnderstandingServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.DocumentUnderstandingServiceGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DocumentUnderstandingServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.DocumentUnderstandingServiceGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
client = DocumentUnderstandingServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.DocumentUnderstandingServiceGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.DocumentUnderstandingServiceGrpcAsyncIOTransport(
credentials=credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.DocumentUnderstandingServiceGrpcTransport,
transports.DocumentUnderstandingServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(auth, "default") as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = DocumentUnderstandingServiceClient(
credentials=credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport, transports.DocumentUnderstandingServiceGrpcTransport,
)
def test_document_understanding_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(exceptions.DuplicateCredentialArgs):
transport = transports.DocumentUnderstandingServiceTransport(
credentials=credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_document_understanding_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.documentai_v1beta2.services.document_understanding_service.transports.DocumentUnderstandingServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.DocumentUnderstandingServiceTransport(
credentials=credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"batch_process_documents",
"process_document",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_document_understanding_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
auth, "load_credentials_from_file"
) as load_creds, mock.patch(
"google.cloud.documentai_v1beta2.services.document_understanding_service.transports.DocumentUnderstandingServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (credentials.AnonymousCredentials(), None)
transport = transports.DocumentUnderstandingServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_document_understanding_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(auth, "default") as adc, mock.patch(
"google.cloud.documentai_v1beta2.services.document_understanding_service.transports.DocumentUnderstandingServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (credentials.AnonymousCredentials(), None)
transport = transports.DocumentUnderstandingServiceTransport()
adc.assert_called_once()
def test_document_understanding_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(auth, "default") as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
DocumentUnderstandingServiceClient()
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
def test_document_understanding_service_transport_auth_adc():
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(auth, "default") as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
transports.DocumentUnderstandingServiceGrpcTransport(
host="squid.clam.whelk", quota_project_id="octopus"
)
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.DocumentUnderstandingServiceGrpcTransport,
transports.DocumentUnderstandingServiceGrpcAsyncIOTransport,
],
)
def test_document_understanding_service_grpc_transport_client_cert_source_for_mtls(
transport_class,
):
cred = credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=("https://www.googleapis.com/auth/cloud-platform",),
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_document_understanding_service_host_no_port():
client = DocumentUnderstandingServiceClient(
credentials=credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="us-documentai.googleapis.com"
),
)
assert client.transport._host == "us-documentai.googleapis.com:443"
def test_document_understanding_service_host_with_port():
client = DocumentUnderstandingServiceClient(
credentials=credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="us-documentai.googleapis.com:8000"
),
)
assert client.transport._host == "us-documentai.googleapis.com:8000"
def test_document_understanding_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.DocumentUnderstandingServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
def test_document_understanding_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.DocumentUnderstandingServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.DocumentUnderstandingServiceGrpcTransport,
transports.DocumentUnderstandingServiceGrpcAsyncIOTransport,
],
)
def test_document_understanding_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=("https://www.googleapis.com/auth/cloud-platform",),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.DocumentUnderstandingServiceGrpcTransport,
transports.DocumentUnderstandingServiceGrpcAsyncIOTransport,
],
)
def test_document_understanding_service_transport_channel_mtls_with_adc(
transport_class,
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=("https://www.googleapis.com/auth/cloud-platform",),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_document_understanding_service_grpc_lro_client():
client = DocumentUnderstandingServiceClient(
credentials=credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_document_understanding_service_grpc_lro_async_client():
client = DocumentUnderstandingServiceAsyncClient(
credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = DocumentUnderstandingServiceClient.common_billing_account_path(
billing_account
)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = DocumentUnderstandingServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = DocumentUnderstandingServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = DocumentUnderstandingServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = DocumentUnderstandingServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = DocumentUnderstandingServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = DocumentUnderstandingServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = DocumentUnderstandingServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = DocumentUnderstandingServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = DocumentUnderstandingServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = DocumentUnderstandingServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = DocumentUnderstandingServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = DocumentUnderstandingServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = DocumentUnderstandingServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = DocumentUnderstandingServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.DocumentUnderstandingServiceTransport, "_prep_wrapped_messages"
) as prep:
client = DocumentUnderstandingServiceClient(
credentials=credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.DocumentUnderstandingServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = DocumentUnderstandingServiceClient.get_transport_class()
transport = transport_class(
credentials=credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "geovinci.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
setup.py | # Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
import sys
from setuptools import setup
def get_static_files(path):
return [os.path.join(dirpath.replace("luigi/", ""), ext)
for (dirpath, dirnames, filenames) in os.walk(path)
for ext in ["*.html", "*.js", "*.css", "*.png",
"*.eot", "*.svg", "*.ttf", "*.woff", "*.woff2"]]
luigi_package_data = sum(map(get_static_files, ["luigi/static", "luigi/templates"]), [])
readme_note = """\
.. note::
For the latest source, discussion, etc, please visit the
`GitHub repository <https://github.com/spotify/luigi>`_\n\n
"""
with open('README.rst') as fobj:
long_description = readme_note + fobj.read()
install_requires = [
'tornado>=4.0,<5',
# https://pagure.io/python-daemon/issue/18
'python-daemon<2.2.0',
'python-dateutil==2.7.5',
]
# Note: To support older versions of setuptools, we're explicitly not
# using conditional syntax (i.e. 'enum34>1.1.0;python_version<"3.4"').
# This syntax is a problem for setuptools as recent as `20.1.1`,
# published Feb 16, 2016.
if sys.version_info[:2] < (3, 4):
install_requires.append('enum34>1.1.0')
if os.environ.get('READTHEDOCS', None) == 'True':
# So that we can build documentation for luigi.db_task_history and luigi.contrib.sqla
install_requires.append('sqlalchemy')
# readthedocs don't like python-daemon, see #1342
install_requires.remove('python-daemon<2.2.0')
install_requires.append('sphinx>=1.4.4') # Value mirrored in doc/conf.py
setup(
name='luigi',
version='2.8.3',
description='Workflow mgmgt + task scheduling + dependency resolution',
long_description=long_description,
author='The Luigi Authors',
url='https://github.com/spotify/luigi',
license='Apache License 2.0',
packages=[
'luigi',
'luigi.configuration',
'luigi.contrib',
'luigi.contrib.hdfs',
'luigi.tools'
],
package_data={
'luigi': luigi_package_data
},
entry_points={
'console_scripts': [
'luigi = luigi.cmdline:luigi_run',
'luigid = luigi.cmdline:luigid',
'luigi-grep = luigi.tools.luigi_grep:main',
'luigi-deps = luigi.tools.deps:main',
'luigi-deps-tree = luigi.tools.deps_tree:main'
]
},
install_requires=install_requires,
extras_require={
'toml': ['toml<2.0.0'],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: System :: Monitoring',
],
)
| [] | [] | [
"READTHEDOCS"
] | [] | ["READTHEDOCS"] | python | 1 | 0 | |
model_zoo/official/cv/resnet/eval.py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train resnet."""
import os
import random
import argparse
import numpy as np
from mindspore import context
from mindspore import dataset as de
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.train.model import Model
from mindspore.train.serialization import load_checkpoint, load_param_into_net
parser = argparse.ArgumentParser(description='Image classification')
parser.add_argument('--net', type=str, default=None, help='Resnet Model, either resnet50 or resnet101')
parser.add_argument('--dataset', type=str, default=None, help='Dataset, either cifar10 or imagenet2012')
parser.add_argument('--checkpoint_path', type=str, default=None, help='Checkpoint file path')
parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
parser.add_argument('--device_target', type=str, default='Ascend', help='Device target')
args_opt = parser.parse_args()
random.seed(1)
np.random.seed(1)
de.config.set_seed(1)
if args_opt.net == "resnet50":
from src.resnet import resnet50 as resnet
if args_opt.dataset == "cifar10":
from src.config import config1 as config
from src.dataset import create_dataset1 as create_dataset
else:
from src.config import config2 as config
from src.dataset import create_dataset2 as create_dataset
elif args_opt.net == "resnet101":
from src.resnet import resnet101 as resnet
from src.config import config3 as config
from src.dataset import create_dataset3 as create_dataset
else:
from src.resnet import se_resnet50 as resnet
from src.config import config4 as config
from src.dataset import create_dataset4 as create_dataset
if __name__ == '__main__':
target = args_opt.device_target
# init context
context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=False)
if target != "GPU":
device_id = int(os.getenv('DEVICE_ID'))
context.set_context(device_id=device_id)
# create dataset
dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=False, batch_size=config.batch_size,
target=target)
step_size = dataset.get_dataset_size()
# define net
net = resnet(class_num=config.class_num)
# load checkpoint
param_dict = load_checkpoint(args_opt.checkpoint_path)
load_param_into_net(net, param_dict)
net.set_train(False)
# define loss, model
if args_opt.dataset == "imagenet2012":
if not config.use_label_smooth:
config.label_smooth_factor = 0.0
loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean",
smooth_factor=config.label_smooth_factor, num_classes=config.class_num)
else:
loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
# define model
model = Model(net, loss_fn=loss, metrics={'top_1_accuracy', 'top_5_accuracy'})
# eval model
res = model.eval(dataset)
print("result:", res, "ckpt=", args_opt.checkpoint_path)
| [] | [] | [
"DEVICE_ID"
] | [] | ["DEVICE_ID"] | python | 1 | 0 | |
packaging/piptool.py | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The piptool module imports pip requirements into Bazel rules."""
import argparse
import atexit
import json
import os
import pkgutil
import pkg_resources
import re
import shutil
import sys
import tempfile
import zipfile
# Note: We carefully import the following modules in a particular
# order, since these modules modify the import path and machinery.
import pkg_resources
def extract_packages(package_names):
"""Extract zipfile contents to disk and add to import path"""
# Set a safe extraction dir
extraction_tmpdir = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(
extraction_tmpdir, ignore_errors=True))
pkg_resources.set_extraction_path(extraction_tmpdir)
# Extract each package to disk
dirs_to_add = []
for package_name in package_names:
req = pkg_resources.Requirement.parse(package_name)
extraction_dir = pkg_resources.resource_filename(req, '')
dirs_to_add.append(extraction_dir)
# Add extracted directories to import path ahead of their zip file
# counterparts.
sys.path[0:0] = dirs_to_add
existing_pythonpath = os.environ.get('PYTHONPATH')
if existing_pythonpath:
dirs_to_add.extend(existing_pythonpath.split(':'))
os.environ['PYTHONPATH'] = ':'.join(dirs_to_add)
# Wheel, pip, and setuptools are much happier running from actual
# files on disk, rather than entries in a zipfile. Extract zipfile
# contents, add those contents to the path, then import them.
extract_packages(['pip', 'setuptools', 'wheel'])
# Defeat pip's attempt to mangle sys.path
saved_sys_path = sys.path
sys.path = sys.path[:]
import pip
sys.path = saved_sys_path
import setuptools
import wheel
def pip_main(argv):
# Extract the certificates from the PAR following the example of get-pip.py
# https://github.com/pypa/get-pip/blob/430ba37776ae2ad89/template.py#L164-L168
cert_tmpdir = tempfile.mkdtemp()
cert_path = os.path.join(cert_tmpdir, "cacert.pem")
atexit.register(lambda: shutil.rmtree(cert_tmpdir, ignore_errors=True))
with open(cert_path, "wb") as cert:
cert.write(pkgutil.get_data("pip._vendor.requests", "cacert.pem"))
argv = ["--isolated", "--disable-pip-version-check", "--cert", cert_path] + argv
return pip.main(argv)
from packaging.whl import Wheel
parser = argparse.ArgumentParser(
description='Import Python dependencies into Bazel.')
parser.add_argument('--python_interpreter', action='store',
help=('The Python interpreter to use when extracting '
'wheels.'))
parser.add_argument('--name', action='store',
help=('The namespace of the import.'))
parser.add_argument('--input', action='store',
help=('The requirements.txt file to import.'))
parser.add_argument('--output', action='store',
help=('The requirements.bzl file to export.'))
parser.add_argument('--directory', action='store',
help=('The directory into which to put .whl files.'))
parser.add_argument('--extra_pip_args', action='store',
help=('Extra arguments to pass down to pip.'))
def determine_possible_extras(whls):
"""Determines the list of possible "extras" for each .whl
The possibility of an extra is determined by looking at its
additional requirements, and determinine whether they are
satisfied by the complete list of available wheels.
Args:
whls: a list of Wheel objects
Returns:
a dict that is keyed by the Wheel objects in whls, and whose
values are lists of possible extras.
"""
whl_map = {
whl.distribution(): whl
for whl in whls
}
# TODO(mattmoor): Consider memoizing if this recursion ever becomes
# expensive enough to warrant it.
def is_possible(distro, extra):
distro = distro.replace("-", "_")
# If we don't have the .whl at all, then this isn't possible.
if distro not in whl_map:
return False
whl = whl_map[distro]
# If we have the .whl, and we don't need anything extra then
# we can satisfy this dependency.
if not extra:
return True
# If we do need something extra, then check the extra's
# dependencies to make sure they are fully satisfied.
for extra_dep in whl.dependencies(extra=extra):
req = pkg_resources.Requirement.parse(extra_dep)
# Check that the dep and any extras are all possible.
if not is_possible(req.project_name, None):
return False
for e in req.extras:
if not is_possible(req.project_name, e):
return False
# If all of the dependencies of the extra are satisfiable then
# it is possible to construct this dependency.
return True
return {
whl: [
extra
for extra in whl.extras()
if is_possible(whl.distribution(), extra)
]
for whl in whls
}
def main():
args = parser.parse_args()
# https://github.com/pypa/pip/blob/9.0.1/pip/__init__.py#L209
pip_args = ["wheel", "-w", args.directory, "-r", args.input]
if args.extra_pip_args:
pip_args += args.extra_pip_args.strip("\"").split()
if pip_main(pip_args):
sys.exit(1)
# Enumerate the .whl files we downloaded.
def list_whls():
dir = args.directory + '/'
for root, unused_dirnames, filenames in os.walk(dir):
for fname in filenames:
if fname.endswith('.whl'):
yield os.path.join(root, fname)
whls = [Wheel(path) for path in list_whls()]
possible_extras = determine_possible_extras(whls)
def repository_name(wheel):
return args.name + "_" + wheel.repository_suffix()
def whl_library(wheel):
# Indentation here matters. whl_library must be within the scope
# of the function below. We also avoid reimporting an existing WHL.
return """
if "{repo_name}" not in native.existing_rules():
whl_library(
name = "{repo_name}",
python_interpreter = "{python_interpreter}",
whl = "@{name}//:{path}",
requirements = "@{name}//:requirements.bzl",
extras = [{extras}]
)""".format(name=args.name, repo_name=repository_name(wheel),
python_interpreter=args.python_interpreter,
path=wheel.basename(),
extras=','.join([
'"%s"' % extra
for extra in possible_extras.get(wheel, [])
]))
whl_targets = ','.join([
','.join([
'"%s": "@%s//:pkg"' % (whl.distribution().lower(), repository_name(whl))
] + [
# For every extra that is possible from this requirements.txt
'"%s[%s]": "@%s//:%s"' % (whl.distribution().lower(), extra.lower(),
repository_name(whl), extra)
for extra in possible_extras.get(whl, [])
])
for whl in whls
])
with open(args.output, 'w') as f:
f.write("""\
# Install pip requirements.
#
# Generated from {input}
load("@rules_python//python:whl.bzl", "whl_library")
def pip_install():
{whl_libraries}
_requirements = {{
{mappings}
}}
all_requirements = _requirements.values()
def requirement(name):
name_key = name.replace("-", "_").lower()
if name_key not in _requirements:
fail("Could not find pip-provided dependency: '%s'" % name)
return _requirements[name_key]
""".format(input=args.input,
whl_libraries='\n'.join(map(whl_library, whls)) if whls else "pass",
mappings=whl_targets))
if __name__ == '__main__':
main()
| [] | [] | [
"PYTHONPATH"
] | [] | ["PYTHONPATH"] | python | 1 | 0 | |
vendor/k8s.io/client-go/1.4/rest/config.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rest
import (
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"path"
gruntime "runtime"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/client-go/1.4/pkg/api"
"k8s.io/client-go/1.4/pkg/api/unversioned"
"k8s.io/client-go/1.4/pkg/runtime"
"k8s.io/client-go/1.4/pkg/util/crypto"
"k8s.io/client-go/1.4/pkg/util/flowcontrol"
"k8s.io/client-go/1.4/pkg/version"
clientcmdapi "k8s.io/client-go/1.4/tools/clientcmd/api"
)
const (
DefaultQPS float32 = 5.0
DefaultBurst int = 10
)
// Config holds the common attributes that can be passed to a Kubernetes client on
// initialization.
type Config struct {
// Host must be a host string, a host:port pair, or a URL to the base of the apiserver.
// If a URL is given then the (optional) Path of that URL represents a prefix that must
// be appended to all request URIs used to access the apiserver. This allows a frontend
// proxy to easily relocate all of the apiserver endpoints.
Host string
// APIPath is a sub-path that points to an API root.
APIPath string
// Prefix is the sub path of the server. If not specified, the client will set
// a default value. Use "/" to indicate the server root should be used
Prefix string
// ContentConfig contains settings that affect how objects are transformed when
// sent to the server.
ContentConfig
// Server requires Basic authentication
Username string
Password string
// Server requires Bearer authentication. This client will not attempt to use
// refresh tokens for an OAuth2 flow.
// TODO: demonstrate an OAuth2 compatible client.
BearerToken string
// Impersonate is the username that this RESTClient will impersonate
Impersonate string
// Server requires plugin-specified authentication.
AuthProvider *clientcmdapi.AuthProviderConfig
// Callback to persist config for AuthProvider.
AuthConfigPersister AuthProviderConfigPersister
// TLSClientConfig contains settings to enable transport layer security
TLSClientConfig
// Server should be accessed without verifying the TLS
// certificate. For testing only.
Insecure bool
// UserAgent is an optional field that specifies the caller of this request.
UserAgent string
// Transport may be used for custom HTTP behavior. This attribute may not
// be specified with the TLS client certificate options. Use WrapTransport
// for most client level operations.
Transport http.RoundTripper
// WrapTransport will be invoked for custom HTTP behavior after the underlying
// transport is initialized (either the transport created from TLSClientConfig,
// Transport, or http.DefaultTransport). The config may layer other RoundTrippers
// on top of the returned RoundTripper.
WrapTransport func(rt http.RoundTripper) http.RoundTripper
// QPS indicates the maximum QPS to the master from this client.
// If it's zero, the created RESTClient will use DefaultQPS: 5
QPS float32
// Maximum burst for throttle.
// If it's zero, the created RESTClient will use DefaultBurst: 10.
Burst int
// Rate limiter for limiting connections to the master from this client. If present overwrites QPS/Burst
RateLimiter flowcontrol.RateLimiter
// The maximum length of time to wait before giving up on a server request. A
// value of zero means no timeout. THIS VALUE IS NOT USED AT THE MOMENT. IT IS
// ONLY HERE TO KEEP E2E TESTS FROM BREAKING.
Timeout time.Duration
// Version forces a specific version to be used (if registered)
// Do we need this?
// Version string
}
// TLSClientConfig contains settings to enable transport layer security
type TLSClientConfig struct {
// Server requires TLS client certificate authentication
CertFile string
// Server requires TLS client certificate authentication
KeyFile string
// Trusted root certificates for server
CAFile string
// CertData holds PEM-encoded bytes (typically read from a client certificate file).
// CertData takes precedence over CertFile
CertData []byte
// KeyData holds PEM-encoded bytes (typically read from a client certificate key file).
// KeyData takes precedence over KeyFile
KeyData []byte
// CAData holds PEM-encoded bytes (typically read from a root certificates bundle).
// CAData takes precedence over CAFile
CAData []byte
}
type ContentConfig struct {
// AcceptContentTypes specifies the types the client will accept and is optional.
// If not set, ContentType will be used to define the Accept header
AcceptContentTypes string
// ContentType specifies the wire format used to communicate with the server.
// This value will be set as the Accept header on requests made to the server, and
// as the default content type on any object sent to the server. If not set,
// "application/json" is used.
ContentType string
// GroupVersion is the API version to talk to. Must be provided when initializing
// a RESTClient directly. When initializing a Client, will be set with the default
// code version.
GroupVersion *unversioned.GroupVersion
// NegotiatedSerializer is used for obtaining encoders and decoders for multiple
// supported media types.
NegotiatedSerializer runtime.NegotiatedSerializer
}
// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config
// object. Note that a RESTClient may require fields that are optional when initializing a Client.
// A RESTClient created by this method is generic - it expects to operate on an API that follows
// the Kubernetes conventions, but may not be the Kubernetes API.
func RESTClientFor(config *Config) (*RESTClient, error) {
if config.GroupVersion == nil {
return nil, fmt.Errorf("GroupVersion is required when initializing a RESTClient")
}
if config.NegotiatedSerializer == nil {
return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient")
}
qps := config.QPS
if config.QPS == 0.0 {
qps = DefaultQPS
}
burst := config.Burst
if config.Burst == 0 {
burst = DefaultBurst
}
baseURL, versionedAPIPath, err := defaultServerUrlFor(config)
if err != nil {
return nil, err
}
transport, err := TransportFor(config)
if err != nil {
return nil, err
}
var httpClient *http.Client
if transport != http.DefaultTransport {
httpClient = &http.Client{Transport: transport}
}
return NewRESTClient(baseURL, versionedAPIPath, config.ContentConfig, qps, burst, config.RateLimiter, httpClient)
}
// UnversionedRESTClientFor is the same as RESTClientFor, except that it allows
// the config.Version to be empty.
func UnversionedRESTClientFor(config *Config) (*RESTClient, error) {
if config.NegotiatedSerializer == nil {
return nil, fmt.Errorf("NeogitatedSerializer is required when initializing a RESTClient")
}
baseURL, versionedAPIPath, err := defaultServerUrlFor(config)
if err != nil {
return nil, err
}
transport, err := TransportFor(config)
if err != nil {
return nil, err
}
var httpClient *http.Client
if transport != http.DefaultTransport {
httpClient = &http.Client{Transport: transport}
}
versionConfig := config.ContentConfig
if versionConfig.GroupVersion == nil {
v := unversioned.SchemeGroupVersion
versionConfig.GroupVersion = &v
}
return NewRESTClient(baseURL, versionedAPIPath, versionConfig, config.QPS, config.Burst, config.RateLimiter, httpClient)
}
// SetKubernetesDefaults sets default values on the provided client config for accessing the
// Kubernetes API or returns an error if any of the defaults are impossible or invalid.
func SetKubernetesDefaults(config *Config) error {
if len(config.UserAgent) == 0 {
config.UserAgent = DefaultKubernetesUserAgent()
}
return nil
}
// DefaultKubernetesUserAgent returns the default user agent that clients can use.
func DefaultKubernetesUserAgent() string {
commit := version.Get().GitCommit
if len(commit) > 7 {
commit = commit[:7]
}
if len(commit) == 0 {
commit = "unknown"
}
version := version.Get().GitVersion
seg := strings.SplitN(version, "-", 2)
version = seg[0]
return fmt.Sprintf("%s/%s (%s/%s) kubernetes/%s", path.Base(os.Args[0]), version, gruntime.GOOS, gruntime.GOARCH, commit)
}
// InClusterConfig returns a config object which uses the service account
// kubernetes gives to pods. It's intended for clients that expect to be
// running inside a pod running on kubernetes. It will return an error if
// called from a process not running in a kubernetes environment.
func InClusterConfig() (*Config, error) {
host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT")
if len(host) == 0 || len(port) == 0 {
return nil, fmt.Errorf("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined")
}
token, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountTokenKey)
if err != nil {
return nil, err
}
tlsClientConfig := TLSClientConfig{}
rootCAFile := "/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountRootCAKey
if _, err := crypto.CertPoolFromFile(rootCAFile); err != nil {
glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err)
} else {
tlsClientConfig.CAFile = rootCAFile
}
return &Config{
// TODO: switch to using cluster DNS.
Host: "https://" + net.JoinHostPort(host, port),
BearerToken: string(token),
TLSClientConfig: tlsClientConfig,
}, nil
}
// IsConfigTransportTLS returns true if and only if the provided
// config will result in a protected connection to the server when it
// is passed to rest.RESTClientFor(). Use to determine when to
// send credentials over the wire.
//
// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are
// still possible.
func IsConfigTransportTLS(config Config) bool {
baseURL, _, err := defaultServerUrlFor(&config)
if err != nil {
return false
}
return baseURL.Scheme == "https"
}
// LoadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData,
// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are
// either populated or were empty to start.
func LoadTLSFiles(c *Config) error {
var err error
c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile)
if err != nil {
return err
}
c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile)
if err != nil {
return err
}
c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile)
if err != nil {
return err
}
return nil
}
// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file,
// or an error if an error occurred reading the file
func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
if len(data) > 0 {
return data, nil
}
if len(file) > 0 {
fileData, err := ioutil.ReadFile(file)
if err != nil {
return []byte{}, err
}
return fileData, nil
}
return nil, nil
}
func AddUserAgent(config *Config, userAgent string) *Config {
fullUserAgent := DefaultKubernetesUserAgent() + "/" + userAgent
config.UserAgent = fullUserAgent
return config
}
| [
"\"KUBERNETES_SERVICE_HOST\"",
"\"KUBERNETES_SERVICE_PORT\""
] | [] | [
"KUBERNETES_SERVICE_HOST",
"KUBERNETES_SERVICE_PORT"
] | [] | ["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT"] | go | 2 | 0 | |
controllers/provider-gcp/cmd/gardener-extension-provider-gcp/app/app.go | // Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package app
import (
"context"
"fmt"
"os"
gcpinstall "github.com/gardener/gardener-extensions/controllers/provider-gcp/pkg/apis/gcp/install"
gcpcmd "github.com/gardener/gardener-extensions/controllers/provider-gcp/pkg/cmd"
gcpbackupbucket "github.com/gardener/gardener-extensions/controllers/provider-gcp/pkg/controller/backupbucket"
gcpbackupentry "github.com/gardener/gardener-extensions/controllers/provider-gcp/pkg/controller/backupentry"
gcpcontrolplane "github.com/gardener/gardener-extensions/controllers/provider-gcp/pkg/controller/controlplane"
"github.com/gardener/gardener-extensions/controllers/provider-gcp/pkg/controller/healthcheck"
gcpinfrastructure "github.com/gardener/gardener-extensions/controllers/provider-gcp/pkg/controller/infrastructure"
gcpworker "github.com/gardener/gardener-extensions/controllers/provider-gcp/pkg/controller/worker"
"github.com/gardener/gardener-extensions/controllers/provider-gcp/pkg/gcp"
gcpcontrolplanebackup "github.com/gardener/gardener-extensions/controllers/provider-gcp/pkg/webhook/controlplanebackup"
gcpcontrolplaneexposure "github.com/gardener/gardener-extensions/controllers/provider-gcp/pkg/webhook/controlplaneexposure"
"github.com/gardener/gardener-extensions/pkg/controller"
controllercmd "github.com/gardener/gardener-extensions/pkg/controller/cmd"
"github.com/gardener/gardener-extensions/pkg/controller/worker"
"github.com/gardener/gardener-extensions/pkg/util"
webhookcmd "github.com/gardener/gardener-extensions/pkg/webhook/cmd"
machinev1alpha1 "github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
// NewControllerManagerCommand creates a new command for running a GCP provider controller.
func NewControllerManagerCommand(ctx context.Context) *cobra.Command {
var (
restOpts = &controllercmd.RESTOptions{}
mgrOpts = &controllercmd.ManagerOptions{
LeaderElection: true,
LeaderElectionID: controllercmd.LeaderElectionNameID(gcp.Name),
LeaderElectionNamespace: os.Getenv("LEADER_ELECTION_NAMESPACE"),
WebhookServerPort: 443,
WebhookCertDir: "/tmp/gardener-extensions-cert",
}
configFileOpts = &gcpcmd.ConfigOptions{}
// options for the backupbucket controller
backupBucketCtrlOpts = &controllercmd.ControllerOptions{
MaxConcurrentReconciles: 5,
}
// options for the backupentry controller
backupEntryCtrlOpts = &controllercmd.ControllerOptions{
MaxConcurrentReconciles: 5,
}
// options for the health care controller
healthCheckCtrlOpts = &controllercmd.ControllerOptions{
MaxConcurrentReconciles: 5,
}
// options for the controlplane controller
controlPlaneCtrlOpts = &controllercmd.ControllerOptions{
MaxConcurrentReconciles: 5,
}
// options for the infrastructure controller
infraCtrlOpts = &controllercmd.ControllerOptions{
MaxConcurrentReconciles: 5,
}
reconcileOpts = &controllercmd.ReconcilerOptions{}
// options for the worker controller
workerCtrlOpts = &controllercmd.ControllerOptions{
MaxConcurrentReconciles: 5,
}
workerReconcileOpts = &worker.Options{
DeployCRDs: true,
}
workerCtrlOptsUnprefixed = controllercmd.NewOptionAggregator(workerCtrlOpts, workerReconcileOpts)
// options for the webhook server
webhookServerOptions = &webhookcmd.ServerOptions{
Namespace: os.Getenv("WEBHOOK_CONFIG_NAMESPACE"),
}
controllerSwitches = gcpcmd.ControllerSwitchOptions()
webhookSwitches = gcpcmd.WebhookSwitchOptions()
webhookOptions = webhookcmd.NewAddToManagerOptions(gcp.Name, webhookServerOptions, webhookSwitches)
aggOption = controllercmd.NewOptionAggregator(
restOpts,
mgrOpts,
controllercmd.PrefixOption("backupbucket-", backupBucketCtrlOpts),
controllercmd.PrefixOption("backupentry-", backupEntryCtrlOpts),
controllercmd.PrefixOption("controlplane-", controlPlaneCtrlOpts),
controllercmd.PrefixOption("infrastructure-", infraCtrlOpts),
controllercmd.PrefixOption("worker-", &workerCtrlOptsUnprefixed),
controllercmd.PrefixOption("healthcheck-", healthCheckCtrlOpts),
configFileOpts,
controllerSwitches,
reconcileOpts,
webhookOptions,
)
)
cmd := &cobra.Command{
Use: fmt.Sprintf("%s-controller-manager", gcp.Name),
Run: func(cmd *cobra.Command, args []string) {
if err := aggOption.Complete(); err != nil {
controllercmd.LogErrAndExit(err, "Error completing options")
}
util.ApplyClientConnectionConfigurationToRESTConfig(configFileOpts.Completed().Config.ClientConnection, restOpts.Completed().Config)
if workerReconcileOpts.Completed().DeployCRDs {
if err := worker.ApplyMachineResourcesForConfig(ctx, restOpts.Completed().Config); err != nil {
controllercmd.LogErrAndExit(err, "Error ensuring the machine CRDs")
}
}
mgr, err := manager.New(restOpts.Completed().Config, mgrOpts.Completed().Options())
if err != nil {
controllercmd.LogErrAndExit(err, "Could not instantiate manager")
}
scheme := mgr.GetScheme()
if err := controller.AddToScheme(scheme); err != nil {
controllercmd.LogErrAndExit(err, "Could not update manager scheme")
}
if err := gcpinstall.AddToScheme(scheme); err != nil {
controllercmd.LogErrAndExit(err, "Could not update manager scheme")
}
// add common meta types to schema for controller-runtime to use v1.ListOptions
metav1.AddToGroupVersion(scheme, machinev1alpha1.SchemeGroupVersion)
// add types required for Health check
scheme.AddKnownTypes(machinev1alpha1.SchemeGroupVersion,
&machinev1alpha1.MachineDeploymentList{},
)
configFileOpts.Completed().ApplyETCDStorage(&gcpcontrolplaneexposure.DefaultAddOptions.ETCDStorage)
configFileOpts.Completed().ApplyETCDBackup(&gcpcontrolplanebackup.DefaultAddOptions.ETCDBackup)
configFileOpts.Completed().ApplyHealthCheckConfig(&healthcheck.DefaultAddOptions.HealthCheckConfig)
healthCheckCtrlOpts.Completed().Apply(&healthcheck.DefaultAddOptions.Controller)
backupBucketCtrlOpts.Completed().Apply(&gcpbackupbucket.DefaultAddOptions.Controller)
backupEntryCtrlOpts.Completed().Apply(&gcpbackupentry.DefaultAddOptions.Controller)
controlPlaneCtrlOpts.Completed().Apply(&gcpcontrolplane.DefaultAddOptions.Controller)
infraCtrlOpts.Completed().Apply(&gcpinfrastructure.DefaultAddOptions.Controller)
reconcileOpts.Completed().Apply(&gcpinfrastructure.DefaultAddOptions.IgnoreOperationAnnotation)
reconcileOpts.Completed().Apply(&gcpcontrolplane.DefaultAddOptions.IgnoreOperationAnnotation)
reconcileOpts.Completed().Apply(&gcpworker.DefaultAddOptions.IgnoreOperationAnnotation)
workerCtrlOpts.Completed().Apply(&gcpworker.DefaultAddOptions.Controller)
if _, _, err := webhookOptions.Completed().AddToManager(mgr); err != nil {
controllercmd.LogErrAndExit(err, "Could not add webhooks to manager")
}
if err := controllerSwitches.Completed().AddToManager(mgr); err != nil {
controllercmd.LogErrAndExit(err, "Could not add controllers to manager")
}
if err := mgr.Start(ctx.Done()); err != nil {
controllercmd.LogErrAndExit(err, "Error running manager")
}
},
}
aggOption.AddFlags(cmd.Flags())
return cmd
}
| [
"\"LEADER_ELECTION_NAMESPACE\"",
"\"WEBHOOK_CONFIG_NAMESPACE\""
] | [] | [
"LEADER_ELECTION_NAMESPACE",
"WEBHOOK_CONFIG_NAMESPACE"
] | [] | ["LEADER_ELECTION_NAMESPACE", "WEBHOOK_CONFIG_NAMESPACE"] | go | 2 | 0 | |
src/cmd/pprof/internal/driver/driver.go | // Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package driver implements the core pprof functionality. It can be
// parameterized with a flag implementation, fetch and symbolize
// mechanisms.
package driver
import (
"bytes"
"fmt"
"io"
"net/url"
"os"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"
"cmd/pprof/internal/commands"
"cmd/pprof/internal/plugin"
"cmd/pprof/internal/profile"
"cmd/pprof/internal/report"
"cmd/pprof/internal/tempfile"
)
// PProf acquires a profile, and symbolizes it using a profile
// manager. Then it generates a report formatted according to the
// options selected through the flags package.
func PProf(flagset plugin.FlagSet, fetch plugin.Fetcher, sym plugin.Symbolizer, obj plugin.ObjTool, ui plugin.UI, overrides commands.Commands) error {
// Remove any temporary files created during pprof processing.
defer tempfile.Cleanup()
f, err := getFlags(flagset, overrides, ui)
if err != nil {
return err
}
obj.SetConfig(*f.flagTools)
sources := f.profileSource
if len(sources) > 1 {
source := sources[0]
// If the first argument is a supported object file, treat as executable.
if file, err := obj.Open(source, 0); err == nil {
file.Close()
f.profileExecName = source
sources = sources[1:]
} else if *f.flagBuildID == "" && isBuildID(source) {
f.flagBuildID = &source
sources = sources[1:]
}
}
// errMu protects concurrent accesses to errset and err. errset is set if an
// error is encountered by one of the goroutines grabbing a profile.
errMu, errset := sync.Mutex{}, false
// Fetch profiles.
wg := sync.WaitGroup{}
profs := make([]*profile.Profile, len(sources))
for i, source := range sources {
wg.Add(1)
go func(i int, src string) {
defer wg.Done()
p, grabErr := grabProfile(src, f.profileExecName, *f.flagBuildID, fetch, sym, obj, ui, f)
if grabErr != nil {
errMu.Lock()
defer errMu.Unlock()
errset, err = true, grabErr
return
}
profs[i] = p
}(i, source)
}
wg.Wait()
if errset {
return err
}
// Merge profiles.
prof := profs[0]
for _, p := range profs[1:] {
if err = prof.Merge(p, 1); err != nil {
return err
}
}
if *f.flagBase != "" {
// Fetch base profile and subtract from current profile.
base, err := grabProfile(*f.flagBase, f.profileExecName, *f.flagBuildID, fetch, sym, obj, ui, f)
if err != nil {
return err
}
if err = prof.Merge(base, -1); err != nil {
return err
}
}
if err := processFlags(prof, ui, f); err != nil {
return err
}
if !*f.flagRuntime {
prof.RemoveUninteresting()
}
if *f.flagInteractive {
return interactive(prof, obj, ui, f)
}
return generate(false, prof, obj, ui, f)
}
// isBuildID determines if the profile may contain a build ID, by
// checking that it is a string of hex digits.
func isBuildID(id string) bool {
return strings.Trim(id, "0123456789abcdefABCDEF") == ""
}
// adjustURL updates the profile source URL based on heuristics. It
// will append ?seconds=sec for CPU profiles if not already
// specified. Returns the hostname if the profile is remote.
func adjustURL(source string, sec int, ui plugin.UI) (adjusted, host string, duration time.Duration) {
// If there is a local file with this name, just use it.
if _, err := os.Stat(source); err == nil {
return source, "", 0
}
url, err := url.Parse(source)
// Automatically add http:// to URLs of the form hostname:port/path.
// url.Parse treats "hostname" as the Scheme.
if err != nil || (url.Host == "" && url.Scheme != "" && url.Scheme != "file") {
url, err = url.Parse("http://" + source)
if err != nil {
return source, url.Host, time.Duration(30) * time.Second
}
}
if scheme := strings.ToLower(url.Scheme); scheme == "" || scheme == "file" {
url.Scheme = ""
return url.String(), "", 0
}
values := url.Query()
if urlSeconds := values.Get("seconds"); urlSeconds != "" {
if us, err := strconv.ParseInt(urlSeconds, 10, 32); err == nil {
if sec >= 0 {
ui.PrintErr("Overriding -seconds for URL ", source)
}
sec = int(us)
}
}
switch strings.ToLower(url.Path) {
case "", "/":
// Apply default /profilez.
url.Path = "/profilez"
case "/protoz":
// Rewrite to /profilez?type=proto
url.Path = "/profilez"
values.Set("type", "proto")
}
if hasDuration(url.Path) {
if sec > 0 {
duration = time.Duration(sec) * time.Second
values.Set("seconds", fmt.Sprintf("%d", sec))
} else {
// Assume default duration: 30 seconds
duration = 30 * time.Second
}
}
url.RawQuery = values.Encode()
return url.String(), url.Host, duration
}
func hasDuration(path string) bool {
for _, trigger := range []string{"profilez", "wallz", "/profile"} {
if strings.Contains(path, trigger) {
return true
}
}
return false
}
// preprocess does filtering and aggregation of a profile based on the
// requested options.
func preprocess(prof *profile.Profile, ui plugin.UI, f *flags) error {
if *f.flagFocus != "" || *f.flagIgnore != "" || *f.flagHide != "" {
focus, ignore, hide, err := compileFocusIgnore(*f.flagFocus, *f.flagIgnore, *f.flagHide)
if err != nil {
return err
}
fm, im, hm := prof.FilterSamplesByName(focus, ignore, hide)
warnNoMatches(fm, *f.flagFocus, "Focus", ui)
warnNoMatches(im, *f.flagIgnore, "Ignore", ui)
warnNoMatches(hm, *f.flagHide, "Hide", ui)
}
if *f.flagTagFocus != "" || *f.flagTagIgnore != "" {
focus, err := compileTagFilter(*f.flagTagFocus, ui)
if err != nil {
return err
}
ignore, err := compileTagFilter(*f.flagTagIgnore, ui)
if err != nil {
return err
}
fm, im := prof.FilterSamplesByTag(focus, ignore)
warnNoMatches(fm, *f.flagTagFocus, "TagFocus", ui)
warnNoMatches(im, *f.flagTagIgnore, "TagIgnore", ui)
}
return aggregate(prof, f)
}
func compileFocusIgnore(focus, ignore, hide string) (f, i, h *regexp.Regexp, err error) {
if focus != "" {
if f, err = regexp.Compile(focus); err != nil {
return nil, nil, nil, fmt.Errorf("parsing focus regexp: %v", err)
}
}
if ignore != "" {
if i, err = regexp.Compile(ignore); err != nil {
return nil, nil, nil, fmt.Errorf("parsing ignore regexp: %v", err)
}
}
if hide != "" {
if h, err = regexp.Compile(hide); err != nil {
return nil, nil, nil, fmt.Errorf("parsing hide regexp: %v", err)
}
}
return
}
func compileTagFilter(filter string, ui plugin.UI) (f func(string, string, int64) bool, err error) {
if filter == "" {
return nil, nil
}
if numFilter := parseTagFilterRange(filter); numFilter != nil {
ui.PrintErr("Interpreted '", filter, "' as range, not regexp")
return func(key, val string, num int64) bool {
if val != "" {
return false
}
return numFilter(num, key)
}, nil
}
fx, err := regexp.Compile(filter)
if err != nil {
return nil, err
}
return func(key, val string, num int64) bool {
if val == "" {
return false
}
return fx.MatchString(key + ":" + val)
}, nil
}
var tagFilterRangeRx = regexp.MustCompile("([[:digit:]]+)([[:alpha:]]+)")
// parseTagFilterRange returns a function to checks if a value is
// contained on the range described by a string. It can recognize
// strings of the form:
// "32kb" -- matches values == 32kb
// ":64kb" -- matches values <= 64kb
// "4mb:" -- matches values >= 4mb
// "12kb:64mb" -- matches values between 12kb and 64mb (both included).
func parseTagFilterRange(filter string) func(int64, string) bool {
ranges := tagFilterRangeRx.FindAllStringSubmatch(filter, 2)
if len(ranges) == 0 {
return nil // No ranges were identified
}
v, err := strconv.ParseInt(ranges[0][1], 10, 64)
if err != nil {
panic(fmt.Errorf("Failed to parse int %s: %v", ranges[0][1], err))
}
value, unit := report.ScaleValue(v, ranges[0][2], ranges[0][2])
if len(ranges) == 1 {
switch match := ranges[0][0]; filter {
case match:
return func(v int64, u string) bool {
sv, su := report.ScaleValue(v, u, unit)
return su == unit && sv == value
}
case match + ":":
return func(v int64, u string) bool {
sv, su := report.ScaleValue(v, u, unit)
return su == unit && sv >= value
}
case ":" + match:
return func(v int64, u string) bool {
sv, su := report.ScaleValue(v, u, unit)
return su == unit && sv <= value
}
}
return nil
}
if filter != ranges[0][0]+":"+ranges[1][0] {
return nil
}
if v, err = strconv.ParseInt(ranges[1][1], 10, 64); err != nil {
panic(fmt.Errorf("Failed to parse int %s: %v", ranges[1][1], err))
}
value2, unit2 := report.ScaleValue(v, ranges[1][2], unit)
if unit != unit2 {
return nil
}
return func(v int64, u string) bool {
sv, su := report.ScaleValue(v, u, unit)
return su == unit && sv >= value && sv <= value2
}
}
func warnNoMatches(match bool, rx, option string, ui plugin.UI) {
if !match && rx != "" && rx != "." {
ui.PrintErr(option + " expression matched no samples: " + rx)
}
}
// grabProfile fetches and symbolizes a profile.
func grabProfile(source, exec, buildid string, fetch plugin.Fetcher, sym plugin.Symbolizer, obj plugin.ObjTool, ui plugin.UI, f *flags) (*profile.Profile, error) {
source, host, duration := adjustURL(source, *f.flagSeconds, ui)
remote := host != ""
if remote {
ui.Print("Fetching profile from ", source)
if duration != 0 {
ui.Print("Please wait... (" + duration.String() + ")")
}
}
now := time.Now()
// Fetch profile from source.
// Give 50% slack on the timeout.
p, err := fetch(source, duration+duration/2, ui)
if err != nil {
return nil, err
}
// Update the time/duration if the profile source doesn't include it.
// TODO(rsilvera): Remove this when we remove support for legacy profiles.
if remote {
if p.TimeNanos == 0 {
p.TimeNanos = now.UnixNano()
}
if duration != 0 && p.DurationNanos == 0 {
p.DurationNanos = int64(duration)
}
}
// Replace executable/buildID with the options provided in the
// command line. Assume the executable is the first Mapping entry.
if exec != "" || buildid != "" {
if len(p.Mapping) == 0 {
// Create a fake mapping to hold the user option, and associate
// all samples to it.
m := &profile.Mapping{
ID: 1,
}
for _, l := range p.Location {
l.Mapping = m
}
p.Mapping = []*profile.Mapping{m}
}
if exec != "" {
p.Mapping[0].File = exec
}
if buildid != "" {
p.Mapping[0].BuildID = buildid
}
}
if err := sym(*f.flagSymbolize, source, p, obj, ui); err != nil {
return nil, err
}
// Save a copy of any remote profiles, unless the user is explicitly
// saving it.
if remote && !f.isFormat("proto") {
prefix := "pprof."
if len(p.Mapping) > 0 && p.Mapping[0].File != "" {
prefix = prefix + filepath.Base(p.Mapping[0].File) + "."
}
if !strings.ContainsRune(host, os.PathSeparator) {
prefix = prefix + host + "."
}
for _, s := range p.SampleType {
prefix = prefix + s.Type + "."
}
dir := os.Getenv("PPROF_TMPDIR")
tempFile, err := tempfile.New(dir, prefix, ".pb.gz")
if err == nil {
if err = p.Write(tempFile); err == nil {
ui.PrintErr("Saved profile in ", tempFile.Name())
}
}
if err != nil {
ui.PrintErr("Could not save profile: ", err)
}
}
if err := p.Demangle(obj.Demangle); err != nil {
ui.PrintErr("Failed to demangle profile: ", err)
}
if err := p.CheckValid(); err != nil {
return nil, fmt.Errorf("Grab %s: %v", source, err)
}
return p, nil
}
type flags struct {
flagInteractive *bool // Accept commands interactively
flagCommands map[string]*bool // pprof commands without parameters
flagParamCommands map[string]*string // pprof commands with parameters
flagSVGPan *string // URL to fetch the SVG Pan library
flagOutput *string // Output file name
flagCum *bool // Sort by cumulative data
flagCallTree *bool // generate a context-sensitive call tree
flagAddresses *bool // Report at address level
flagLines *bool // Report at source line level
flagFiles *bool // Report at file level
flagFunctions *bool // Report at function level [default]
flagSymbolize *string // Symbolization options (=none to disable)
flagBuildID *string // Override build if for first mapping
flagNodeCount *int // Max number of nodes to show
flagNodeFraction *float64 // Hide nodes below <f>*total
flagEdgeFraction *float64 // Hide edges below <f>*total
flagTrim *bool // Set to false to ignore NodeCount/*Fraction
flagRuntime *bool // Show runtime call frames in memory profiles
flagFocus *string // Restricts to paths going through a node matching regexp
flagIgnore *string // Skips paths going through any nodes matching regexp
flagHide *string // Skips sample locations matching regexp
flagTagFocus *string // Restrict to samples tagged with key:value matching regexp
flagTagIgnore *string // Discard samples tagged with key:value matching regexp
flagDropNegative *bool // Skip negative values
flagBase *string // Source for base profile to user for comparison
flagSeconds *int // Length of time for dynamic profiles
flagTotalDelay *bool // Display total delay at each region
flagContentions *bool // Display number of delays at each region
flagMeanDelay *bool // Display mean delay at each region
flagInUseSpace *bool // Display in-use memory size
flagInUseObjects *bool // Display in-use object counts
flagAllocSpace *bool // Display allocated memory size
flagAllocObjects *bool // Display allocated object counts
flagDisplayUnit *string // Measurement unit to use on reports
flagDivideBy *float64 // Ratio to divide sample values
flagSampleIndex *int // Sample value to use in reports.
flagMean *bool // Use mean of sample_index over count
flagTools *string
profileSource []string
profileExecName string
extraUsage string
commands commands.Commands
}
func (f *flags) isFormat(format string) bool {
if fl := f.flagCommands[format]; fl != nil {
return *fl
}
if fl := f.flagParamCommands[format]; fl != nil {
return *fl != ""
}
return false
}
// String provides a printable representation for the current set of flags.
func (f *flags) String(p *profile.Profile) string {
var ret string
if ix := *f.flagSampleIndex; ix != -1 {
ret += fmt.Sprintf(" %-25s : %d (%s)\n", "sample_index", ix, p.SampleType[ix].Type)
}
if ix := *f.flagMean; ix {
ret += boolFlagString("mean")
}
if *f.flagDisplayUnit != "minimum" {
ret += stringFlagString("unit", *f.flagDisplayUnit)
}
switch {
case *f.flagInteractive:
ret += boolFlagString("interactive")
}
for name, fl := range f.flagCommands {
if *fl {
ret += boolFlagString(name)
}
}
if *f.flagCum {
ret += boolFlagString("cum")
}
if *f.flagCallTree {
ret += boolFlagString("call_tree")
}
switch {
case *f.flagAddresses:
ret += boolFlagString("addresses")
case *f.flagLines:
ret += boolFlagString("lines")
case *f.flagFiles:
ret += boolFlagString("files")
case *f.flagFunctions:
ret += boolFlagString("functions")
}
if *f.flagNodeCount != -1 {
ret += intFlagString("nodecount", *f.flagNodeCount)
}
ret += floatFlagString("nodefraction", *f.flagNodeFraction)
ret += floatFlagString("edgefraction", *f.flagEdgeFraction)
if *f.flagFocus != "" {
ret += stringFlagString("focus", *f.flagFocus)
}
if *f.flagIgnore != "" {
ret += stringFlagString("ignore", *f.flagIgnore)
}
if *f.flagHide != "" {
ret += stringFlagString("hide", *f.flagHide)
}
if *f.flagTagFocus != "" {
ret += stringFlagString("tagfocus", *f.flagTagFocus)
}
if *f.flagTagIgnore != "" {
ret += stringFlagString("tagignore", *f.flagTagIgnore)
}
return ret
}
func boolFlagString(label string) string {
return fmt.Sprintf(" %-25s : true\n", label)
}
func stringFlagString(label, value string) string {
return fmt.Sprintf(" %-25s : %s\n", label, value)
}
func intFlagString(label string, value int) string {
return fmt.Sprintf(" %-25s : %d\n", label, value)
}
func floatFlagString(label string, value float64) string {
return fmt.Sprintf(" %-25s : %f\n", label, value)
}
// Utility routines to set flag values.
func newBool(b bool) *bool {
return &b
}
func newString(s string) *string {
return &s
}
func newFloat64(fl float64) *float64 {
return &fl
}
func newInt(i int) *int {
return &i
}
func (f *flags) usage(ui plugin.UI) {
var commandMsg []string
for name, cmd := range f.commands {
if cmd.HasParam {
name = name + "=p"
}
commandMsg = append(commandMsg,
fmt.Sprintf(" -%-16s %s", name, cmd.Usage))
}
sort.Strings(commandMsg)
text := usageMsgHdr + strings.Join(commandMsg, "\n") + "\n" + usageMsg + "\n"
if f.extraUsage != "" {
text += f.extraUsage + "\n"
}
text += usageMsgVars
ui.Print(text)
}
// getFlags registers every pprof command-line flag on the given FlagSet,
// merges in caller-supplied command overrides, parses the arguments, and
// returns the populated flags struct. It returns an error when no profile
// source remains after flag parsing. Side effects: may enable legacy heap
// allocation parsing and may create a default PPROF_TMPDIR directory.
func getFlags(flag plugin.FlagSet, overrides commands.Commands, ui plugin.UI) (*flags, error) {
	f := &flags{
		flagInteractive: flag.Bool("interactive", false, "Accepts commands interactively"),
		flagCommands: make(map[string]*bool),
		flagParamCommands: make(map[string]*string),
		// Filename for file-based output formats, stdout by default.
		flagOutput: flag.String("output", "", "Output filename for file-based outputs "),
		// Comparisons.
		flagBase: flag.String("base", "", "Source for base profile for comparison"),
		flagDropNegative: flag.Bool("drop_negative", false, "Ignore negative differences"),
		flagSVGPan: flag.String("svgpan", "https://www.cyberz.org/projects/SVGPan/SVGPan.js", "URL for SVGPan Library"),
		// Data sorting criteria.
		flagCum: flag.Bool("cum", false, "Sort by cumulative data"),
		// Graph handling options.
		flagCallTree: flag.Bool("call_tree", false, "Create a context-sensitive call tree"),
		// Granularity of output resolution.
		flagAddresses: flag.Bool("addresses", false, "Report at address level"),
		flagLines: flag.Bool("lines", false, "Report at source line level"),
		flagFiles: flag.Bool("files", false, "Report at source file level"),
		flagFunctions: flag.Bool("functions", false, "Report at function level [default]"),
		// Internal options.
		flagSymbolize: flag.String("symbolize", "", "Options for profile symbolization"),
		flagBuildID: flag.String("buildid", "", "Override build id for first mapping"),
		// Filtering options
		flagNodeCount: flag.Int("nodecount", -1, "Max number of nodes to show"),
		flagNodeFraction: flag.Float64("nodefraction", 0.005, "Hide nodes below <f>*total"),
		flagEdgeFraction: flag.Float64("edgefraction", 0.001, "Hide edges below <f>*total"),
		flagTrim: flag.Bool("trim", true, "Honor nodefraction/edgefraction/nodecount defaults"),
		flagRuntime: flag.Bool("runtime", false, "Show runtime call frames in memory profiles"),
		flagFocus: flag.String("focus", "", "Restricts to paths going through a node matching regexp"),
		flagIgnore: flag.String("ignore", "", "Skips paths going through any nodes matching regexp"),
		flagHide: flag.String("hide", "", "Skips nodes matching regexp"),
		flagTagFocus: flag.String("tagfocus", "", "Restrict to samples with tags in range or matched by regexp"),
		flagTagIgnore: flag.String("tagignore", "", "Discard samples with tags in range or matched by regexp"),
		// CPU profile options
		flagSeconds: flag.Int("seconds", -1, "Length of time for dynamic profiles"),
		// Heap profile options
		flagInUseSpace: flag.Bool("inuse_space", false, "Display in-use memory size"),
		flagInUseObjects: flag.Bool("inuse_objects", false, "Display in-use object counts"),
		flagAllocSpace: flag.Bool("alloc_space", false, "Display allocated memory size"),
		flagAllocObjects: flag.Bool("alloc_objects", false, "Display allocated object counts"),
		flagDisplayUnit: flag.String("unit", "minimum", "Measurement units to display"),
		flagDivideBy: flag.Float64("divide_by", 1.0, "Ratio to divide all samples before visualization"),
		flagSampleIndex: flag.Int("sample_index", -1, "Index of sample value to report"),
		flagMean: flag.Bool("mean", false, "Average sample value over first value (count)"),
		// Contention profile options
		flagTotalDelay: flag.Bool("total_delay", false, "Display total delay at each region"),
		flagContentions: flag.Bool("contentions", false, "Display number of delays at each region"),
		flagMeanDelay: flag.Bool("mean_delay", false, "Display mean delay at each region"),
		flagTools: flag.String("tools", os.Getenv("PPROF_TOOLS"), "Path for object tool pathnames"),
		extraUsage: flag.ExtraUsage(),
	}
	// Flags used during command processing
	interactive := &f.flagInteractive
	svgpan := &f.flagSVGPan
	f.commands = commands.PProf(functionCompleter, interactive, svgpan)
	// Override commands
	for name, cmd := range overrides {
		f.commands[name] = cmd
	}
	// Register one flag per report format: a string (regexp) flag for
	// formats that take a parameter, a boolean flag otherwise.
	for name, cmd := range f.commands {
		if cmd.HasParam {
			f.flagParamCommands[name] = flag.String(name, "", "Generate a report in "+name+" format, matching regexp")
		} else {
			f.flagCommands[name] = flag.Bool(name, false, "Generate a report in "+name+" format")
		}
	}
	args := flag.Parse(func() { f.usage(ui) })
	if len(args) == 0 {
		return nil, fmt.Errorf("no profile source specified")
	}
	f.profileSource = args
	// Instruct legacy heapz parsers to grab historical allocation data,
	// instead of the default in-use data. Not available with tcmalloc.
	if *f.flagAllocSpace || *f.flagAllocObjects {
		profile.LegacyHeapAllocated = true
	}
	// Default PPROF_TMPDIR to $HOME/pprof and ensure it exists.
	// NOTE(review): the directory is created only when PPROF_TMPDIR was
	// unset; a user-supplied value is assumed to already exist — confirm
	// that is intentional.
	if profileDir := os.Getenv("PPROF_TMPDIR"); profileDir == "" {
		profileDir = os.Getenv("HOME") + "/pprof"
		os.Setenv("PPROF_TMPDIR", profileDir)
		if err := os.MkdirAll(profileDir, 0755); err != nil {
			return nil, fmt.Errorf("failed to access temp dir %s: %v", profileDir, err)
		}
	}
	return f, nil
}
// processFlags reconciles the parsed flags against each other and against
// the profile: some report formats force a particular granularity, trimming
// can be disabled wholesale, at most one output format and one granularity
// may be selected, and the per-profile-type sample selection flags are
// folded into a single sample index stored back into f.
func processFlags(p *profile.Profile, ui plugin.UI, f *flags) error {
	flagDis := f.isFormat("disasm")
	flagPeek := f.isFormat("peek")
	flagWebList := f.isFormat("weblist")
	flagList := f.isFormat("list")
	if flagDis || flagWebList {
		// Collect all samples at address granularity for assembly
		// listing.
		f.flagNodeCount = newInt(0)
		f.flagAddresses = newBool(true)
		f.flagLines = newBool(false)
		f.flagFiles = newBool(false)
		f.flagFunctions = newBool(false)
	}
	if flagPeek {
		// Collect all samples at function granularity for peek command
		f.flagNodeCount = newInt(0)
		f.flagAddresses = newBool(false)
		f.flagLines = newBool(false)
		f.flagFiles = newBool(false)
		f.flagFunctions = newBool(true)
	}
	if flagList {
		// Collect all samples at fileline granularity for source
		// listing.
		f.flagNodeCount = newInt(0)
		f.flagAddresses = newBool(false)
		f.flagLines = newBool(true)
		f.flagFiles = newBool(false)
		f.flagFunctions = newBool(false)
	}
	// -trim=false disables every trimming heuristic at once.
	if !*f.flagTrim {
		f.flagNodeCount = newInt(0)
		f.flagNodeFraction = newFloat64(0)
		f.flagEdgeFraction = newFloat64(0)
	}
	// No output format selected implies interactive mode; more than one
	// is an error.
	if oc := countFlagMap(f.flagCommands, f.flagParamCommands); oc == 0 {
		f.flagInteractive = newBool(true)
	} else if oc > 1 {
		f.usage(ui)
		return fmt.Errorf("must set at most one output format")
	}
	// Apply nodecount defaults for non-interactive mode. The
	// interactive shell will apply defaults for the interactive mode.
	if *f.flagNodeCount < 0 && !*f.flagInteractive {
		switch {
		default:
			f.flagNodeCount = newInt(80)
		case f.isFormat("text"):
			f.flagNodeCount = newInt(0)
		}
	}
	// Apply legacy options and diagnose conflicts.
	if rc := countFlags([]*bool{f.flagAddresses, f.flagLines, f.flagFiles, f.flagFunctions}); rc == 0 {
		f.flagFunctions = newBool(true)
	} else if rc > 1 {
		f.usage(ui)
		return fmt.Errorf("must set at most one granularity option")
	}
	// Fold the sample-selection flags into one index. Each sampleIndex
	// call consumes its flag, threading si and err through the chain so
	// conflicting selections or missing sample types surface as errors.
	var err error
	si, sm := *f.flagSampleIndex, *f.flagMean || *f.flagMeanDelay
	si, err = sampleIndex(p, &f.flagTotalDelay, si, 1, "delay", "-total_delay", err)
	si, err = sampleIndex(p, &f.flagMeanDelay, si, 1, "delay", "-mean_delay", err)
	si, err = sampleIndex(p, &f.flagContentions, si, 0, "contentions", "-contentions", err)
	si, err = sampleIndex(p, &f.flagInUseSpace, si, 1, "inuse_space", "-inuse_space", err)
	si, err = sampleIndex(p, &f.flagInUseObjects, si, 0, "inuse_objects", "-inuse_objects", err)
	si, err = sampleIndex(p, &f.flagAllocSpace, si, 1, "alloc_space", "-alloc_space", err)
	si, err = sampleIndex(p, &f.flagAllocObjects, si, 0, "alloc_objects", "-alloc_objects", err)
	if si == -1 {
		// Use last value if none is requested.
		si = len(p.SampleType) - 1
	} else if si < 0 || si >= len(p.SampleType) {
		err = fmt.Errorf("sample_index value %d out of range [0..%d]", si, len(p.SampleType)-1)
	}
	if err != nil {
		f.usage(ui)
		return err
	}
	f.flagSampleIndex, f.flagMean = newInt(si), newBool(sm)
	return nil
}
// sampleIndex consumes one boolean sample-selection flag. If the flag is
// set (and no earlier error occurred) it validates that the profile has a
// sample of the requested type at newSampleIndex and returns that index;
// selecting more than one option, or an option the profile lacks, is an
// error. The flag is reset so callers can detect later duplicates.
func sampleIndex(p *profile.Profile, flag **bool,
	sampleIndex int,
	newSampleIndex int,
	sampleType, option string,
	err error) (int, error) {
	// Propagate earlier failures untouched.
	if err != nil {
		return sampleIndex, err
	}
	// Flag not set: keep the current selection.
	if !**flag {
		return sampleIndex, nil
	}
	// Consume the flag so it is not counted again.
	*flag = newBool(false)
	if sampleIndex != -1 {
		return 0, fmt.Errorf("set at most one sample value selection option")
	}
	valid := newSampleIndex < len(p.SampleType) &&
		p.SampleType[newSampleIndex].Type == sampleType
	if !valid {
		return 0, fmt.Errorf("option %s not valid for this profile", option)
	}
	return newSampleIndex, nil
}
// countFlags reports how many of the given boolean flags are set.
func countFlags(bs []*bool) int {
	n := 0
	for i := range bs {
		if *bs[i] {
			n++
		}
	}
	return n
}
// countFlagMap reports how many output-format selections are active:
// boolean command flags set to true plus parameterized command flags
// whose regexp argument is non-empty.
func countFlagMap(bms map[string]*bool, bmrxs map[string]*string) int {
	n := 0
	for _, set := range bms {
		if *set {
			n++
		}
	}
	for _, rx := range bmrxs {
		if *rx != "" {
			n++
		}
	}
	return n
}
// usageMsgHdr opens the usage text: command synopsis plus the heading for
// the dynamically generated list of output-format flags.
var usageMsgHdr = "usage: pprof [options] [binary] <profile source> ...\n" +
	"Output format (only set one):\n"

// usageMsg documents the fixed (non-format) flags, printed after the
// generated format list.
var usageMsg = "Output file parameters (for file-based output formats):\n" +
	" -output=f Generate output on file f (stdout by default)\n" +
	"Output granularity (only set one):\n" +
	" -functions Report at function level [default]\n" +
	" -files Report at source file level\n" +
	" -lines Report at source line level\n" +
	" -addresses Report at address level\n" +
	"Comparison options:\n" +
	" -base <profile> Show delta from this profile\n" +
	" -drop_negative Ignore negative differences\n" +
	"Sorting options:\n" +
	" -cum Sort by cumulative data\n\n" +
	"Dynamic profile options:\n" +
	" -seconds=N Length of time for dynamic profiles\n" +
	"Profile trimming options:\n" +
	" -nodecount=N Max number of nodes to show\n" +
	" -nodefraction=f Hide nodes below <f>*total\n" +
	" -edgefraction=f Hide edges below <f>*total\n" +
	"Sample value selection option (by index):\n" +
	" -sample_index Index of sample value to display\n" +
	" -mean Average sample value over first value\n" +
	"Sample value selection option (for heap profiles):\n" +
	" -inuse_space Display in-use memory size\n" +
	" -inuse_objects Display in-use object counts\n" +
	" -alloc_space Display allocated memory size\n" +
	" -alloc_objects Display allocated object counts\n" +
	"Sample value selection option (for contention profiles):\n" +
	" -total_delay Display total delay at each region\n" +
	" -contentions Display number of delays at each region\n" +
	" -mean_delay Display mean delay at each region\n" +
	"Filtering options:\n" +
	" -runtime Show runtime call frames in memory profiles\n" +
	" -focus=r Restricts to paths going through a node matching regexp\n" +
	" -ignore=r Skips paths going through any nodes matching regexp\n" +
	" -tagfocus=r Restrict to samples tagged with key:value matching regexp\n" +
	" Restrict to samples with numeric tags in range (eg \"32kb:1mb\")\n" +
	" -tagignore=r Discard samples tagged with key:value matching regexp\n" +
	" Avoid samples with numeric tags in range (eg \"1mb:\")\n" +
	"Miscellaneous:\n" +
	" -call_tree Generate a context-sensitive call tree\n" +
	" -unit=u Convert all samples to unit u for display\n" +
	" -divide_by=f Scale all samples by dividing them by f\n" +
	" -buildid=id Override build id for main binary in profile\n" +
	" -tools=path Search path for object-level tools\n" +
	" -help This message"

// usageMsgVars documents the environment variables pprof consults; printed
// last by (*flags).usage.
var usageMsgVars = "Environment Variables:\n" +
	" PPROF_TMPDIR Location for temporary files (default $HOME/pprof)\n" +
	" PPROF_TOOLS Search path for object-level tools\n" +
	" PPROF_BINARY_PATH Search path for local binary files\n" +
	" default: $HOME/pprof/binaries\n" +
	" finds binaries by $name and $buildid/$name"
// aggregate merges profile samples at the granularity implied by the
// selected output format or granularity flags. Case order matters:
// raw/proto skip aggregation entirely; the callgrind case deliberately
// falls through into line-level aggregation; weblist/disasm keep full
// address information.
func aggregate(prof *profile.Profile, f *flags) error {
	switch {
	case f.isFormat("proto"), f.isFormat("raw"):
		// No aggregation for raw profiles.
	case f.isFormat("callgrind"):
		// Aggregate to file/line for callgrind.
		fallthrough
	case *f.flagLines:
		return prof.Aggregate(true, true, true, true, false)
	case *f.flagFiles:
		return prof.Aggregate(true, false, true, false, false)
	case *f.flagFunctions:
		return prof.Aggregate(true, true, false, false, false)
	case f.isFormat("weblist"), f.isFormat("disasm"):
		return prof.Aggregate(false, true, true, true, true)
	}
	return nil
}
// parseOptions parses the options into report.Options
// Returns a function to postprocess the report after generation.
// It errors if the divisor is zero, a parameterized format's regexp is
// invalid, or no output format was selected at all.
func parseOptions(f *flags) (o *report.Options, p commands.PostProcessor, err error) {
	if *f.flagDivideBy == 0 {
		return nil, nil, fmt.Errorf("zero divisor specified")
	}
	o = &report.Options{
		CumSort: *f.flagCum,
		CallTree: *f.flagCallTree,
		PrintAddresses: *f.flagAddresses,
		DropNegative: *f.flagDropNegative,
		Ratio: 1 / *f.flagDivideBy,
		NodeCount: *f.flagNodeCount,
		NodeFraction: *f.flagNodeFraction,
		EdgeFraction: *f.flagEdgeFraction,
		OutputUnit: *f.flagDisplayUnit,
	}
	// First matching boolean format flag wins.
	for cmd, b := range f.flagCommands {
		if *b {
			pcmd := f.commands[cmd]
			o.OutputFormat = pcmd.Format
			return o, pcmd.PostProcess, nil
		}
	}
	// Then parameterized formats; the parameter is a symbol regexp.
	for cmd, rx := range f.flagParamCommands {
		if *rx != "" {
			pcmd := f.commands[cmd]
			if o.Symbol, err = regexp.Compile(*rx); err != nil {
				return nil, nil, fmt.Errorf("parsing -%s regexp: %v", cmd, err)
			}
			o.OutputFormat = pcmd.Format
			return o, pcmd.PostProcess, nil
		}
	}
	return nil, nil, fmt.Errorf("no output format selected")
}
// sampleValueFunc extracts a single int64 value from a profile sample.
type sampleValueFunc func(*profile.Sample) int64

// sampleFormat returns a function to extract values out of a profile.Sample,
// and the type/units of those values.
func sampleFormat(p *profile.Profile, f *flags) (sampleValueFunc, string, string) {
	valueIndex := *f.flagSampleIndex
	// -mean: report value[index]/value[0] and prefix the type accordingly.
	if *f.flagMean {
		return meanExtractor(valueIndex), "mean_" + p.SampleType[valueIndex].Type, p.SampleType[valueIndex].Unit
	}
	return valueExtractor(valueIndex), p.SampleType[valueIndex].Type, p.SampleType[valueIndex].Unit
}
// valueExtractor returns a sampleValueFunc that reads the sample value
// stored at index ix.
func valueExtractor(ix int) sampleValueFunc {
	return func(smpl *profile.Sample) int64 {
		return smpl.Value[ix]
	}
}

// meanExtractor returns a sampleValueFunc that divides the sample value at
// index ix by the sample count at index 0, yielding a per-sample mean.
// Samples with a zero count map to 0 to avoid dividing by zero.
func meanExtractor(ix int) sampleValueFunc {
	return func(smpl *profile.Sample) int64 {
		count := smpl.Value[0]
		if count == 0 {
			return 0
		}
		return smpl.Value[ix] / count
	}
}
// generate renders the report selected by the flags, writing to -output
// (stdout by default). When the chosen command has a post-processor, the
// report is first rendered to a buffer and then piped through it.
// NOTE(review): the interactive parameter is not referenced in this body.
func generate(interactive bool, prof *profile.Profile, obj plugin.ObjTool, ui plugin.UI, f *flags) error {
	o, postProcess, err := parseOptions(f)
	if err != nil {
		return err
	}
	// Pick the destination: stdout or the -output file.
	var w io.Writer
	if *f.flagOutput == "" {
		w = os.Stdout
	} else {
		ui.PrintErr("Generating report in ", *f.flagOutput)
		outputFile, err := os.Create(*f.flagOutput)
		if err != nil {
			return err
		}
		defer outputFile.Close()
		w = outputFile
	}
	if prof.Empty() {
		return fmt.Errorf("profile is empty")
	}
	value, stype, unit := sampleFormat(prof, f)
	o.SampleType = stype
	rpt := report.New(prof, *o, value, unit)
	// Do not apply filters if we're just generating a proto, so we
	// still have all the data.
	if o.OutputFormat != report.Proto {
		// Delay applying focus/ignore until after creating the report so
		// the report reflects the total number of samples.
		if err := preprocess(prof, ui, f); err != nil {
			return err
		}
	}
	if postProcess == nil {
		return report.Generate(w, rpt, obj)
	}
	// Render to a buffer first so the post-processor (e.g. dot -> svg)
	// receives the complete report.
	var dot bytes.Buffer
	if err = report.Generate(&dot, rpt, obj); err != nil {
		return err
	}
	return postProcess(&dot, w, ui)
}
| [
"\"PPROF_TMPDIR\"",
"\"PPROF_TOOLS\"",
"\"PPROF_TMPDIR\"",
"\"HOME\""
] | [] | [
"PPROF_TOOLS",
"PPROF_TMPDIR",
"HOME"
] | [] | ["PPROF_TOOLS", "PPROF_TMPDIR", "HOME"] | go | 3 | 0 | |
pkg/credentials/iam_aws.go | /*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import (
"bufio"
"context"
"errors"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"path"
"strings"
"time"
jsoniter "github.com/json-iterator/go"
)
// DefaultExpiryWindow - Default expiry window.
// ExpiryWindow will allow the credentials to trigger refreshing
// prior to the credentials actually expiring. This is beneficial
// so race conditions with expiring credentials do not cause
// request to fail unexpectedly due to ExpiredTokenException exceptions.
// DefaultExpiryWindow can be used as parameter to (*Expiry).SetExpiration.
// When used the tokens refresh will be triggered when 80% of the elapsed
// time until the actual expiration time is passed.
const DefaultExpiryWindow = -1

// A IAM retrieves credentials from the EC2 service, and keeps track if
// those credentials are expired.
type IAM struct {
	Expiry
	// Required http Client to use when connecting to IAM metadata service.
	Client *http.Client
	// Custom endpoint to fetch IAM role credentials. When empty, Retrieve
	// derives the endpoint from the environment (STS, ECS, or EC2 IMDS).
	Endpoint string
}
// IAM Roles for Amazon EC2
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
const (
	defaultIAMRoleEndpoint = "http://169.254.169.254" // EC2 instance metadata service
	defaultECSRoleEndpoint = "http://169.254.170.2" // ECS task credentials endpoint
	defaultSTSRoleEndpoint = "https://sts.amazonaws.com" // global STS endpoint
	defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/"
	// IMDSv2 session-token request/response headers and TTL (seconds).
	tokenRequestTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds"
	tokenPath = "/latest/api/token"
	tokenTTL = "21600"
	tokenRequestHeader = "X-aws-ec2-metadata-token"
)
// NewIAM returns a pointer to a new Credentials object wrapping the IAM.
// An empty endpoint lets Retrieve choose the appropriate default for the
// detected environment.
func NewIAM(endpoint string) *Credentials {
	provider := &IAM{
		Client: &http.Client{Transport: http.DefaultTransport},
		Endpoint: endpoint,
	}
	return New(provider)
}
// Retrieve retrieves credentials from the EC2 service.
// Error will be returned if the request fails, or unable to extract
// the desired credentials. The source is chosen from the environment, in
// order: STS web identity (AWS_WEB_IDENTITY_TOKEN_FILE), ECS task role
// (AWS_CONTAINER_CREDENTIALS_RELATIVE_URI or ..._FULL_URI), then the EC2
// instance metadata service.
func (m *IAM) Retrieve() (Value, error) {
	// Optional bearer token for the ECS credentials endpoints.
	token := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN")
	var roleCreds ec2RoleCredRespBody
	var err error
	endpoint := m.Endpoint
	switch {
	case len(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")) > 0:
		// Web identity: pick a regional (or China-partition) STS
		// endpoint unless one was configured explicitly.
		if len(endpoint) == 0 {
			if len(os.Getenv("AWS_REGION")) > 0 {
				if strings.HasPrefix(os.Getenv("AWS_REGION"), "cn-") {
					endpoint = "https://sts." + os.Getenv("AWS_REGION") + ".amazonaws.com.cn"
				} else {
					endpoint = "https://sts." + os.Getenv("AWS_REGION") + ".amazonaws.com"
				}
			} else {
				endpoint = defaultSTSRoleEndpoint
			}
		}
		creds := &STSWebIdentity{
			Client: m.Client,
			STSEndpoint: endpoint,
			GetWebIDTokenExpiry: func() (*WebIdentityToken, error) {
				// The token file is re-read on every refresh.
				token, err := ioutil.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE"))
				if err != nil {
					return nil, err
				}
				return &WebIdentityToken{Token: string(token)}, nil
			},
			RoleARN: os.Getenv("AWS_ROLE_ARN"),
			roleSessionName: os.Getenv("AWS_ROLE_SESSION_NAME"),
		}
		stsWebIdentityCreds, err := creds.Retrieve()
		if err == nil {
			m.SetExpiration(creds.Expiration(), DefaultExpiryWindow)
		}
		return stsWebIdentityCreds, err
	case len(os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")) > 0:
		// ECS task role via the fixed link-local endpoint.
		if len(endpoint) == 0 {
			endpoint = fmt.Sprintf("%s%s", defaultECSRoleEndpoint,
				os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"))
		}
		roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
	case len(os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")) > 0:
		// Full-URI variant: the host must resolve to loopback only.
		if len(endpoint) == 0 {
			endpoint = os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")
			var ok bool
			if ok, err = isLoopback(endpoint); !ok {
				if err == nil {
					err = fmt.Errorf("uri host is not a loopback address: %s", endpoint)
				}
				break
			}
		}
		roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
	default:
		// Fall back to the EC2 instance metadata service.
		roleCreds, err = getCredentials(m.Client, endpoint)
	}
	if err != nil {
		return Value{}, err
	}
	// Expiry window is set to 10secs.
	m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow)
	return Value{
		AccessKeyID: roleCreds.AccessKeyID,
		SecretAccessKey: roleCreds.SecretAccessKey,
		SessionToken: roleCreds.Token,
		SignerType: SignatureV4,
	}, nil
}
// A ec2RoleCredRespBody provides the shape for unmarshaling credential
// request responses from the EC2/ECS metadata endpoints.
type ec2RoleCredRespBody struct {
	// Success State
	Expiration time.Time // when these credentials expire
	AccessKeyID string
	SecretAccessKey string
	Token string // session token accompanying the key pair
	// Error state: populated instead of the fields above on failure.
	Code string
	Message string
	// Unused params.
	LastUpdated time.Time
	Type string
}
// getIAMRoleURL builds the URL used to fetch the rolling access
// credentials: the given endpoint with its path replaced by the
// security-credentials metadata path.
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
func getIAMRoleURL(endpoint string) (*url.URL, error) {
	parsed, err := url.Parse(endpoint)
	if err != nil {
		return nil, err
	}
	parsed.Path = defaultIAMSecurityCredsPath
	return parsed, nil
}
// listRoleNames lists of credential role names associated
// with the current EC2 service. If there are no credentials,
// or there is an error making or receiving the request.
// The optional IMDSv2 token, when non-empty, is attached as a header.
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
func listRoleNames(client *http.Client, u *url.URL, token string) ([]string, error) {
	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
	if err != nil {
		return nil, err
	}
	if token != "" {
		req.Header.Add(tokenRequestHeader, token)
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, errors.New(resp.Status)
	}
	// The endpoint returns one role name per line.
	credsList := []string{}
	s := bufio.NewScanner(resp.Body)
	for s.Scan() {
		credsList = append(credsList, s.Text())
	}
	if err := s.Err(); err != nil {
		return nil, err
	}
	return credsList, nil
}
// getEcsTaskCredentials fetches and decodes task-role credentials from the
// given ECS credentials endpoint. A non-empty token is sent as the
// Authorization header value.
func getEcsTaskCredentials(client *http.Client, endpoint string, token string) (ec2RoleCredRespBody, error) {
	req, err := http.NewRequest(http.MethodGet, endpoint, nil)
	if err != nil {
		return ec2RoleCredRespBody{}, err
	}
	if token != "" {
		req.Header.Set("Authorization", token)
	}
	resp, err := client.Do(req)
	if err != nil {
		return ec2RoleCredRespBody{}, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return ec2RoleCredRespBody{}, errors.New(resp.Status)
	}
	respCreds := ec2RoleCredRespBody{}
	if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
		return ec2RoleCredRespBody{}, err
	}
	return respCreds, nil
}
// fetchIMDSToken requests an IMDSv2 session token from the metadata
// endpoint via a PUT with a TTL header, using a one-second timeout so an
// unreachable IMDS does not stall credential retrieval.
func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint+tokenPath, nil)
	if err != nil {
		return "", err
	}
	req.Header.Add(tokenRequestTTLHeader, tokenTTL)
	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	// The body is read before the status check; a read error takes
	// precedence over a non-200 status.
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	if resp.StatusCode != http.StatusOK {
		return "", errors.New(resp.Status)
	}
	return string(data), nil
}
// getCredentials - obtains the credentials from the IAM role name associated with
// the current EC2 service.
//
// If the credentials cannot be found, or there is an error
// reading the response an error will be returned.
func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
	if endpoint == "" {
		endpoint = defaultIAMRoleEndpoint
	}
	// IMDSv2 token; failure is ignored so IMDSv1 still works.
	// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
	token, _ := fetchIMDSToken(client, endpoint)
	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
	u, err := getIAMRoleURL(endpoint)
	if err != nil {
		return ec2RoleCredRespBody{}, err
	}
	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
	roleNames, err := listRoleNames(client, u, token)
	if err != nil {
		return ec2RoleCredRespBody{}, err
	}
	if len(roleNames) == 0 {
		return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service")
	}
	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
	// - An instance profile can contain only one IAM role. This limit cannot be increased.
	roleName := roleNames[0]
	// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
	// The following command retrieves the security credentials for an
	// IAM role named `s3access`.
	//
	// $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access
	//
	u.Path = path.Join(u.Path, roleName)
	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
	if err != nil {
		return ec2RoleCredRespBody{}, err
	}
	if token != "" {
		req.Header.Add(tokenRequestHeader, token)
	}
	resp, err := client.Do(req)
	if err != nil {
		return ec2RoleCredRespBody{}, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return ec2RoleCredRespBody{}, errors.New(resp.Status)
	}
	respCreds := ec2RoleCredRespBody{}
	if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
		return ec2RoleCredRespBody{}, err
	}
	if respCreds.Code != "Success" {
		// If an error code was returned something failed requesting the role.
		return ec2RoleCredRespBody{}, errors.New(respCreds.Message)
	}
	return respCreds, nil
}
// isLoopback reports whether every address the uri's host resolves to is
// a loopback address. An error is returned when the uri cannot be parsed,
// has no host component, or the host fails to resolve.
func isLoopback(uri string) (bool, error) {
	u, err := url.Parse(uri)
	if err != nil {
		return false, err
	}
	host := u.Hostname()
	if host == "" {
		return false, fmt.Errorf("can't parse host from uri: %s", uri)
	}
	addrs, err := net.LookupHost(host)
	if err != nil {
		return false, err
	}
	for _, addr := range addrs {
		if ip := net.ParseIP(addr); !ip.IsLoopback() {
			return false, nil
		}
	}
	return true, nil
}
| [
"\"AWS_CONTAINER_AUTHORIZATION_TOKEN\"",
"\"AWS_WEB_IDENTITY_TOKEN_FILE\"",
"\"AWS_REGION\"",
"\"AWS_REGION\"",
"\"AWS_REGION\"",
"\"AWS_REGION\"",
"\"AWS_WEB_IDENTITY_TOKEN_FILE\"",
"\"AWS_ROLE_ARN\"",
"\"AWS_ROLE_SESSION_NAME\"",
"\"AWS_CONTAINER_CREDENTIALS_RELATIVE_URI\"",
"\"AWS_CONTAINER_CREDENTIALS_RELATIVE_URI\"",
"\"AWS_CONTAINER_CREDENTIALS_FULL_URI\"",
"\"AWS_CONTAINER_CREDENTIALS_FULL_URI\""
] | [] | [
"AWS_ROLE_ARN",
"AWS_REGION",
"AWS_CONTAINER_CREDENTIALS_RELATIVE_URI",
"AWS_CONTAINER_AUTHORIZATION_TOKEN",
"AWS_CONTAINER_CREDENTIALS_FULL_URI",
"AWS_WEB_IDENTITY_TOKEN_FILE",
"AWS_ROLE_SESSION_NAME"
] | [] | ["AWS_ROLE_ARN", "AWS_REGION", "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI", "AWS_CONTAINER_AUTHORIZATION_TOKEN", "AWS_CONTAINER_CREDENTIALS_FULL_URI", "AWS_WEB_IDENTITY_TOKEN_FILE", "AWS_ROLE_SESSION_NAME"] | go | 7 | 0 | |
internal/testenv/testenv.go | // Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package testenv contains helper functions for skipping tests
// based on which tools are present in the environment.
package testenv
import (
"bytes"
"fmt"
"go/build"
"io/ioutil"
"os"
"runtime"
"strings"
"sync"
exec "golang.org/x/sys/execabs"
)
// Testing is an abstraction of a *testing.T.
type Testing interface {
	Skipf(format string, args ...interface{})
	Fatalf(format string, args ...interface{})
}

// helperer is implemented by testing types whose Helper method marks the
// calling function as a test helper.
type helperer interface {
	Helper()
}

// packageMainIsDevel reports whether the module containing package main
// is a development version (if module information is available).
//
// Builds in GOPATH mode and builds that lack module information are assumed to
// be development versions.
var packageMainIsDevel = func() bool { return true }

// checkGoGoroot caches the one-time verification that the 'go' tool found
// on PATH belongs to the same GOROOT as the running binary.
var checkGoGoroot struct {
	once sync.Once
	err error
}
// hasTool reports (via a nil error) whether the named tool is usable in
// this environment. "cgo" is special-cased to mean the go tool can build
// cgo programs. Some tools get extra validation: patch must support -o,
// go must match runtime.GOROOT, and diff must be GNU diffutils.
func hasTool(tool string) error {
	if tool == "cgo" {
		enabled, err := cgoEnabled(false)
		if err != nil {
			return fmt.Errorf("checking cgo: %v", err)
		}
		if !enabled {
			return fmt.Errorf("cgo not enabled")
		}
		return nil
	}
	_, err := exec.LookPath(tool)
	if err != nil {
		return err
	}
	switch tool {
	case "patch":
		// check that the patch tools supports the -o argument
		temp, err := ioutil.TempFile("", "patch-test")
		if err != nil {
			return err
		}
		temp.Close()
		defer os.Remove(temp.Name())
		cmd := exec.Command(tool, "-o", temp.Name())
		if err := cmd.Run(); err != nil {
			return err
		}
	case "go":
		checkGoGoroot.once.Do(func() {
			// Ensure that the 'go' command found by exec.LookPath is from the correct
			// GOROOT. Otherwise, 'some/path/go test ./...' will test against some
			// version of the 'go' binary other than 'some/path/go', which is almost
			// certainly not what the user intended.
			out, err := exec.Command(tool, "env", "GOROOT").CombinedOutput()
			if err != nil {
				checkGoGoroot.err = err
				return
			}
			GOROOT := strings.TrimSpace(string(out))
			if GOROOT != runtime.GOROOT() {
				checkGoGoroot.err = fmt.Errorf("'go env GOROOT' does not match runtime.GOROOT:\n\tgo env: %s\n\tGOROOT: %s", GOROOT, runtime.GOROOT())
			}
		})
		if checkGoGoroot.err != nil {
			return checkGoGoroot.err
		}
	case "diff":
		// Check that diff is the GNU version, needed for the -u argument and
		// to report missing newlines at the end of files.
		out, err := exec.Command(tool, "-version").Output()
		if err != nil {
			return err
		}
		if !bytes.Contains(out, []byte("GNU diffutils")) {
			return fmt.Errorf("diff is not the GNU version")
		}
	}
	return nil
}
// cgoEnabled reports whether the go command considers cgo enabled. When
// bypassEnvironment is true, CGO_ENABLED is cleared for the child process
// so the answer reflects platform support rather than the caller's
// environment.
func cgoEnabled(bypassEnvironment bool) (bool, error) {
	cmd := exec.Command("go", "env", "CGO_ENABLED")
	if bypassEnvironment {
		env := append([]string(nil), os.Environ()...)
		cmd.Env = append(env, "CGO_ENABLED=")
	}
	out, err := cmd.CombinedOutput()
	if err != nil {
		return false, err
	}
	return strings.TrimSpace(string(out)) == "1", nil
}
// allowMissingTool reports whether a test should be skipped (rather than
// failed) when the given tool is unavailable, based on the builder name
// and environment. Development builds are expected to have all tools.
func allowMissingTool(tool string) bool {
	if runtime.GOOS == "android" {
		// Android builds generally run tests on a separate machine from the build,
		// so don't expect any external tools to be available.
		return true
	}
	switch tool {
	case "cgo":
		if strings.HasSuffix(os.Getenv("GO_BUILDER_NAME"), "-nocgo") {
			// Explicitly disabled on -nocgo builders.
			return true
		}
		if enabled, err := cgoEnabled(true); err == nil && !enabled {
			// No platform support.
			return true
		}
	case "go":
		if os.Getenv("GO_BUILDER_NAME") == "illumos-amd64-joyent" {
			// Work around a misconfigured builder (see https://golang.org/issue/33950).
			return true
		}
	case "diff":
		if os.Getenv("GO_BUILDER_NAME") != "" {
			return true
		}
	case "patch":
		if os.Getenv("GO_BUILDER_NAME") != "" {
			return true
		}
	}
	// If a developer is actively working on this test, we expect them to have all
	// of its dependencies installed. However, if it's just a dependency of some
	// other module (for example, being run via 'go test all'), we should be more
	// tolerant of unusual environments.
	return !packageMainIsDevel()
}
// NeedsTool skips t if the named tool is not present in the path.
// As a special case, "cgo" means "go" is present and can compile cgo programs.
// The test fails (rather than skips) when the tool is expected to exist in
// this environment; see allowMissingTool.
func NeedsTool(t Testing, tool string) {
	if t, ok := t.(helperer); ok {
		t.Helper()
	}
	err := hasTool(tool)
	if err == nil {
		return
	}
	if allowMissingTool(tool) {
		t.Skipf("skipping because %s tool not available: %v", tool, err)
	} else {
		t.Fatalf("%s tool not available: %v", tool, err)
	}
}
// ExitIfSmallMachine emits a helpful diagnostic and calls os.Exit(0) if the
// current machine is a builder known to have scarce resources.
//
// It should be called from within a TestMain function.
// The builder is identified by the GO_BUILDER_NAME environment variable;
// unknown builders return without exiting.
func ExitIfSmallMachine() {
	switch b := os.Getenv("GO_BUILDER_NAME"); b {
	case "linux-arm-scaleway":
		// "linux-arm" was renamed to "linux-arm-scaleway" in CL 303230.
		fmt.Fprintln(os.Stderr, "skipping test: linux-arm-scaleway builder lacks sufficient memory (https://golang.org/issue/32834)")
	case "plan9-arm":
		fmt.Fprintln(os.Stderr, "skipping test: plan9-arm builder lacks sufficient memory (https://golang.org/issue/38772)")
	case "netbsd-arm-bsiegert", "netbsd-arm64-bsiegert":
		// As of 2021-06-02, these builders are running with GO_TEST_TIMEOUT_SCALE=10,
		// and there is only one of each. We shouldn't waste those scarce resources
		// running very slow tests.
		fmt.Fprintf(os.Stderr, "skipping test: %s builder is very slow\n", b)
	default:
		return
	}
	os.Exit(0)
}
// Go1Point returns the x in Go 1.x, taken from the highest parseable
// release tag in the current build context. It panics when no tag of the
// form "go1.N" exists.
func Go1Point() int {
	tags := build.Default.ReleaseTags
	for i := len(tags) - 1; i >= 0; i-- {
		var minor int
		if n, err := fmt.Sscanf(tags[i], "go1.%d", &minor); n == 1 && err == nil {
			return minor
		}
	}
	panic("bad release tags")
}
// NeedsGo1Point skips t if the Go version used to run the test is older than
// 1.x.
func NeedsGo1Point(t Testing, x int) {
	// Mark this function as a helper when the testing implementation
	// supports it, so failures point at the caller.
	if t, ok := t.(helperer); ok {
		t.Helper()
	}
	if Go1Point() < x {
		t.Skipf("running Go version %q is version 1.%d, older than required 1.%d", runtime.Version(), Go1Point(), x)
	}
}
| [
"\"GO_BUILDER_NAME\"",
"\"GO_BUILDER_NAME\"",
"\"GO_BUILDER_NAME\"",
"\"GO_BUILDER_NAME\"",
"\"GO_BUILDER_NAME\""
] | [] | [
"GO_BUILDER_NAME"
] | [] | ["GO_BUILDER_NAME"] | go | 1 | 0 | |
Problem Solving/Algorithms/Number Line Jumps.java | import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.regex.*;
public class Solution {

    /**
     * Decides whether two kangaroos starting at x1 and x2, jumping v1 and v2
     * per hop, ever land on the same position after the same number of jumps.
     *
     * With v1 > v2 they meet exactly when the positional gap (x1 - x2) is a
     * whole multiple of the per-jump closing speed (v2 - v1); otherwise the
     * chase never lines up.
     *
     * @return "YES" if they meet, "NO" otherwise
     */
    static String kangaroo(int x1, int v1, int x2, int v2) {
        boolean meets = v1 > v2 && (x1 - x2) % (v2 - v1) == 0;
        return meets ? "YES" : "NO";
    }

    private static final Scanner scanner = new Scanner(System.in);

    public static void main(String[] args) throws IOException {
        BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));

        // Input is a single line: x1 v1 x2 v2.
        String[] tokens = scanner.nextLine().split(" ");
        int x1 = Integer.parseInt(tokens[0]);
        int v1 = Integer.parseInt(tokens[1]);
        int x2 = Integer.parseInt(tokens[2]);
        int v2 = Integer.parseInt(tokens[3]);

        bufferedWriter.write(kangaroo(x1, v1, x2, v2));
        bufferedWriter.newLine();
        bufferedWriter.close();

        scanner.close();
    }
}
| [
"\"OUTPUT_PATH\""
] | [] | [
"OUTPUT_PATH"
] | [] | ["OUTPUT_PATH"] | java | 1 | 0 | |
cmd/webhook/main.go | package main
import (
"fmt"
"k8s.io/api/admission/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"log"
"net/http"
"os"
"path/filepath"
)
const (
	// tlsDir is where the webhook's serving-certificate secret is mounted.
	tlsDir = `/run/secrets/tls`
	tlsCertFile = `tls.crt`
	tlsKeyFile = `tls.key`
	// envVarName is the variable injected into the sidecar container.
	envVarName = "VIRTUAL_ENVIRONMENT_TAG"
	// sidecarContainerName is the Istio sidecar container to patch.
	sidecarContainerName = "istio-proxy"
)

var (
	// podResource identifies the only resource kind this webhook handles.
	podResource = metav1.GroupVersionResource{Version: "v1", Resource: "pods"}
)
// injectEnvironmentTag read the environment tag from pod label, and save to the sidecar container as an environment
// variable named `VIRTUAL_ENVIRONMENT_TAG`. It returns JSON-patch
// operations (or nil when there is nothing to do); non-pod requests and
// pods without the label or sidecar pass through unmodified.
func injectEnvironmentTag(req *v1beta1.AdmissionRequest) ([]patchOperation, error) {
	// This handler should only get called on Pod objects as per the MutatingWebhookConfiguration in the YAML file.
	// However, if (for whatever reason) this gets invoked on an object of a different kind, issue a log message but
	// let the object request pass through otherwise.
	if req.Resource != podResource {
		log.Printf("expect resource to be %s", podResource)
		return nil, nil
	}
	// Parse the Pod object.
	raw := req.Object.Raw
	pod := corev1.Pod{}
	if _, _, err := universalDeserializer.Decode(raw, nil, &pod); err != nil {
		return nil, fmt.Errorf("could not deserialize pod object: %v", err)
	}
	// Retrieve the environment label name from pod label
	// NOTE(review): log.Fatalln exits the whole webhook process when the
	// envLabel environment variable is unset — confirm that is intended.
	envLabel := os.Getenv("envLabel")
	if envLabel == "" {
		log.Fatalln("cannot determine env label !!")
	}
	// Retrieve the environment tag from pod label
	envTag := ""
	if value, ok := pod.Labels[envLabel]; ok {
		envTag = value
	}
	if envTag == "" {
		log.Printf("no environment tag found on pod %s", getPodName(pod))
		return nil, nil
	}
	// Locate the istio-proxy sidecar container.
	sidecarContainerIndex := -1
	for i, container := range pod.Spec.Containers {
		if container.Name == sidecarContainerName {
			sidecarContainerIndex = i
		}
	}
	if sidecarContainerIndex < 0 {
		log.Printf("no sidecar container found on pod %s", getPodName(pod))
		return nil, nil
	}
	// Check whether the variable already exists on the sidecar.
	envVarIndex := -1
	for i, envVar := range pod.Spec.Containers[sidecarContainerIndex].Env {
		if envVar.Name == envVarName {
			envVarIndex = i
		}
	}
	// Create patch operations to apply environment tag
	var patches []patchOperation
	if envVarIndex < 0 {
		// NOTE(review): "add" at .../env/0 assumes the container already
		// has an env array; verify against a pod whose sidecar has no
		// env vars at all.
		patches = append(patches, patchOperation{
			Op: "add",
			Path: fmt.Sprintf("/spec/containers/%d/env/0", sidecarContainerIndex),
			Value: corev1.EnvVar{Name: envVarName, Value: envTag},
		})
	} else {
		patches = append(patches, patchOperation{
			Op: "replace",
			Path: fmt.Sprintf("/spec/containers/%d/env/%d/value", sidecarContainerIndex, envVarIndex),
			Value: envTag,
		})
	}
	log.Printf("marked %s as %s", getPodName(pod), envTag)
	return patches, nil
}
// getPodName returns a printable identifier for the pod: its Name when set,
// otherwise its GenerateName (pods created via generateName may not have a
// concrete Name yet at admission time).
func getPodName(pod corev1.Pod) string {
	if name := pod.Name; name != "" {
		return name
	}
	return pod.GenerateName
}
// main wires up the mutating-webhook HTTP server and serves it over TLS.
func main() {
	certPath := filepath.Join(tlsDir, tlsCertFile)
	keyPath := filepath.Join(tlsDir, tlsKeyFile)

	log.Printf("sidecar environment tag injector starting")

	mux := http.NewServeMux()
	mux.Handle("/inject", admitFuncHandler(injectEnvironmentTag))

	// Listening on 8443 avoids needing root privileges or extra capabilities;
	// the Service object maps this port to the HTTPS port 443.
	server := &http.Server{
		Addr:    ":8443",
		Handler: mux,
	}
	log.Fatal(server.ListenAndServeTLS(certPath, keyPath))
}
| [
"\"envLabel\""
] | [] | [
"envLabel"
] | [] | ["envLabel"] | go | 1 | 0 | |
example/main.go | package main
import (
"github.com/diggs/glog"
"github.com/diggs/gone"
"net"
"os"
"time"
)
// redisLock builds a GoneLock backed by the local Redis instance on the
// default port (6379) for the given id/value pair.
func redisLock(id string, value string) (gone.GoneLock, error) {
	addr := &net.TCPAddr{Port: 6379}
	return gone.NewRedisLock(addr, id, value)
}
// runner is the body executed while this process holds the distributed lock.
// It extends the lock every 25 seconds, logs any payload delivered on the
// runner's data channel, and returns when either the exit channel fires or a
// lock extension fails.
func runner(r *gone.GoneRunner) {
	// Keep a handle on the ticker so it can be stopped when the runner exits.
	// The original `time.NewTicker(25 * time.Second).C` dropped the *Ticker,
	// making Stop unreachable and leaking the underlying timer for the life
	// of the process.
	ticker := time.NewTicker(25 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-r.Exit:
			glog.Debugf("Exit signalled; exiting runner.")
			return
		case data := <-r.Data:
			glog.Debugf("Received data: %v", string(data))
		case <-ticker.C:
			err := r.Extend()
			if err != nil {
				glog.Debugf("Failed to extend lock; exiting runner: %v", err)
				return
			}
		}
	}
}
// main acquires the distributed lock for the example channel, pushes a
// payload at the active runner every five seconds, and blocks until the
// runner gives up the lock.
func main() {
	glog.SetSeverity("debug")
	defer glog.Flush()

	g, err := gone.NewGone("tcp://127.0.0.1:"+os.Getenv("PORT"), redisLock)
	if err != nil {
		panic(err.Error())
	}

	r, err := g.RunOne("irc:#connectrix:irc.freenode.net", runner)
	if err != nil {
		panic(err.Error())
	}

	// Periodically push a payload at whichever process holds the lock.
	go func() {
		for {
			if sendErr := r.SendData([]byte("hello world")); sendErr != nil {
				glog.Debugf("Send err: %v", sendErr)
			}
			time.Sleep(5 * time.Second)
		}
	}()

	// Block until the runner exits.
	<-r.Exit
	glog.Info("Runner quit.")
}
| [
"\"PORT\""
] | [] | [
"PORT"
] | [] | ["PORT"] | go | 1 | 0 | |
backend/data/postgres.go | package data
import (
"fmt"
"github.com/go-pg/pg/v10"
"github.com/go-pg/pg/v10/orm"
"github.com/eikona-org/eikona/v2/data/datamodels"
"os"
)
// db is the package-wide connection pool, initialised once by Init.
var db *pg.DB
// Init connects the package-wide pool using the DATABASE_URL environment
// variable and ensures the schema exists. It panics on any failure and
// otherwise always returns nil (the error return is kept for callers'
// existing signature).
func Init() error {
	options, parseErr := pg.ParseURL(os.Getenv("DATABASE_URL"))
	if parseErr != nil {
		panic(parseErr)
	}
	db = pg.Connect(options)
	if schemaErr := createSchema(); schemaErr != nil {
		panic(schemaErr)
	}
	return nil
}
// GetDbConnection checks a single connection out of the pool; the caller is
// responsible for closing it (users in this file do so via defer).
func GetDbConnection() *pg.Conn {
	return db.Conn()
}
// createSchema creates the table for every model used by this service
// (Image, User, Organization, Process, ProcessingStep) with IfNotExists,
// then runs a health check. A panic raised mid-creation is caught by the
// deferred recover, which logs, rolls back and re-panics.
//
// NOTE(review): the CreateTable calls go through the connection `db`, not
// through `transaction`, so the Begin/Commit pair does not actually wrap
// them — confirm whether the transaction is still needed.
// NOTE(review): the errors returned by Rollback and Commit are ignored.
func createSchema() error {
	fmt.Println("Creating DB Schema...")
	// Shadows the package-level *pg.DB with a single checked-out *pg.Conn.
	db := GetDbConnection()
	defer db.Close()
	transaction, transactionError := db.Begin()
	if transactionError != nil {
		panic(transactionError)
	}
	defer func() {
		if r := recover(); r != nil {
			fmt.Printf("Something went wrong during Schema creation: %s\n", r)
			transaction.Rollback()
			panic(r)
		}
	}()
	// Nil typed pointers are enough for go-pg to derive each table's schema.
	models := []interface{}{
		(*data.Image)(nil),
		(*data.User)(nil),
		(*data.Organization)(nil),
		(*data.Process)(nil),
		(*data.ProcessingStep)(nil),
	}
	for _, model := range models {
		err := db.Model(model).CreateTable(&orm.CreateTableOptions{
			IfNotExists: true,
		})
		if err != nil {
			fmt.Println("Error occurred! Rolling back...")
			transaction.Rollback()
			return err
		}
	}
	transaction.Commit()
	fmt.Println("DB Schema created!")
	err := healthCheck()
	if err != nil {
		return err
	}
	return nil
}
// healthCheck pings the database and runs a SELECT over every table to
// verify the schema is queryable; it returns nil on success.
//
// NOTE(review): the SELECTs run on the connection `db`, not on
// `transaction`, so the Begin/Commit pair has no effect on them and
// Commit's error is ignored.
// NOTE(review): every failure panics instead of returning the error,
// despite the error return type.
func healthCheck() error {
	fmt.Println("Performing DB Health Check...")
	// Shadows the package-level *pg.DB with a single checked-out *pg.Conn.
	db := GetDbConnection()
	context := db.Context()
	err := db.Ping(context)
	if err != nil {
		panic(err)
	}
	transaction, transactionError := db.Begin()
	if transactionError != nil {
		panic(transactionError)
	}
	defer db.Close()
	var users []data.User
	err = db.Model(&users).Select()
	if err != nil {
		panic(err)
	}
	var images []data.Image
	err = db.Model(&images).Select()
	if err != nil {
		panic(err)
	}
	var organizations []data.Organization
	err = db.Model(&organizations).Select()
	if err != nil {
		panic(err)
	}
	var processes []data.Process
	err = db.Model(&processes).Select()
	if err != nil {
		panic(err)
	}
	var processingSteps []data.ProcessingStep
	err = db.Model(&processingSteps).Select()
	if err != nil {
		panic(err)
	}
	transaction.Commit()
	fmt.Println("DB Health Check succeeded!")
	return nil
}
| [
"\"DATABASE_URL\""
] | [] | [
"DATABASE_URL"
] | [] | ["DATABASE_URL"] | go | 1 | 0 | |
algorithm/math/nCr.py | def combination(n, r):
result = 1
for i in range(1, r + 1):
result *= (n - (r - i)) / (r - (r - i))
return int(result)
if __name__ == "__main__":
    # Interactive driver: read n and r from stdin and print C(n, r).
    print("[nCr]")
    n = int(input("Input the n: "))
    r = int(input("Input the r: "))
    result = combination(n, r)
    print(result)
| [] | [] | [] | [] | [] | python | null | null | null |
google/cloud/dialogflow_v2/services/session_entity_types/client.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.dialogflow_v2.services.session_entity_types import pagers
from google.cloud.dialogflow_v2.types import entity_type
from google.cloud.dialogflow_v2.types import session_entity_type
from google.cloud.dialogflow_v2.types import (
session_entity_type as gcd_session_entity_type,
)
from google.protobuf import field_mask_pb2 # type: ignore
from .transports.base import SessionEntityTypesTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import SessionEntityTypesGrpcTransport
from .transports.grpc_asyncio import SessionEntityTypesGrpcAsyncIOTransport
class SessionEntityTypesClientMeta(type):
    """Metaclass for the SessionEntityTypes client.

    Exposes class-level helpers for looking up support objects (such as
    transports) so that client instances stay uncluttered.
    """

    _transport_registry = OrderedDict()  # type: Dict[str, Type[SessionEntityTypesTransport]]
    _transport_registry["grpc"] = SessionEntityTypesGrpcTransport
    _transport_registry["grpc_asyncio"] = SessionEntityTypesGrpcAsyncIOTransport

    def get_transport_class(
        cls, label: str = None,
    ) -> Type[SessionEntityTypesTransport]:
        """Return the transport class registered under ``label``.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        if not label:
            # No explicit request: fall back to the first registry entry.
            return next(iter(cls._transport_registry.values()))
        return cls._transport_registry[label]
class SessionEntityTypesClient(metaclass=SessionEntityTypesClientMeta):
"""Service for managing
[SessionEntityTypes][google.cloud.dialogflow.v2.SessionEntityType].
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "dialogflow.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SessionEntityTypesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SessionEntityTypesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    @property
    def transport(self) -> SessionEntityTypesTransport:
        """Returns the transport used by the client instance.

        Returns:
            SessionEntityTypesTransport: The transport used by the client
                instance.
        """
        # Assigned exactly once in __init__ and reused for every RPC.
        return self._transport
@staticmethod
def session_entity_type_path(project: str, session: str, entity_type: str,) -> str:
"""Returns a fully-qualified session_entity_type string."""
return "projects/{project}/agent/sessions/{session}/entityTypes/{entity_type}".format(
project=project, session=session, entity_type=entity_type,
)
@staticmethod
def parse_session_entity_type_path(path: str) -> Dict[str, str]:
"""Parses a session_entity_type path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/agent/sessions/(?P<session>.+?)/entityTypes/(?P<entity_type>.+?)$",
path,
)
return m.groupdict() if m else {}
    # --- Generated helpers for the common resource-name patterns. -----------
    # Each ``common_*_path`` builds a canonical resource name, and each
    # ``parse_common_*_path`` inverts it, returning {} when the path does not
    # match.
    @staticmethod
    def common_billing_account_path(billing_account: str,) -> str:
        """Returns a fully-qualified billing_account string."""
        return "billingAccounts/{billing_account}".format(
            billing_account=billing_account,
        )

    @staticmethod
    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
        """Parse a billing_account path into its component segments."""
        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_folder_path(folder: str,) -> str:
        """Returns a fully-qualified folder string."""
        return "folders/{folder}".format(folder=folder,)

    @staticmethod
    def parse_common_folder_path(path: str) -> Dict[str, str]:
        """Parse a folder path into its component segments."""
        m = re.match(r"^folders/(?P<folder>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_organization_path(organization: str,) -> str:
        """Returns a fully-qualified organization string."""
        return "organizations/{organization}".format(organization=organization,)

    @staticmethod
    def parse_common_organization_path(path: str) -> Dict[str, str]:
        """Parse a organization path into its component segments."""
        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_project_path(project: str,) -> str:
        """Returns a fully-qualified project string."""
        return "projects/{project}".format(project=project,)

    @staticmethod
    def parse_common_project_path(path: str) -> Dict[str, str]:
        """Parse a project path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_location_path(project: str, location: str,) -> str:
        """Returns a fully-qualified location string."""
        return "projects/{project}/locations/{location}".format(
            project=project, location=location,
        )

    @staticmethod
    def parse_common_location_path(path: str) -> Dict[str, str]:
        """Parse a location path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
        return m.groupdict() if m else {}
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, SessionEntityTypesTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the session entity types client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, SessionEntityTypesTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()

        # Create SSL credentials for mutual TLS if needed.
        # NOTE: strtobool raises ValueError for anything other than a
        # recognised true/false string, so malformed values fail loudly here.
        use_client_cert = bool(
            util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
        )

        client_cert_source_func = None
        is_mtls = False
        if use_client_cert:
            if client_options.client_cert_source:
                is_mtls = True
                client_cert_source_func = client_options.client_cert_source
            else:
                is_mtls = mtls.has_default_client_cert_source()
                if is_mtls:
                    client_cert_source_func = mtls.default_client_cert_source()
                else:
                    client_cert_source_func = None

        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            # An explicit endpoint always wins over the mTLS auto-detection.
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                if is_mtls:
                    api_endpoint = self.DEFAULT_MTLS_ENDPOINT
                else:
                    api_endpoint = self.DEFAULT_ENDPOINT
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
                    "values: never, auto, always"
                )

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, SessionEntityTypesTransport):
            # transport is a SessionEntityTypesTransport instance.
            if credentials or client_options.credentials_file:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=(
                    Transport == type(self).get_transport_class("grpc")
                    or Transport == type(self).get_transport_class("grpc_asyncio")
                ),
            )
    def list_session_entity_types(
        self,
        request: Union[session_entity_type.ListSessionEntityTypesRequest, dict] = None,
        *,
        parent: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> pagers.ListSessionEntityTypesPager:
        r"""Returns the list of all session entity types in the
        specified session.

        This method doesn't work with Google Assistant
        integration. Contact Dialogflow support if you need to
        use session entities with Google Assistant integration.

        Args:
            request (Union[google.cloud.dialogflow_v2.types.ListSessionEntityTypesRequest, dict]):
                The request object. The request message for
                [SessionEntityTypes.ListSessionEntityTypes][google.cloud.dialogflow.v2.SessionEntityTypes.ListSessionEntityTypes].
            parent (str):
                Required. The session to list all session entity types
                from. Format:
                ``projects/<Project ID>/agent/sessions/<Session ID>`` or
                ``projects/<Project ID>/agent/environments/<Environment ID>/users/<User ID>/ sessions/<Session ID>``.
                If ``Environment ID`` is not specified, we assume
                default 'draft' environment. If ``User ID`` is not
                specified, we assume default '-' user.

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.dialogflow_v2.services.session_entity_types.pagers.ListSessionEntityTypesPager:
                The response message for
                [SessionEntityTypes.ListSessionEntityTypes][google.cloud.dialogflow.v2.SessionEntityTypes.ListSessionEntityTypes].

                Iterating over this object will yield results and
                resolve additional pages automatically.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        # (A full request object and flattened args are mutually exclusive.)
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a session_entity_type.ListSessionEntityTypesRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, session_entity_type.ListSessionEntityTypesRequest):
            request = session_entity_type.ListSessionEntityTypesRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if parent is not None:
                request.parent = parent

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[
            self._transport.list_session_entity_types
        ]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )

        # Send the request.
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method.
        response = pagers.ListSessionEntityTypesPager(
            method=rpc, request=request, response=response, metadata=metadata,
        )

        # Done; return the response.
        return response
    def get_session_entity_type(
        self,
        request: Union[session_entity_type.GetSessionEntityTypeRequest, dict] = None,
        *,
        name: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> session_entity_type.SessionEntityType:
        r"""Retrieves the specified session entity type.

        This method doesn't work with Google Assistant
        integration. Contact Dialogflow support if you need to
        use session entities with Google Assistant integration.

        Args:
            request (Union[google.cloud.dialogflow_v2.types.GetSessionEntityTypeRequest, dict]):
                The request object. The request message for
                [SessionEntityTypes.GetSessionEntityType][google.cloud.dialogflow.v2.SessionEntityTypes.GetSessionEntityType].
            name (str):
                Required. The name of the session entity type. Format:
                ``projects/<Project ID>/agent/sessions/<Session ID>/entityTypes/<Entity Type Display Name>``
                or
                ``projects/<Project ID>/agent/environments/<Environment ID>/users/<User ID>/sessions/<Session ID>/entityTypes/<Entity Type Display Name>``.
                If ``Environment ID`` is not specified, we assume
                default 'draft' environment. If ``User ID`` is not
                specified, we assume default '-' user.

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.dialogflow_v2.types.SessionEntityType:
                A session represents a conversation between a Dialogflow agent and an
                   end-user. You can create special entities, called
                   session entities, during a session. Session entities
                   can extend or replace custom entity types and only
                   exist during the session that they were created for.
                   All session data, including session entities, is
                   stored by Dialogflow for 20 minutes.

                   For more information, see the [session entity
                   guide](\ https://cloud.google.com/dialogflow/docs/entities-session).

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        # (A full request object and flattened args are mutually exclusive.)
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a session_entity_type.GetSessionEntityTypeRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, session_entity_type.GetSessionEntityTypeRequest):
            request = session_entity_type.GetSessionEntityTypeRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if name is not None:
                request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get_session_entity_type]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )

        # Send the request.
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response
    def create_session_entity_type(
        self,
        request: Union[
            gcd_session_entity_type.CreateSessionEntityTypeRequest, dict
        ] = None,
        *,
        parent: str = None,
        session_entity_type: gcd_session_entity_type.SessionEntityType = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> gcd_session_entity_type.SessionEntityType:
        r"""Creates a session entity type.

        If the specified session entity type already exists,
        overrides the session entity type.

        This method doesn't work with Google Assistant
        integration. Contact Dialogflow support if you need to
        use session entities with Google Assistant integration.

        Args:
            request (Union[google.cloud.dialogflow_v2.types.CreateSessionEntityTypeRequest, dict]):
                The request object. The request message for
                [SessionEntityTypes.CreateSessionEntityType][google.cloud.dialogflow.v2.SessionEntityTypes.CreateSessionEntityType].
            parent (str):
                Required. The session to create a session entity type
                for. Format:
                ``projects/<Project ID>/agent/sessions/<Session ID>`` or
                ``projects/<Project ID>/agent/environments/<Environment ID>/users/<User ID>/ sessions/<Session ID>``.
                If ``Environment ID`` is not specified, we assume
                default 'draft' environment. If ``User ID`` is not
                specified, we assume default '-' user.

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            session_entity_type (google.cloud.dialogflow_v2.types.SessionEntityType):
                Required. The session entity type to
                create.

                This corresponds to the ``session_entity_type`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.dialogflow_v2.types.SessionEntityType:
                A session represents a conversation between a Dialogflow agent and an
                   end-user. You can create special entities, called
                   session entities, during a session. Session entities
                   can extend or replace custom entity types and only
                   exist during the session that they were created for.
                   All session data, including session entities, is
                   stored by Dialogflow for 20 minutes.

                   For more information, see the [session entity
                   guide](\ https://cloud.google.com/dialogflow/docs/entities-session).

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        # (A full request object and flattened args are mutually exclusive.)
        # Note: the ``session_entity_type`` parameter shadows the module import
        # of the same name, which is why the ``gcd_session_entity_type`` alias
        # is used for the request/response types throughout this method.
        has_flattened_params = any([parent, session_entity_type])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a gcd_session_entity_type.CreateSessionEntityTypeRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(
            request, gcd_session_entity_type.CreateSessionEntityTypeRequest
        ):
            request = gcd_session_entity_type.CreateSessionEntityTypeRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if parent is not None:
                request.parent = parent
            if session_entity_type is not None:
                request.session_entity_type = session_entity_type

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[
            self._transport.create_session_entity_type
        ]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )

        # Send the request.
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response
def update_session_entity_type(
self,
request: Union[
gcd_session_entity_type.UpdateSessionEntityTypeRequest, dict
] = None,
*,
session_entity_type: gcd_session_entity_type.SessionEntityType = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcd_session_entity_type.SessionEntityType:
r"""Updates the specified session entity type.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Args:
request (Union[google.cloud.dialogflow_v2.types.UpdateSessionEntityTypeRequest, dict]):
The request object. The request message for
[SessionEntityTypes.UpdateSessionEntityType][google.cloud.dialogflow.v2.SessionEntityTypes.UpdateSessionEntityType].
session_entity_type (google.cloud.dialogflow_v2.types.SessionEntityType):
Required. The session entity type to
update.
This corresponds to the ``session_entity_type`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Optional. The mask to control which
fields get updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.SessionEntityType:
A session represents a conversation between a Dialogflow agent and an
end-user. You can create special entities, called
session entities, during a session. Session entities
can extend or replace custom entity types and only
exist during the session that they were created for.
All session data, including session entities, is
stored by Dialogflow for 20 minutes.
For more information, see the [session entity
guide](\ https://cloud.google.com/dialogflow/docs/entities-session).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([session_entity_type, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcd_session_entity_type.UpdateSessionEntityTypeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, gcd_session_entity_type.UpdateSessionEntityTypeRequest
):
request = gcd_session_entity_type.UpdateSessionEntityTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if session_entity_type is not None:
request.session_entity_type = session_entity_type
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.update_session_entity_type
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("session_entity_type.name", request.session_entity_type.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def delete_session_entity_type(
self,
request: Union[session_entity_type.DeleteSessionEntityTypeRequest, dict] = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the specified session entity type.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Args:
request (Union[google.cloud.dialogflow_v2.types.DeleteSessionEntityTypeRequest, dict]):
The request object. The request message for
[SessionEntityTypes.DeleteSessionEntityType][google.cloud.dialogflow.v2.SessionEntityTypes.DeleteSessionEntityType].
name (str):
Required. The name of the entity type to delete. Format:
``projects/<Project ID>/agent/sessions/<Session ID>/entityTypes/<Entity Type Display Name>``
or
``projects/<Project ID>/agent/environments/<Environment ID>/users/<User ID>/sessions/<Session ID>/entityTypes/<Entity Type Display Name>``.
If ``Environment ID`` is not specified, we assume
default 'draft' environment. If ``User ID`` is not
specified, we assume default '-' user.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a session_entity_type.DeleteSessionEntityTypeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, session_entity_type.DeleteSessionEntityTypeRequest):
request = session_entity_type.DeleteSessionEntityTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.delete_session_entity_type
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
try:
    # Report the installed google-cloud-dialogflow version in the
    # x-goog-api-client header attached to every request.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-dialogflow",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata unavailable (e.g. running from a source checkout):
    # fall back to a version-less ClientInfo.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()

__all__ = ("SessionEntityTypesClient",)
| [] | [] | [
"GOOGLE_API_USE_MTLS_ENDPOINT",
"GOOGLE_API_USE_CLIENT_CERTIFICATE"
] | [] | ["GOOGLE_API_USE_MTLS_ENDPOINT", "GOOGLE_API_USE_CLIENT_CERTIFICATE"] | python | 2 | 0 | |
main_dasalc.py | import argparse
import logging
import os
import time
import uuid
import numpy as np
import pyltr
import tensorflow as tf
from evaluation import compute_mean_ndcg, compute_perf_metrics, create_trec_eval_format_run_qrels
from lambdamart import compute_lambdamart_preds
# from model import ReRanker
from dasalc_model import ReRanker
from simulate_unsupervised_rj import compare_artif_rj_with_real_ones, sample_labels
from utils import load_model, pad_list, save_model
flags = tf.app.flags
FLAGS = flags.FLAGS
def add_arguments(parser):
    """Register all command-line flags on ``parser``.

    Fix: the boolean-style flags were declared with ``type=str`` (or
    ``type=bool``), so ``--simulate_labels False`` produced the truthy
    string ``"False"`` (and ``bool("False")`` is True).  They now use the
    custom "bool" converter registered below, which maps "true"
    (case-insensitively) to True and anything else to False.  Defaults
    are unchanged.
    """
    parser.register("type", "bool", lambda v: v.lower() == "true")

    parser.add_argument("--coll_name", type=str, default='MQ2007', help="Collection name")
    parser.add_argument("--data_folder", type=str, default='../LETOR_data/MQ2007/', help="Data folder.")
    parser.add_argument("--simulate_labels", type="bool", default=False,
                        help="Whether to train with simulated labels or not.")
    parser.add_argument("--expand_training_data", type="bool", default=False,
                        help="Whether to expand training data or not.")
    parser.add_argument("--det_model", type="bool", default=True, help="Whether to use probabilistic layers or not.")
    parser.add_argument("--rerank_lambdamart", type="bool", default=False,
                        help="Whether to rerank lambdamart preds or from scratch.")
    parser.add_argument("--lambdamart_preds_path", type=str, default='../LETOR_data/MQ2007/lambdamart_runs',
                        help="LM data folder.")

    # model parameters
    parser.add_argument("--seed", type=float, default=0, help="The random seed to use.")
    parser.add_argument("--n_binomial_samples", type=float, default=32,
                        help="The number of binomial samples to simulate.")
    # Losses tried during experimentation:
    # 'Hinge', 'KL_B', 'KL_G', 'KL_G_H', 'KL_B_H', 'ApproxNDCG', 'MSE'
    parser.add_argument("--loss", type=str, default='ApproxNDCG_G', help="The loss to use to train the model.")
    parser.add_argument("--norm_labels", type="bool", default=False,
                        help="Whether to normalize within [0,1] the relevance labels.")
    parser.add_argument("--num_features", type=int, default=46, help="Number of features per document.")
    parser.add_argument("--num_epochs", type=int, default=100, help="The number of epochs for training.")
    parser.add_argument("--n_heads", type=int, default=2, help="Num heads.")
    # batch_size=4 / list_size_test=147 are the MQ2007 settings
    # (MQ2008 used batch_size=2 and list_size_test=120).
    parser.add_argument("--batch_size", type=int, default=4, help="The batch size for training.")
    parser.add_argument("--list_size_test", type=int, default=147, help="List size.")
    parser.add_argument("--learning_rate", type=float, default=1e-3, help="Learning rate for optimizer.")
    parser.add_argument("--model_ckpt_path", type=str, default='./output/chkpts/',
                        help="Output path for checkpoint saving.")
def remove_queries_without_rel_docs(rj, docs, rl_lengths, dids):
    """Drop every query whose ranked list contains no relevant document.

    A query is removed when the maximum relevance label in its list is 0.
    The four parallel lists (labels, documents, list lengths, doc ids)
    are filtered consistently.

    Fix: the old version tested ``i not in indices_to_remove`` against a
    *list* inside each comprehension — O(n^2) overall.  A single pass
    over the kept indices is linear.
    """
    keep = [i for i, labels in enumerate(rj) if max(labels) != 0]
    rj = [rj[i] for i in keep]
    docs = [docs[i] for i in keep]
    rl_lengths = [rl_lengths[i] for i in keep]
    dids = [dids[i] for i in keep]
    return rj, docs, rl_lengths, dids
def group_docs_with_lambdamart_preds(preds, qids, docs, labels, max_list_size):
    """Group per-document data by query and order each list by LambdaMART score.

    Each ranked list is sorted by descending prediction, padded with
    zero-vectors / 0.0 labels up to `max_list_size`, and trimmed to
    `max_list_size`.  Returns (grouped_docs, grouped_labels, rl_lengths)
    where rl_lengths holds the pre-padding (capped) list sizes.
    """
    grouped = {}
    for i in range(len(qids)):
        if qids[i] in grouped.keys():
            grouped[qids[i]].append((preds[i], docs[i], labels[i]))
        else:
            grouped[qids[i]] = [(preds[i], docs[i], labels[i])]
    grouped_docs = []
    grouped_labels = []
    rl_lengths = []
    for group in grouped.values():
        # NOTE(review): np.array over (score, doc, label) tuples yields an
        # object array; column slicing below relies on that — confirm the
        # doc vectors all share one shape so numpy does not broadcast them.
        g = np.array(group)
        lmp = g[:, 0]
        # Descending sort by LambdaMART prediction.
        indices = np.argsort(-lmp)
        ranked_list = list(g[:, 1][indices])
        ranked_labels = list(g[:, 2][indices])
        # Pad short lists with zero feature vectors and zero labels.
        while len(ranked_list) < max_list_size:
            ranked_list.append(np.zeros(FLAGS.num_features))
            ranked_labels.append(0.0)
        ranked_list = ranked_list[:max_list_size]
        ranked_labels = ranked_labels[:max_list_size]
        grouped_docs.append(ranked_list)
        grouped_labels.append(ranked_labels)
        rl_lengths.append(min(max_list_size, len(lmp)))
    return grouped_docs, grouped_labels, rl_lengths
def read_data(data_folder, fold_f):
    """Load one LETOR fold (train/vali/test), normalize labels and group
    documents into fixed-size ranked lists.

    Labels are divided by the max *training* label so they fall in [0,1].
    Training queries without any relevant document are dropped; optionally
    training labels are replaced by simulated ones and/or the training set
    is expanded by resampling.
    """
    # data_fpath = './data_proc/{}_{}_listSize={}_rerank_lambdamart={}.hkl'.format(FLAGS.coll_name, fold_f,
    #                                                                              FLAGS.list_size_test,
    #                                                                              FLAGS.rerank_lambdamart)
    # if not os.path.isfile(data_fpath) or not FLAGS.load_proc_data:
    training_file_path = os.path.join(os.path.join(data_folder, fold_f), 'train.txt')
    valid_file_path = os.path.join(os.path.join(data_folder, fold_f), 'vali.txt')
    test_file_path = os.path.join(os.path.join(data_folder, fold_f), 'test.txt')
    docs_train, lab_train, qids_train, _ = pyltr.data.letor.read_dataset(open(training_file_path))
    docs_val, lab_val, qids_val, _ = pyltr.data.letor.read_dataset(open(valid_file_path))
    docs_test, lab_test, qids_test, _ = pyltr.data.letor.read_dataset(open(test_file_path))
    # LETOR files carry no document ids — fabricate unique placeholders.
    dids_train = ['fake_did_{}'.format(i) for i in range(len(docs_train))]
    dids_test = ['fake_did_{}'.format(i) for i in range(len(docs_test))]
    dids_val = ['fake_did_{}'.format(i) for i in range(len(docs_val))]
    # Normalize all splits by the max training label so labels are in [0,1].
    max_l = np.max(lab_train)
    print('max label: {}'.format(max_l))
    lab_train = np.array(lab_train) / max_l
    lab_val = np.array(lab_val) / max_l
    lab_test = np.array(lab_test) / max_l
    assert 0 <= max(lab_test) <= 1
    assert 0 <= max(lab_train) <= 1
    assert 0 <= max(lab_val) <= 1
    # without lambdamart
    ranking_lists_train, all_labels_train, rl_lengths_train, resp_qids_train, resp_dids_train = \
        group_data_in_ranking_lists(docs_train, lab_train, qids_train, dids_train, FLAGS.list_size_test)
    ranking_lists_val, all_labels_val, rl_lengths_val, resp_qids_val, resp_dids_val = \
        group_data_in_ranking_lists(docs_val, lab_val, qids_val, dids_val, FLAGS.list_size_test)
    ranking_lists_test, all_labels_test, rl_lengths_test, resp_qids_test, resp_dids_test = \
        group_data_in_ranking_lists(docs_test, lab_test, qids_test, dids_test, FLAGS.list_size_test)
    # (disabled caching of the processed splits kept from a prior version)
    # if FLAGS.load_proc_data: ... save_model(...)
    # else: ... load_model(data_fpath)
    # Queries without relevant documents contribute nothing to the losses.
    all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train = remove_queries_without_rel_docs(
        all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train)
    if FLAGS.simulate_labels:
        # Replace real training judgments with binomially sampled ones.
        # artif_labels = compute_simulated_labels(ranking_lists_train, rl_lengths_train, all_labels_train)
        artif_labels = sample_labels(all_labels_train, rl_lengths_train, FLAGS.n_binomial_samples)
        compare_artif_rj_with_real_ones(artif_labels, all_labels_train, rl_lengths_train)
        all_labels_train = artif_labels
        # Sampling can zero-out a list — filter again.
        all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train = remove_queries_without_rel_docs(
            all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train)
    # avg_n_rel_docs = np.mean([np.sum([1 for rj in rl if rj > 0]) for rl in all_labels_train])
    # print('avg number of relevant documents per ranked list in training data: {}'.format(avg_n_rel_docs))
    if FLAGS.expand_training_data:
        # Bootstrap-resample each training list into several new lists.
        ranking_lists_train, all_labels_train, rl_lengths_train = augment_training_data(ranking_lists_train,
                                                                                        all_labels_train,
                                                                                        rl_lengths_train)
        all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train = remove_queries_without_rel_docs(
            all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train)
    else:
        # Without augmentation, train lists use the same size as test lists.
        FLAGS.list_size_train = FLAGS.list_size_test
    return ranking_lists_train, all_labels_train, rl_lengths_train, resp_dids_train, \
           ranking_lists_val, all_labels_val, rl_lengths_val, resp_dids_val, resp_qids_val, \
           ranking_lists_test, all_labels_test, rl_lengths_test, resp_dids_test, resp_qids_test, \
           lab_val, lab_test, qids_val, qids_test
def augment_training_data(training_docs, training_rj, rl_lengths):
    """Expand the training set by bootstrap-resampling each ranked list.

    Every original list is resampled (with replacement) five times into
    lists of exactly FLAGS.list_size_train entries; labels follow their
    documents.  Returns the new (docs, labels, lengths) triple.
    """
    rj_arr = np.array(training_rj)
    len_arr = np.array(rl_lengths)
    samples_per_list = 5
    aug_docs = []
    aug_rj = []
    aug_lens = []
    for list_idx, docs in enumerate(training_docs):
        # Only the real (non-padding) prefix is eligible for sampling.
        pool = np.array(docs[:len_arr[list_idx]])
        candidate_ids = [idx for idx in range(len(pool))]
        for _ in range(samples_per_list):
            picked = np.random.choice(candidate_ids, size=FLAGS.list_size_train,
                                      replace=True)
            aug_docs.append(pool[picked])
            aug_rj.append(rj_arr[list_idx][picked])
            aug_lens.append(FLAGS.list_size_train)
    return aug_docs, aug_rj, aug_lens
def load_lambdaMART_preds(fold_f, lambdamart_preds_path):
    """Load cached LambdaMART predictions for one fold's splits.

    Fold rotation:
        Fold    Training.txt    Validation.txt    Test.txt
        Fold1   S1, S2, S3      S4                S5
        Fold2   S2, S3, S4      S5                S1
        Fold3   S3, S4, S5      S1                S2
        Fold4   S4, S5, S1      S2                S3
        Fold5   S5, S1, S2      S3                S4

    Test predictions for `fold_f` are computed on demand when the cached
    file is missing.  Returns (training_preds, test_preds, val_preds).

    Fix: an unknown fold name used to fall through the if/elif chain and
    crash later with an IndexError on an empty validation list; it now
    raises a descriptive ValueError up front.
    """
    # Which per-fold prediction files make up the train/validation sides.
    fold_splits = {
        'Fold1': (['Fold2', 'Fold3', 'Fold4'], 'Fold5'),
        'Fold2': (['Fold3', 'Fold4', 'Fold5'], 'Fold1'),
        'Fold3': (['Fold4', 'Fold5', 'Fold1'], 'Fold2'),
        'Fold4': (['Fold5', 'Fold1', 'Fold2'], 'Fold3'),
        'Fold5': (['Fold1', 'Fold2', 'Fold3'], 'Fold4'),
    }
    if fold_f not in fold_splits:
        raise ValueError('Unknown fold: {}'.format(fold_f))
    training_folds, validation_fold = fold_splits[fold_f]

    test_preds_path = os.path.join(lambdamart_preds_path, FLAGS.coll_name + '_lightgbm_' + fold_f + '.hkl')
    if not os.path.isfile(test_preds_path):
        # Cache miss: train LightGBM and dump predictions for all folds.
        compute_lambdamart_preds(FLAGS)
    test_preds = load_model(test_preds_path)

    training_preds = []
    for ff in training_folds:
        tmp_model_path = os.path.join(lambdamart_preds_path, FLAGS.coll_name + '_lightgbm_' + ff + '.hkl')
        training_preds.extend(load_model(tmp_model_path))

    val_preds_path = os.path.join(lambdamart_preds_path, FLAGS.coll_name + '_lightgbm_' + validation_fold + '.hkl')
    val_preds = load_model(val_preds_path)
    return training_preds, test_preds, val_preds
def group_data_in_ranking_lists(vectors, labels, qids, dids, list_size):
    """Group flat per-document arrays by query id into fixed-size lists.

    Each query's documents/labels/dids are padded (zero vectors, 0.0
    labels, synthetic "padding_did_*" ids) or trimmed to exactly
    `list_size`.  Returns (ranking_lists, all_labels, rl_lengths,
    resp_qids, all_dids) with rl_lengths holding the pre-padding, capped
    list sizes.
    """
    assert len(qids) == len(labels)
    assert len(qids) == len(vectors)
    # Map qid -> indices of its documents, preserving first-seen order.
    data_indices_grouped_by_qid = {}
    for i in range(len(qids)):
        curr_qid = qids[i]
        if curr_qid not in data_indices_grouped_by_qid.keys():
            data_indices_grouped_by_qid[curr_qid] = [i]
        else:
            data_indices_grouped_by_qid[curr_qid].append(i)
    print('mean ranking list length: %2.4f' % np.mean([len(item) for item in data_indices_grouped_by_qid.values()]))
    print('max ranking list length: %2.4f' % np.max([len(item) for item in data_indices_grouped_by_qid.values()]))
    print('min ranking list length: %2.4f' % np.min([len(item) for item in data_indices_grouped_by_qid.values()]))
    ranking_lists = []
    all_labels = []
    rl_lengths = []
    resp_qids = []
    all_dids = []
    for qid, indices_group in data_indices_grouped_by_qid.items():
        curr_dids = [dids[i] for i in indices_group]
        vecs = [vectors[i] for i in indices_group]
        curr_labels = [labels[i] for i in indices_group]
        original_rl_len = len(curr_labels)
        # pad ranking lists now
        # NOTE(review): pad_list presumably pads/trims vecs to list_size
        # (it is imported from utils) — confirm it also trims long lists.
        vecs = pad_list(vecs, list_size)
        # Trim then zero-pad labels to exactly list_size entries.
        curr_labels = curr_labels[0: min(list_size, len(curr_labels))]
        curr_labels = curr_labels + [0.0] * (list_size - len(curr_labels))
        resp_qids.append(qid)
        # Same trim/pad for document ids, using synthetic padding ids.
        curr_dids = curr_dids[0: min(list_size, len(curr_dids))]
        curr_dids.extend('padding_did_{}'.format(i) for i in range(list_size - len(curr_dids)))
        # append to output values
        all_labels.append(curr_labels)
        ranking_lists.append(vecs)
        all_dids.append(curr_dids)
        rl_lengths.append(min(list_size, original_rl_len))
    return ranking_lists, all_labels, rl_lengths, resp_qids, all_dids
def group_rj_in_ranking_lists_no_pad_trim(qids, labs):
    """Group per-document relevance labels into one list per query.

    No padding or trimming is applied.  Lists are returned in the order
    each qid first appears (dict insertion order).

    Fix: removed the dead `doc_scores`/`curr_scores` accumulators — the
    old code built a parallel list of empty score lists that was never
    populated nor returned.
    """
    ranking_lists = {}
    for qid, label in zip(qids, labs):
        ranking_lists.setdefault(qid, []).append(label)
    # Return copies so callers can mutate the lists independently.
    return [list(labels) for labels in ranking_lists.values()]
def compute_ranking_lists_rl_length_masks(rl_lengths, list_size):
    """Build one binary mask per ranked list: 1.0 marks a real document
    position, 0.0 marks padding beyond the list's true length."""
    masks = []
    for length in rl_lengths:
        mask = np.zeros(list_size)
        mask[:max(0, min(length, list_size))] = 1
        masks.append(mask)
    return masks
def remove_training_rl_without_rel_docs(train_rj, train_docs, rl_lengths_train):
    """Drop training ranked lists that contain no relevant document.

    A list is removed when its maximum relevance label is 0; the three
    parallel lists are filtered consistently.

    Fix: same as remove_queries_without_rel_docs — the old list-membership
    test inside each comprehension was O(n^2); one linear pass over the
    kept indices suffices (and keeps both helpers consistent).
    """
    keep = [i for i, labels in enumerate(train_rj) if max(labels) != 0]
    train_rj = [train_rj[i] for i in keep]
    train_docs = [train_docs[i] for i in keep]
    rl_lengths_train = [rl_lengths_train[i] for i in keep]
    return train_rj, train_docs, rl_lengths_train
def test_model(sess, model, model_path, test_rj, test_docs, rl_lengths, qids_test, labels_test_non_grouped,
               silent=False):
    """Restore a checkpoint and evaluate it on the given ranked lists.

    Runs `msamples` stochastic forward passes (1 when the model is
    deterministic), averages the scores, and computes nDCG/other metrics
    against the ideal ranking.  Returns (avg_preds, ndcg@1, var_preds,
    perf_metrics_dict).
    """
    rl_test_masks = compute_ranking_lists_rl_length_masks(rl_lengths, FLAGS.list_size_test)
    # (commented-out graph/session/model re-construction kept from an
    #  earlier standalone version of this function)
    # tf.reset_default_graph()
    # sess_config = tf.ConfigProto()
    # sess_config.gpu_options.allow_growth = True
    # sess = tf.Session(config=sess_config, graph=tf.get_default_graph())
    # model = ReRanker(...)
    tf.set_random_seed(FLAGS.seed)
    # sess.run(model.init_op)
    model.saver.restore(sess, model_path)
    sess.graph.finalize()
    # compute_predictions
    # Probabilistic models are sampled 50 times; deterministic ones once.
    msamples = 50
    if FLAGS.det_model:
        msamples = 1
    all_preds = np.zeros(shape=(msamples, len(test_docs), FLAGS.list_size_test))
    for k in range(msamples):
        scores = sess.run(model.logits,
                          {model.training: False, model.input_docs: test_docs, model.rl_lengths_mask: rl_test_masks})
        if FLAGS.loss == 'ML':
            # ML loss emits per-position label distributions; take argmax.
            all_preds[k] = np.argmax(scores, axis=-1)
        else:
            all_preds[k] = scores
    # Monte-Carlo mean and variance over the sampled forward passes.
    avg_preds = np.mean(all_preds, axis=0)
    var_preds = np.var(all_preds, axis=0)
    # NOTE(review): both branches below assign 0, so the whole loop and
    # the two additions that follow are no-ops.  The commented -np.inf
    # suggests padded positions were once masked out of the ranking —
    # confirm whether that masking should be restored.
    for i in range(len(rl_test_masks)):
        for j in range(len(rl_test_masks[i])):
            if rl_test_masks[i][j] == 0:
                rl_test_masks[i][j] = 0  # -np.inf
            else:
                rl_test_masks[i][j] = 0
    avg_preds = rl_test_masks + avg_preds
    var_preds = rl_test_masks + var_preds
    # Ideal ranking per query = labels sorted in decreasing order.
    grouped_rj = group_rj_in_ranking_lists_no_pad_trim(qids_test, labels_test_non_grouped)
    ideal_rel_j_lists = [np.array(rl)[np.argsort(-np.array(rl))] for rl in grouped_rj]
    ndcg_1, base_1 = compute_mean_ndcg(avg_preds, test_rj, ideal_rel_j_lists, 1)
    return avg_preds, ndcg_1, var_preds, compute_perf_metrics(avg_preds, test_rj, ideal_rel_j_lists, silent, rl_lengths)
def get_batches(all_docs, all_labels, rl_lengths_mask):
    """Yield (docs, labels, length-masks) mini-batches of FLAGS.batch_size.

    The three inputs are parallel lists; the final batch may be smaller
    than the configured batch size.
    """
    size = FLAGS.batch_size
    for start in range(0, len(all_docs), size):
        stop = start + size
        yield all_docs[start:stop], all_labels[start:stop], rl_lengths_mask[start:stop]
def train_model(sess, model, train_docs, train_rj, rl_train_masks, test_rj, test_docs, rl_lengths_test,
                labels_test_non_grouped, qids_test, model_suffix):
    """Train the model, checkpointing and evaluating after every epoch.

    Early stopping: patience decreases whenever a batch loss fails to
    improve on the reference loss, and training stops when it hits 0.
    Returns (checkpoint_paths, ndcg@1 per epoch), from which the caller
    picks the best checkpoint.
    """
    ckpt_paths = []
    perfs = []
    max_patience = 20
    patience = 20
    ploss = None
    early_stopping = False
    for epoch in range(1, FLAGS.num_epochs + 1):
        if early_stopping:
            break
        print('*** EPOCH: %d/%d' % (epoch, FLAGS.num_epochs))
        start = time.time()
        for db, rjb, lenb in get_batches(train_docs, train_rj, rl_train_masks):
            _, step, loss = sess.run(
                [model.train_op, model.global_step, model.loss],
                feed_dict={model.input_docs: db,
                           model.relevance_judgments: rjb,
                           model.rl_lengths_mask: lenb,
                           model.training: True})
            # NOTE(review): ploss is set once from the very first batch and
            # never updated, so patience compares every later loss against
            # that single value — confirm whether ploss should track the
            # best/previous loss instead.
            if ploss is None:
                ploss = loss
            else:
                if loss >= ploss:
                    patience -= 1
                    if patience == 0:
                        early_stopping = True
                        print('early stopping')
                        break
                else:
                    patience = max_patience
            if step % 50 == 0:
                end = time.time()
                print('step: %d, loss: %2.6f, time: %2.3fs' % (step, loss, (end - start)))
        step = sess.run(model.global_step)
        # save_path = model.saver.save(sess, os.path.join(FLAGS.model_ckpt_path, 'ckpt_' + model_suffix),
        #                              global_step=step)
        # Retry the checkpoint save a few times (e.g. transient FS errors).
        # NOTE(review): the bare `except` swallows every error type, and if
        # all 100 attempts fail, `save_path` below is unbound (NameError).
        for _ in range(100):
            try:
                save_path = model.saver.save(sess, os.path.join(FLAGS.model_ckpt_path, 'ckpt_' + model_suffix),
                                             global_step=step)
            except:
                print('exception, retrying')
                continue
            break
        print("Model saved in path: %s" % save_path)
        # Evaluate the freshly saved checkpoint; keep nDCG@1 for model selection.
        preds, ndcg_1, var_preds, _ = test_model(sess, model, save_path, test_rj, test_docs, rl_lengths_test, qids_test,
                                                 labels_test_non_grouped, silent=False)
        perfs.append(ndcg_1)
        ckpt_paths.append(save_path)
    return ckpt_paths, perfs
def train_eval_model(train_rj, train_docs, test_rj, test_docs, rl_lengths_train, rl_lengths_test,
                     labels_test_non_grouped, qids_test, model_suffix=None):
    """Build a fresh graph/session, train a ReRanker, and return the path
    of the best checkpoint (by nDCG@1) together with the live session and
    model.

    Fix: the old signature used ``model_suffix=str(uuid.uuid4())`` — a
    default evaluated ONCE at definition time, so every call that omitted
    the argument shared one suffix and the per-fold checkpoints could
    collide (each fold resets global_step in its new graph).  A fresh
    UUID is now generated per call.
    """
    if model_suffix is None:
        model_suffix = str(uuid.uuid4())
    rl_train_masks = compute_ranking_lists_rl_length_masks(rl_lengths_train, FLAGS.list_size_train)
    print('max ranking list length in training data: %d' % max(rl_lengths_train))
    print('max ranking list length in test data: %d' % max(rl_lengths_test))
    # initialize graph and session
    tf.reset_default_graph()
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config, graph=tf.get_default_graph())
    # initialize model
    model = ReRanker(FLAGS.seed, FLAGS.learning_rate, coll_name=FLAGS.coll_name, det_model=FLAGS.det_model,
                     n_heads=FLAGS.n_heads,
                     num_features=FLAGS.num_features, n=FLAGS.n_binomial_samples,
                     loss_fn=FLAGS.loss, list_size=FLAGS.list_size_train, max_label_value=4,
                     norm_labels=FLAGS.norm_labels)
    tf.set_random_seed(FLAGS.seed)
    sess.run(model.init_op)
    sess.graph.finalize()
    start_training = time.time()
    ckpt_paths, perfs = train_model(sess, model, train_docs, train_rj, rl_train_masks, test_rj, test_docs,
                                    rl_lengths_test, labels_test_non_grouped, qids_test, model_suffix)
    print('Model trained in: %2.4fs' % (time.time() - start_training))
    # load and evaluate best model
    best_model_path = ckpt_paths[np.argmax(perfs)]
    print('Best ckpt model path: %s' % best_model_path)
    return best_model_path, sess, model
def run():
    """Run 5-fold cross-validation: train per fold, evaluate the best
    checkpoint on the fold's test split, then report metrics averaged
    across folds and dump a TREC-eval run/qrels pair."""
    fold_folders = ['Fold1', 'Fold2', 'Fold3', 'Fold4', 'Fold5']
    # fold_folders = ['Fold1']
    all_preds = []
    all_rjs = []
    all_qids_test = []
    all_qids_test_non_g = []
    all_dids_test = []
    all_lab_test_non_grouped = []
    all_rl_lengths = []
    perfs_across_folds = {}
    for fold_f in fold_folders:
        ranking_lists_train, all_labels_train, rl_lengths_train, resp_dids_train, \
        ranking_lists_val, all_labels_val, rl_lengths_val, resp_dids_val, resp_qids_val, \
        ranking_lists_test, all_labels_test, rl_lengths_test, resp_dids_test, resp_qids_test, \
        lab_val_non_grouped, lab_test_non_grouped, qids_val, qids_test = read_data(data_folder=FLAGS.data_folder,
                                                                                  fold_f=fold_f)
        # print(qids_test)
        # Model selection happens on the validation split.
        best_model_path, sess, model = train_eval_model(all_labels_train, ranking_lists_train, all_labels_val,
                                                        ranking_lists_val,
                                                        rl_lengths_train, rl_lengths_val, lab_val_non_grouped, qids_val)
        # Final evaluation of the selected checkpoint on the test split.
        avg_preds, ndcg_1, var_preds, all_perf = test_model(sess, model, best_model_path, all_labels_test,
                                                            ranking_lists_test, rl_lengths_test, qids_test,
                                                            lab_test_non_grouped)
        # Accumulate per-fold outputs for the global evaluation below.
        all_preds.extend(avg_preds)
        all_rjs.extend(all_labels_test)
        all_qids_test.extend(resp_qids_test)
        all_qids_test_non_g.extend(qids_test)
        all_dids_test.extend(resp_dids_test)
        all_lab_test_non_grouped.extend(lab_test_non_grouped)
        all_rl_lengths.extend(rl_lengths_test)
        for k, v in all_perf.items():
            if k in perfs_across_folds.keys():
                perfs_across_folds[k].append(v)
            else:
                perfs_across_folds[k] = [v]
    for k, v in perfs_across_folds.items():
        print('{}: {}'.format(k, np.mean(v)))
    # save_model((all_preds, all_rjs, all_qids_test, all_dids_test, all_qids_test_non_g, all_lab_test_non_grouped),
    #            './output/final_preds_data_{}_{}_{}.hkl'.format(FLAGS.coll_name, FLAGS.loss, FLAGS.simulate_labels))
    grouped_rj = group_rj_in_ranking_lists_no_pad_trim(all_qids_test_non_g, all_lab_test_non_grouped)
    ideal_rel_j_lists = [np.array(rl)[np.argsort(-np.array(rl))] for rl in grouped_rj]
    # Undo the [0,1] normalization: rescale by the smallest non-zero label.
    all_rjs = np.array(all_rjs) * int(1.0 / sorted(set(all_lab_test_non_grouped))[1])
    ideal_rel_j_lists = np.array(ideal_rel_j_lists) * int(1.0 / sorted(set(all_lab_test_non_grouped))[1])
    print('\nFINAL PERF AVGD ACROSS FOLDS:')
    # import pdb
    # pdb.set_trace()
    compute_perf_metrics(all_preds, all_rjs, ideal_rel_j_lists, False, all_rl_lengths, max_rj=2.0)
    create_trec_eval_format_run_qrels(all_preds, all_dids_test, all_qids_test, all_rjs,
                                      'DASALC_{}_loss={}_simulate_labels={}_det_model={}'.format(FLAGS.coll_name,
                                                                                                 FLAGS.loss,
                                                                                                 FLAGS.simulate_labels,
                                                                                                 FLAGS.det_model),
                                      './output')
    return
def create_trec_format_run(qids, dids, preds, ofpath):
    """Write predictions as a TREC-format run file.

    Each output line is ``qid Q0 docid rank score runname``.

    Fix: the old code sorted scores in ASCENDING order and wrote the
    document's original position as its rank; TREC runs rank documents by
    DESCENDING score, with the rank column equal to the sorted position
    (1 = best).
    """
    with open(ofpath, 'w') as out:
        for rl_idx in range(len(preds)):
            # Descending-score order for this query's ranked list.
            order = np.argsort(-np.asarray(preds[rl_idx]))
            for rank, item_idx in enumerate(order, start=1):
                out.write('{} Q0 {} {} {} {}\n'.format(
                    qids[rl_idx], dids[rl_idx][item_idx], rank,
                    preds[rl_idx][item_idx], 'PFusion'))
def flatten_stuff_provide_fake_qids(all_preds, all_rjs):
    """Flatten per-query predictions/labels and fabricate one qid per list.

    The synthetic qid is the ranked list's position, repeated once per
    document in that list.  Returns three parallel numpy arrays.
    """
    flat_preds = []
    flat_labels = []
    fake_qids = []
    for fake_qid, (pred_list, rj_list) in enumerate(zip(all_preds, all_rjs)):
        flat_preds.extend(pred_list)
        flat_labels.extend(rj_list)
        fake_qids.extend([fake_qid] * len(pred_list))
    return np.array(flat_preds), np.array(flat_labels), np.array(fake_qids)
if __name__ == '__main__':
    # Silence TensorFlow C++ and Python-side logging noise.
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
    logging.getLogger("tensorflow").setLevel(logging.CRITICAL)
    arg_parser = argparse.ArgumentParser()
    add_arguments(arg_parser)
    # FLAGS is module-level: every function above reads hyperparameters
    # from it.  Unknown CLI args are ignored (parse_known_args).
    FLAGS, unparsed = arg_parser.parse_known_args()
    for arg in vars(FLAGS):
        print(arg, ":", getattr(FLAGS, arg))
    if not os.path.exists(FLAGS.model_ckpt_path):
        os.makedirs(FLAGS.model_ckpt_path)
    # NOTE(review): --seed is declared with type=float, but np.random.seed
    # expects an int — a non-default CLI seed would arrive as a float here;
    # confirm whether the flag should be type=int.
    np.random.seed(FLAGS.seed)
    tf.random.set_random_seed(FLAGS.seed)
    run()
    print(FLAGS.loss)
    print('DONE')
| [] | [] | [
"TF_CPP_MIN_LOG_LEVEL"
] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
webapp/app/routes/utils.py | from flask.helpers import url_for
from ..connection.functions import get_job_type
from flask_login import current_user
from flask import redirect, flash
# Per-role WRITE whitelists: each list names the DB tables that role may
# modify; has_modify_permission() compares str(table_name).lower() against
# these, so entries are stored lowercase.
admin_modify = ['country', 'airport', 'airline', 'contains', 'employee',
                'flight', 'airport_authority', 'administration', 'engineer',
                'traffic_monitor', 'salary']
engineer_modify = ['flight']
# NOTE(review): identifier looks misspelled (airport_authority_modify?);
# kept as-is since other modules may import this exact name.
airport_autority_mody = ['flight', 'price']
traffic_monitor_modify = ['flight', 'ticket']
administration_modify = ['passenger', 'ticket']
# READ whitelist applied to every employee regardless of job type.
employee_view = ['flight', 'price', 'passenger', 'ticket']
def has_view_permission(table_name):
    """Return True when the current user may view `table_name`.

    On denial (or when unauthenticated) the user is flashed an error and a
    Flask redirect response is returned instead of a boolean, so callers
    must treat any non-True result as "denied".
    """
    if not current_user.is_authenticated:
        flash('You need to log in!!', 'error')
        return redirect(url_for('login_page_roure'))
    table = str(table_name).lower()
    # Passengers may only look at flights.
    if current_user.type == 'passenger' and table != 'flight':
        flash('You do not have access to view this data!!', 'error')
        return redirect(url_for('passenger_home_route'))
    # Employees are limited to the shared read whitelist.
    if current_user.type == 'employee' and table not in employee_view:
        flash('You do not have access to view this data!!', 'error')
        return redirect(url_for('employee_home_route'))
    return True
def has_modify_permission(table_name):
    """Return True when the current user may modify `table_name`.

    On denial (or when unauthenticated) the user is flashed an error and a
    Flask redirect response is returned instead of a boolean, so callers
    must treat any non-True result as "denied".

    Fix: the per-job-type checks were independent ``if`` statements and
    the trailing ``else`` (airport-authority rules) was bound only to the
    Traffic_monitor check — so Administration and Engineer employees also
    fell through to the airport-authority whitelist and were wrongly
    denied tables they are allowed to modify.  The chain is now a proper
    if/elif/else selecting a single whitelist.
    """
    if not current_user.is_authenticated:
        flash('You need to log in!!', 'error')
        return redirect(url_for('login_page_roure'))
    table = str(table_name).lower()
    if current_user.type == 'passenger':
        # Passengers may only touch their own tickets.
        if table != 'ticket':
            flash('You do not have access to modify this data!!', 'error')
            return redirect(url_for('passenger_home_route'))
    if current_user.type == 'employee':
        response = get_job_type(current_user.id)
        if not response['success']:
            flash('There was some error, try again!!', 'error')
            return redirect(url_for('employee_home_route'))
        job_type = response['data']
        # Pick exactly one whitelist for this employee's job type.
        if job_type == 'Administration':
            allowed = administration_modify
        elif job_type == 'Engineer':
            allowed = engineer_modify
        elif job_type == 'Traffic_monitor':
            allowed = traffic_monitor_modify
        else:
            allowed = airport_autority_mody
        if table not in allowed:
            flash('You do not have access to modify this data!!', 'error')
            return redirect(url_for('employee_home_route'))
    if current_user.type == 'admin':
        if table not in admin_modify:
            flash('You do not have access to modify this data!!', 'error')
            return redirect(url_for('admin_home_route'))
    return True
def create_where_condition(data):
    """Build a SQL WHERE body from comma-separated condition fragments.

    ``data`` is a string like ``"a=1,b=2"``; the fragments are joined with
    ``' AND '`` (``"a=1 AND b=2"``).

    BUG FIX: the previous counter-based loop broke one iteration early, so
    the last condition was silently dropped ("a,b,c" -> "a AND b") and a
    single condition gained a dangling " AND " ("a" -> "a AND ").
    """
    conditions = data.split(',')
    return ' AND '.join(conditions)
def create_values(column_data, data, is_set):
    """Render the values of ``data`` as a comma-separated SQL fragment.

    ``column_data`` is a list of dicts with ``column_name`` and
    ``data_type`` describing each column, in the same order as the keys of
    the ``data`` dict. Integers are emitted bare, everything else
    single-quoted, and empty values become ``NULL``. With ``is_set`` True
    each item is rendered as ``column=value`` (UPDATE ... SET form).

    NOTE(review): values are interpolated directly into SQL text — assumes
    inputs are trusted; confirm callers never pass user-controlled strings.
    """
    parts = []
    for column, key in zip(column_data, data):
        raw = str(data[key])
        if len(raw) == 0:
            rendered = 'NULL'
        elif column['data_type'] == 'integer':
            rendered = raw
        else:
            rendered = f"'{raw}'"
        if is_set:
            parts.append(f"{column['column_name']}={rendered}")
        else:
            parts.append(rendered)
    return ', '.join(parts)
| [] | [] | [] | [] | [] | python | null | null | null |
app.go | package main
import (
"encoding/json"
"log"
"net/http"
"os"
"github.com/julienschmidt/httprouter"
)
// How to try: PORT=3000 go run app.go version.go

// main serves the build-info endpoint on the port named by the PORT
// environment variable, exiting if PORT is unset.
func main() {
	port := os.Getenv("PORT")
	if port == "" {
		log.Fatal("Couldn't start the service, port is not set")
	}

	router := httprouter.New()
	router.GET("/", info)

	if err := http.ListenAndServe(":"+port, router); err != nil {
		log.Fatal(err)
	}
}
// info writes the service build metadata (version, commit, build time,
// all set in version.go) as a JSON document.
func info(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")

	payload := struct {
		Version   string `json:"version"`
		Commit    string `json:"commit"`
		BuildTime string `json:"buildTime"`
	}{version, commit, buildTime}

	if err := json.NewEncoder(w).Encode(payload); err != nil {
		log.Printf("Info handler, error during encode: %v", err)
		http.Error(w, "Something went wrong", http.StatusServiceUnavailable)
	}
}
| [
"\"PORT\""
] | [] | [
"PORT"
] | [] | ["PORT"] | go | 1 | 0 | |
config/config.go | package config
import (
"encoding/json"
"fmt"
"os"
"io/ioutil"
"regexp"
"github.com/xichengh/xcblog/utils"
)
// DataType mirrors the layout of config.json: a mongodb section and a base section.
type DataType struct {
	Mongodb Mongodb `json:"mongodb"`
	Base Base `json:"base"`
}

// Base holds general application settings loaded from config.json.
type Base struct {
	APIPrefix string `json:"APIPrefix"` /* URL prefix for API routes */
	UploadImgDir string `json:"UploadImgDir"` /* directory for uploaded images (original comment was a stale copy of APIPrefix's) */
	ImgPath string `json:"ImgPath"` /* URL prefix for requesting uploaded images */
	MaxMultipartMemory int64 `json:"MaxMultipartMemory"` /* maximum allowed upload size, in MB */
	Port int16 `json:"Port"` /* port the Go server listens on */
	TokenSecret string `json:"TokenSecret"` /* secret used to sign tokens */
	TokenMaxAge int64 `json:"TokenMaxAge"` /* token expiry, in seconds */
	CooKieMaxAge int `json:"CooKieMaxAge"` /* cookie expiry, in seconds */
	LogFile string /* log file path, computed at startup in logFile() */
	LogDir string /* log directory */
	ReleaseMode bool /* whether running in production/release mode (GO_ENV=production) */
}

// Mongodb holds the MongoDB connection settings.
type Mongodb struct {
	Database string `json:"Database"` /* database name */
	Address string `json:"Address"` /* database host address */
	UserName string `json:"UserName"` /* database user */
	PassWord string `json:"PassWord"` /* database password */
	Url string `json:"Url"` /* full connection URL, derived in mongoDialUrl() */
}

// MongoConf and BaseConf are the package-level configuration singletons,
// populated from config.json by initConfig().
var MongoConf Mongodb
var BaseConf Base
// logFile derives today's log file path from BaseConf and records whether
// we are running in release mode (GO_ENV=production).
func logFile() {
	ymdStr := utils.GetTodayYMD("-")
	pwd, _ := os.Getwd()
	// If a log directory is configured, resolve it against the working
	// directory and make sure it exists.
	if BaseConf.LogDir != "" {
		absDir := pwd + BaseConf.LogDir
		// BUG FIX: the existence check previously inspected the relative
		// BaseConf.LogDir while the directory was created (and later used)
		// at pwd+LogDir; check the same absolute path that is created.
		if utils.CheckNotExist(absDir) {
			utils.MkDir(absDir)
		}
		BaseConf.LogDir = absDir + "/"
	}
	BaseConf.LogFile = BaseConf.LogDir + ymdStr + ".log"
	BaseConf.ReleaseMode = os.Getenv("GO_ENV") == "production"
}
// mongoDialUrl assembles MongoConf.Url from the configured credentials and
// address; credentials are omitted when either username or password is empty.
func mongoDialUrl() {
	if MongoConf.UserName == "" || MongoConf.PassWord == "" {
		MongoConf.Url = "mongodb://" + MongoConf.Address
		return
	}
	MongoConf.Url = fmt.Sprintf("mongodb://%s:%s@%s/%s",
		MongoConf.UserName, MongoConf.PassWord, MongoConf.Address, MongoConf.Database)
}
// initConfig loads the JSON configuration file (default "config.json",
// overridable via the first CLI argument), strips /*...*/ comments so it
// parses as strict JSON, and populates MongoConf and BaseConf.
// It exits the process if the file cannot be read or parsed.
func initConfig() {
	var data DataType
	configFilePath := "config.json"
	if len(os.Args) > 1 {
		configFilePath = os.Args[1]
	}
	configFile, err := os.Open(configFilePath)
	if err != nil {
		fmt.Println("OpenFile: ", err.Error())
		os.Exit(-1)
	}
	// BUG FIX: the defer was previously registered before the error check,
	// calling Close on a nil file handle when Open failed.
	defer configFile.Close()
	bytes, err := ioutil.ReadAll(configFile)
	if err != nil {
		fmt.Println("ReadFile: ", err.Error())
		os.Exit(-1)
	}
	// Strip /* ... */ comments so the remainder is valid JSON.
	reg := regexp.MustCompile(`/\*.*\*/`)
	configStr := reg.ReplaceAllString(string(bytes[:]), "")
	// BUG FIX: the Unmarshal error was silently ignored; a malformed config
	// previously produced an all-zero configuration without warning.
	if err := json.Unmarshal([]byte(configStr), &data); err != nil {
		fmt.Println("ParseConfig: ", err.Error())
		os.Exit(-1)
	}
	MongoConf = data.Mongodb
	BaseConf = data.Base
	if !BaseConf.ReleaseMode {
		fmt.Println("---------------------配置文件加载完成-------------------")
	}
	// NOTE(review): BaseConf.ReleaseMode is only assigned later in logFile(),
	// so it is always false here and the banner always prints — confirm
	// whether that is intended before reordering init steps.
}
// init wires up the package on import: load the config file, derive the
// MongoDB connection URL from it, then configure log file paths.
func init() {
	initConfig()
	mongoDialUrl()
	logFile()
}
"\"GO_ENV\""
] | [] | [
"GO_ENV"
] | [] | ["GO_ENV"] | go | 1 | 0 | |
command/root.go | package command
import (
"errors"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"runtime/debug"
"strings"
"github.com/MakeNowJust/heredoc"
"github.com/cli/cli/api"
"github.com/cli/cli/context"
"github.com/cli/cli/git"
"github.com/cli/cli/internal/config"
"github.com/cli/cli/internal/ghinstance"
"github.com/cli/cli/internal/ghrepo"
"github.com/cli/cli/internal/run"
apiCmd "github.com/cli/cli/pkg/cmd/api"
authCmd "github.com/cli/cli/pkg/cmd/auth"
authLoginCmd "github.com/cli/cli/pkg/cmd/auth/login"
authLogoutCmd "github.com/cli/cli/pkg/cmd/auth/logout"
gistCreateCmd "github.com/cli/cli/pkg/cmd/gist/create"
prCheckoutCmd "github.com/cli/cli/pkg/cmd/pr/checkout"
prDiffCmd "github.com/cli/cli/pkg/cmd/pr/diff"
prReviewCmd "github.com/cli/cli/pkg/cmd/pr/review"
repoCmd "github.com/cli/cli/pkg/cmd/repo"
repoCloneCmd "github.com/cli/cli/pkg/cmd/repo/clone"
repoCreateCmd "github.com/cli/cli/pkg/cmd/repo/create"
creditsCmd "github.com/cli/cli/pkg/cmd/repo/credits"
repoForkCmd "github.com/cli/cli/pkg/cmd/repo/fork"
repoViewCmd "github.com/cli/cli/pkg/cmd/repo/view"
"github.com/cli/cli/pkg/cmdutil"
"github.com/cli/cli/pkg/iostreams"
"github.com/cli/cli/utils"
"github.com/google/shlex"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
// Version is dynamically set by the toolchain or overridden by the Makefile.
var Version = "DEV"
// BuildDate is dynamically set at build time in the Makefile.
var BuildDate = "" // YYYY-MM-DD
var versionOutput = ""
var defaultStreams *iostreams.IOStreams
// init assembles the root `gh` command tree: it resolves the build version,
// registers global flags and help/usage functions, constructs the shared
// cmdutil.Factory, and attaches every subcommand.
func init() {
	// Fall back to the module version embedded by the Go toolchain when the
	// Makefile did not override Version.
	if Version == "DEV" {
		if info, ok := debug.ReadBuildInfo(); ok && info.Main.Version != "(devel)" {
			Version = info.Main.Version
		}
	}
	Version = strings.TrimPrefix(Version, "v")
	if BuildDate == "" {
		RootCmd.Version = Version
	} else {
		RootCmd.Version = fmt.Sprintf("%s (%s)", Version, BuildDate)
	}
	versionOutput = fmt.Sprintf("gh version %s\n%s\n", RootCmd.Version, changelogURL(Version))
	RootCmd.AddCommand(versionCmd)
	RootCmd.SetVersionTemplate(versionOutput)

	RootCmd.PersistentFlags().Bool("help", false, "Show help for command")
	RootCmd.Flags().Bool("version", false, "Show gh version")
	// TODO:
	// RootCmd.PersistentFlags().BoolP("verbose", "V", false, "enable verbose output")

	RootCmd.SetHelpFunc(rootHelpFunc)
	RootCmd.SetUsageFunc(rootUsageFunc)

	// Let cobra's own help sentinel pass through; wrap everything else so
	// callers can distinguish flag-parsing failures.
	RootCmd.SetFlagErrorFunc(func(cmd *cobra.Command, err error) error {
		if err == pflag.ErrHelp {
			return err
		}
		return &cmdutil.FlagError{Err: err}
	})

	defaultStreams = iostreams.System()

	// TODO: iron out how a factory incorporates context
	cmdFactory := &cmdutil.Factory{
		IOStreams: defaultStreams,
		HttpClient: func() (*http.Client, error) {
			// TODO: decouple from `context`
			ctx := context.New()
			cfg, err := ctx.Config()
			if err != nil {
				return nil, err
			}
			// TODO: avoid setting Accept header for `api` command
			return httpClient(defaultStreams, cfg, true), nil
		},
		BaseRepo: func() (ghrepo.Interface, error) {
			// TODO: decouple from `context`
			ctx := context.New()
			return ctx.BaseRepo()
		},
		Remotes: func() (context.Remotes, error) {
			ctx := context.New()
			return ctx.Remotes()
		},
		Config: func() (config.Config, error) {
			cfg, err := config.ParseDefaultConfig()
			// A missing config file is not an error: start from a blank config.
			if errors.Is(err, os.ErrNotExist) {
				cfg = config.NewBlankConfig()
			} else if err != nil {
				return nil, err
			}
			return cfg, nil
		},
		Branch: func() (string, error) {
			currentBranch, err := git.CurrentBranch()
			if err != nil {
				return "", fmt.Errorf("could not determine current branch: %w", err)
			}
			return currentBranch, nil
		},
	}
	RootCmd.AddCommand(apiCmd.NewCmdApi(cmdFactory, nil))

	gistCmd := &cobra.Command{
		Use:   "gist",
		Short: "Create gists",
		Long:  `Work with GitHub gists.`,
	}
	RootCmd.AddCommand(gistCmd)
	gistCmd.AddCommand(gistCreateCmd.NewCmdCreate(cmdFactory, nil))

	RootCmd.AddCommand(authCmd.Cmd)
	authCmd.Cmd.AddCommand(authLoginCmd.NewCmdLogin(cmdFactory, nil))
	authCmd.Cmd.AddCommand(authLogoutCmd.NewCmdLogout(cmdFactory, nil))

	// Repo-scoped commands resolve their base repository from the local git
	// remotes via the API rather than relying on flags alone.
	resolvedBaseRepo := func() (ghrepo.Interface, error) {
		httpClient, err := cmdFactory.HttpClient()
		if err != nil {
			return nil, err
		}
		apiClient := api.NewClientFromHTTP(httpClient)

		ctx := context.New()
		remotes, err := ctx.Remotes()
		if err != nil {
			return nil, err
		}
		repoContext, err := context.ResolveRemotesToRepos(remotes, apiClient, "")
		if err != nil {
			return nil, err
		}
		baseRepo, err := repoContext.BaseRepo()
		if err != nil {
			return nil, err
		}

		return baseRepo, nil
	}

	// Shallow copy of the factory whose BaseRepo performs remote resolution.
	repoResolvingCmdFactory := *cmdFactory
	repoResolvingCmdFactory.BaseRepo = resolvedBaseRepo

	RootCmd.AddCommand(repoCmd.Cmd)
	repoCmd.Cmd.AddCommand(repoViewCmd.NewCmdView(&repoResolvingCmdFactory, nil))
	repoCmd.Cmd.AddCommand(repoForkCmd.NewCmdFork(&repoResolvingCmdFactory, nil))
	repoCmd.Cmd.AddCommand(repoCloneCmd.NewCmdClone(cmdFactory, nil))
	repoCmd.Cmd.AddCommand(repoCreateCmd.NewCmdCreate(cmdFactory, nil))
	repoCmd.Cmd.AddCommand(creditsCmd.NewCmdRepoCredits(&repoResolvingCmdFactory, nil))

	// NOTE(review): prCmd is declared elsewhere in this package.
	prCmd.AddCommand(prReviewCmd.NewCmdReview(&repoResolvingCmdFactory, nil))
	prCmd.AddCommand(prDiffCmd.NewCmdDiff(&repoResolvingCmdFactory, nil))
	prCmd.AddCommand(prCheckoutCmd.NewCmdCheckout(&repoResolvingCmdFactory, nil))

	RootCmd.AddCommand(creditsCmd.NewCmdCredits(cmdFactory, nil))
}
// RootCmd is the entry point of command-line execution
var RootCmd = &cobra.Command{
	Use:   "gh <command> <subcommand> [flags]",
	Short: "GitHub CLI",
	Long:  `Work seamlessly with GitHub from the command line.`,

	SilenceErrors: true,
	SilenceUsage:  true,
	Example: heredoc.Doc(`
	$ gh issue create
	$ gh repo clone cli/cli
	$ gh pr checkout 321
	`),
	// Annotations are rendered as dedicated sections of the root help output.
	Annotations: map[string]string{
		"help:feedback": heredoc.Doc(`
			Fill out our feedback form https://forms.gle/umxd3h31c7aMQFKG7
			Open an issue using “gh issue create -R cli/cli”
		`),
		"help:environment": heredoc.Doc(`
			GITHUB_TOKEN: an authentication token for API requests. Setting this avoids being
			prompted to authenticate and overrides any previously stored credentials.

			GH_REPO: specify the GitHub repository in "OWNER/REPO" format for commands that
			otherwise operate on a local repository.

			GH_EDITOR, GIT_EDITOR, VISUAL, EDITOR (in order of precedence): the editor tool to use
			for authoring text.

			BROWSER: the web browser to use for opening links.

			DEBUG: set to any value to enable verbose output to standard error. Include values "api"
			or "oauth" to print detailed information about HTTP requests or authentication flow.

			GLAMOUR_STYLE: the style to use for rendering Markdown. See
			https://github.com/charmbracelet/glamour#styles

			NO_COLOR: avoid printing ANSI escape sequences for color output.
		`),
	},
}
// versionCmd implements the hidden `gh version` subcommand; it prints the
// version string pre-rendered by init().
var versionCmd = &cobra.Command{
	Use:    "version",
	Hidden: true,
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Print(versionOutput)
	},
}
// initContext creates the shared command context, honoring the GH_REPO
// environment variable as a base-repo override.
// overridden in tests
var initContext = func() context.Context {
	ctx := context.New()
	repoOverride := os.Getenv("GH_REPO")
	if repoOverride != "" {
		ctx.SetBaseRepo(repoOverride)
	}
	return ctx
}
// BasicClient returns an API client for github.com only that borrows from but
// does not depend on user configuration
func BasicClient() (*api.Client, error) {
	var opts []api.ClientOption
	if os.Getenv("DEBUG") != "" {
		opts = append(opts, apiVerboseLog())
	}
	opts = append(opts, api.AddHeader("User-Agent", fmt.Sprintf("GitHub CLI %s", Version)))

	// Prefer the environment token; fall back to the stored oauth token for
	// the default host if the config file parses cleanly.
	token := os.Getenv("GITHUB_TOKEN")
	if token == "" {
		if c, err := config.ParseDefaultConfig(); err == nil {
			token, _ = c.Get(ghinstance.Default(), "oauth_token")
		}
	}
	if token != "" {
		opts = append(opts, api.AddHeader("Authorization", "token "+token))
	}
	return api.NewClient(opts...), nil
}
// contextForCommand returns the shared context, overriding the base repo
// when the command carries a non-empty --repo flag.
func contextForCommand(cmd *cobra.Command) context.Context {
	ctx := initContext()
	repoOverride, err := cmd.Flags().GetString("repo")
	if err == nil && repoOverride != "" {
		ctx.SetBaseRepo(repoOverride)
	}
	return ctx
}
// generic authenticated HTTP client for commands
func httpClient(io *iostreams.IOStreams, cfg config.Config, setAccept bool) *http.Client {
var opts []api.ClientOption
if verbose := os.Getenv("DEBUG"); verbose != "" {
opts = append(opts, apiVerboseLog())
}
opts = append(opts,
api.AddHeader("User-Agent", fmt.Sprintf("GitHub CLI %s", Version)),
api.AddHeaderFunc("Authorization", func(req *http.Request) (string, error) {
if token := os.Getenv("GITHUB_TOKEN"); token != "" {
return fmt.Sprintf("token %s", token), nil
}
hostname := ghinstance.NormalizeHostname(req.URL.Hostname())
token, err := cfg.Get(hostname, "oauth_token")
if token == "" {
var notFound *config.NotFoundError
// TODO: check if stdout is TTY too
if errors.As(err, ¬Found) && io.IsStdinTTY() {
// interactive OAuth flow
token, err = config.AuthFlowWithConfig(cfg, hostname, "Notice: authentication required")
}
if err != nil {
return "", err
}
if token == "" {
// TODO: instruct user how to manually authenticate
return "", fmt.Errorf("authentication required for %s", hostname)
}
}
return fmt.Sprintf("token %s", token), nil
}),
)
if setAccept {
opts = append(opts,
api.AddHeaderFunc("Accept", func(req *http.Request) (string, error) {
// antiope-preview: Checks
accept := "application/vnd.github.antiope-preview+json"
if ghinstance.IsEnterprise(req.URL.Hostname()) {
// shadow-cat-preview: Draft pull requests
accept += ", application/vnd.github.shadow-cat-preview"
}
return accept, nil
}),
)
}
return api.NewHTTPClient(opts...)
}
// apiClientForContext builds an API client from the context's configuration.
// LEGACY; overridden in tests
var apiClientForContext = func(ctx context.Context) (*api.Client, error) {
	cfg, err := ctx.Config()
	if err != nil {
		return nil, err
	}
	return api.NewClientFromHTTP(httpClient(defaultStreams, cfg, true)), nil
}
// apiVerboseLog returns a client option that logs API traffic to stderr;
// request/response bodies are included when DEBUG contains "api".
func apiVerboseLog() api.ClientOption {
	traceBodies := strings.Contains(os.Getenv("DEBUG"), "api")
	useColor := utils.IsTerminal(os.Stderr)
	return api.VerboseLog(utils.NewColorable(os.Stderr), traceBodies, useColor)
}
// colorableOut wraps the command's stdout in a color-aware writer when it
// is backed by a real file; other writers pass through untouched.
func colorableOut(cmd *cobra.Command) io.Writer {
	w := cmd.OutOrStdout()
	f, isFile := w.(*os.File)
	if !isFile {
		return w
	}
	return utils.NewColorable(f)
}
// colorableErr wraps the command's stderr in a color-aware writer when it
// is backed by a real file; other writers pass through untouched.
func colorableErr(cmd *cobra.Command) io.Writer {
	w := cmd.ErrOrStderr()
	f, isFile := w.(*os.File)
	if !isFile {
		return w
	}
	return utils.NewColorable(f)
}
// changelogURL maps a version string to its release page on GitHub. Strings
// that look like a semver release (optionally "v"-prefixed, with an optional
// pre-release suffix) link to their tag; anything else links to the latest
// release.
func changelogURL(version string) string {
	base := "https://github.com/cli/cli"
	releaseRE := regexp.MustCompile(`^v?\d+\.\d+\.\d+(-[\w.]+)?$`)
	if releaseRE.MatchString(version) {
		return fmt.Sprintf("%s/releases/tag/v%s", base, strings.TrimPrefix(version, "v"))
	}
	return fmt.Sprintf("%s/releases/latest", base)
}
// determineBaseRepo picks the repository a command should operate on: the
// --repo flag when given, otherwise the repository resolved from the local
// git remotes via the API.
func determineBaseRepo(apiClient *api.Client, cmd *cobra.Command, ctx context.Context) (ghrepo.Interface, error) {
	if repoFlag, _ := cmd.Flags().GetString("repo"); repoFlag != "" {
		flagRepo, err := ghrepo.FromFullName(repoFlag)
		if err != nil {
			return nil, fmt.Errorf("argument error: %w", err)
		}
		return flagRepo, nil
	}

	remotes, err := ctx.Remotes()
	if err != nil {
		return nil, err
	}
	repoContext, err := context.ResolveRemotesToRepos(remotes, apiClient, "")
	if err != nil {
		return nil, err
	}
	return repoContext.BaseRepo()
}
// formatRemoteURL renders the git remote URL for repo, honoring the
// configured git_protocol (ssh vs https, defaulting to https).
// TODO there is a parallel implementation for isolated commands
func formatRemoteURL(cmd *cobra.Command, repo ghrepo.Interface) string {
	ctx := contextForCommand(cmd)

	var protocol string
	if cfg, err := ctx.Config(); err != nil {
		fmt.Fprintf(colorableErr(cmd), "%s failed to load config: %s. using defaults\n", utils.Yellow("!"), err)
	} else {
		protocol, _ = cfg.Get(repo.RepoHost(), "git_protocol")
	}

	if protocol == "ssh" {
		return fmt.Sprintf("git@%s:%s/%s.git", repo.RepoHost(), repo.RepoOwner(), repo.RepoName())
	}
	return fmt.Sprintf("https://%s/%s/%s.git", repo.RepoHost(), repo.RepoOwner(), repo.RepoName())
}
// determineEditor resolves the editor command to use for authoring text:
// the GH_EDITOR environment variable first, then the configured editor.
// TODO there is a parallel implementation for isolated commands
func determineEditor(cmd *cobra.Command) (string, error) {
	if fromEnv := os.Getenv("GH_EDITOR"); fromEnv != "" {
		return fromEnv, nil
	}

	ctx := contextForCommand(cmd)
	cfg, err := ctx.Config()
	if err != nil {
		return "", fmt.Errorf("could not read config: %w", err)
	}
	// TODO: consider supporting setting an editor per GHE host
	editor, _ := cfg.Get(ghinstance.Default(), "editor")
	return editor, nil
}
// ExecuteShellAlias runs an expanded shell alias, wiring the child process
// directly to this process's standard streams.
func ExecuteShellAlias(args []string) error {
	externalCmd := exec.Command(args[0], args[1:]...)
	externalCmd.Stdin = os.Stdin
	externalCmd.Stdout = os.Stdout
	externalCmd.Stderr = os.Stderr
	return run.PrepareCmd(externalCmd).Run()
}
// findSh locates an sh interpreter for executing shell aliases. On Windows,
// when sh is not on PATH, it falls back to the sh.exe distributed alongside
// Git for Windows.
var findSh = func() (string, error) {
	if shPath, err := exec.LookPath("sh"); err == nil {
		return shPath, nil
	}

	if runtime.GOOS != "windows" {
		return "", errors.New("unable to locate sh to execute shell alias with")
	}

	winNotFoundErr := errors.New("unable to locate sh to execute the shell alias with. The sh.exe interpreter is typically distributed with Git for Windows.")
	// We can try and find a sh executable in a Git for Windows install
	gitPath, err := exec.LookPath("git")
	if err != nil {
		return "", winNotFoundErr
	}

	shPath := filepath.Join(filepath.Dir(gitPath), "..", "bin", "sh.exe")
	if _, err := os.Stat(shPath); err != nil {
		return "", winNotFoundErr
	}
	return shPath, nil
}
// ExpandAlias processes argv to see if it should be rewritten according to a user's aliases. The
// second return value indicates whether the alias should be executed in a new shell process instead
// of running gh itself.
func ExpandAlias(args []string) (expanded []string, isShell bool, err error) {
	err = nil
	isShell = false
	expanded = []string{}

	if len(args) < 2 {
		// the command is lacking a subcommand
		return
	}

	ctx := initContext()
	cfg, err := ctx.Config()
	if err != nil {
		return
	}
	aliases, err := cfg.Aliases()
	if err != nil {
		return
	}

	// args[0] is the binary name; args[1] is the candidate alias.
	expansion, ok := aliases.Get(args[1])
	if ok {
		// A "!" prefix marks a shell alias: hand the rest of the string to sh
		// with any remaining arguments forwarded after "--".
		if strings.HasPrefix(expansion, "!") {
			isShell = true
			shPath, shErr := findSh()
			if shErr != nil {
				err = shErr
				return
			}
			expanded = []string{shPath, "-c", expansion[1:]}

			if len(args[2:]) > 0 {
				expanded = append(expanded, "--")
				expanded = append(expanded, args[2:]...)
			}

			return
		}

		// Substitute $1..$N placeholders with the remaining arguments; when
		// the expansion has no placeholders, extra arguments are appended
		// verbatim after the expanded command.
		extraArgs := []string{}
		for i, a := range args[2:] {
			if !strings.Contains(expansion, "$") {
				extraArgs = append(extraArgs, a)
			} else {
				expansion = strings.ReplaceAll(expansion, fmt.Sprintf("$%d", i+1), a)
			}
		}
		// Any placeholder still present means the user supplied too few args.
		lingeringRE := regexp.MustCompile(`\$\d`)
		if lingeringRE.MatchString(expansion) {
			err = fmt.Errorf("not enough arguments for alias: %s", expansion)
			return
		}

		var newArgs []string
		newArgs, err = shlex.Split(expansion)
		if err != nil {
			return
		}

		expanded = append(newArgs, extraArgs...)
		return
	}

	// Not an alias: pass the original arguments (minus the binary name) through.
	expanded = args[1:]
	return
}
// connectedToTerminal reports whether both the command's stdin and stdout
// are attached to a terminal.
func connectedToTerminal(cmd *cobra.Command) bool {
	stdinTTY := utils.IsTerminal(cmd.InOrStdin())
	stdoutTTY := utils.IsTerminal(cmd.OutOrStdout())
	return stdinTTY && stdoutTTY
}
| [
"\"GH_REPO\"",
"\"DEBUG\"",
"\"GITHUB_TOKEN\"",
"\"DEBUG\"",
"\"GITHUB_TOKEN\"",
"\"DEBUG\"",
"\"GH_EDITOR\""
] | [] | [
"DEBUG",
"GITHUB_TOKEN",
"GH_EDITOR",
"GH_REPO"
] | [] | ["DEBUG", "GITHUB_TOKEN", "GH_EDITOR", "GH_REPO"] | go | 4 | 0 | |
binderhub/app.py | """
The binderhub application
"""
import asyncio
import ipaddress
import json
import logging
import os
import re
import secrets
from binascii import a2b_hex
from concurrent.futures import ThreadPoolExecutor
from glob import glob
from urllib.parse import urlparse
import kubernetes.client
import kubernetes.config
from jinja2 import Environment, FileSystemLoader, PrefixLoader, ChoiceLoader
from tornado.httpclient import AsyncHTTPClient
from tornado.httpserver import HTTPServer
import tornado.ioloop
import tornado.options
import tornado.log
from tornado.log import app_log
import tornado.web
from traitlets import (
Bool,
Bytes,
Dict,
Integer,
TraitError,
Unicode,
Union,
Type,
default,
observe,
validate,
)
from traitlets.config import Application
from jupyterhub.services.auth import HubOAuthCallbackHandler
from jupyterhub.traitlets import Callable
from .base import AboutHandler, Custom404, VersionHandler
from .build import Build
from .builder import BuildHandler
from .config import ConfigHandler
from .health import HealthHandler
from .launcher import Launcher
from .log import log_request
from .ratelimit import RateLimiter
from .repoproviders import RepoProvider
from .registry import DockerRegistry
from .main import MainHandler, ParameterizedMainHandler, LegacyRedirectHandler
from .repoproviders import (GitHubRepoProvider, GitRepoProvider,
GitLabRepoProvider, GistRepoProvider,
ZenodoProvider, FigshareProvider, HydroshareProvider,
DataverseProvider)
from .metrics import MetricsHandler
from .utils import ByteSpecification, url_path_join
from .events import EventLog
HERE = os.path.dirname(os.path.abspath(__file__))
class BinderHub(Application):
"""An Application for starting a builder."""
    @default('log_level')
    def _log_level(self):
        # Traitlets dynamic default: log at INFO unless overridden by config.
        return logging.INFO
aliases = {
'log-level': 'Application.log_level',
'f': 'BinderHub.config_file',
'config': 'BinderHub.config_file',
'port': 'BinderHub.port',
}
flags = {
'debug': (
{'BinderHub': {'debug': True}},
"Enable debug HTTP serving & debug logging"
)
}
config_file = Unicode(
'binderhub_config.py',
help="""
Config file to load.
If a relative path is provided, it is taken relative to current directory
""",
config=True
)
google_analytics_code = Unicode(
None,
allow_none=True,
help="""
The Google Analytics code to use on the main page.
Note that we'll respect Do Not Track settings, despite the fact that GA does not.
We will not load the GA scripts on browsers with DNT enabled.
""",
config=True
)
google_analytics_domain = Unicode(
'auto',
help="""
The Google Analytics domain to use on the main page.
By default this is set to 'auto', which sets it up for current domain and all
subdomains. This can be set to a more restrictive domain here for better privacy
""",
config=True
)
about_message = Unicode(
'',
help="""
Additional message to display on the about page.
Will be directly inserted into the about page's source so you can use
raw HTML.
""",
config=True
)
banner_message = Unicode(
'',
help="""
Message to display in a banner on all pages.
The value will be inserted "as is" into a HTML <div> element
with grey background, located at the top of the BinderHub pages. Raw
HTML is supported.
""",
config=True
)
extra_footer_scripts = Dict(
{},
help="""
Extra bits of JavaScript that should be loaded in footer of each page.
Only the values are set up as scripts. Keys are used only
for sorting.
Omit the <script> tag. This should be primarily used for
analytics code.
""",
config=True
)
base_url = Unicode(
'/',
help="The base URL of the entire application",
config=True)
@validate('base_url')
def _valid_base_url(self, proposal):
if not proposal.value.startswith('/'):
proposal.value = '/' + proposal.value
if not proposal.value.endswith('/'):
proposal.value = proposal.value + '/'
return proposal.value
badge_base_url = Union(
trait_types=[Unicode(), Callable()],
help="""
Base URL to use when generating launch badges.
Can also be a function that is passed the current handler and returns
the badge base URL, or "" for the default.
For example, you could get the badge_base_url from a custom HTTP
header, the Referer header, or from a request parameter
""",
config=True
)
    @default('badge_base_url')
    def _badge_base_url_default(self):
        # Empty string selects the documented default badge base URL behavior.
        return ''
@validate('badge_base_url')
def _valid_badge_base_url(self, proposal):
if callable(proposal.value):
return proposal.value
# add a trailing slash only when a value is set
if proposal.value and not proposal.value.endswith('/'):
proposal.value = proposal.value + '/'
return proposal.value
auth_enabled = Bool(
False,
help="""If JupyterHub authentication enabled,
require user to login (don't create temporary users during launch) and
start the new server for the logged in user.""",
config=True)
port = Integer(
8585,
help="""
Port for the builder to listen on.
""",
config=True
)
appendix = Unicode(
help="""
Appendix to pass to repo2docker
A multi-line string of Docker directives to run.
Since the build context cannot be affected,
ADD will typically not be useful.
This should be a Python string template.
It will be formatted with at least the following names available:
- binder_url: the shareable URL for the current image
(e.g. for sharing links to the current Binder)
- repo_url: the repository URL used to build the image
""",
config=True,
)
sticky_builds = Bool(
False,
help="""
Attempt to assign builds for the same repository to the same node.
In order to speed up re-builds of a repository all its builds will
be assigned to the same node in the cluster.
Note: This feature only works if you also enable docker-in-docker support.
""",
config=True,
)
use_registry = Bool(
True,
help="""
Set to true to push images to a registry & check for images in registry.
Set to false to use only local docker images. Useful when running
in a single node.
""",
config=True
)
build_class = Type(
Build,
help="""
The class used to build repo2docker images.
Must inherit from binderhub.build.Build
""",
config=True
)
per_repo_quota = Integer(
0,
help="""
Maximum number of concurrent users running from a given repo.
Limits the amount of Binder that can be consumed by a single repo.
0 (default) means no quotas.
""",
config=True,
)
pod_quota = Integer(
None,
help="""
The number of concurrent pods this hub has been designed to support.
This quota is used as an indication for how much above or below the
design capacity a hub is running. It is not used to reject new launch
requests when usage is above the quota.
The default corresponds to no quota, 0 means the hub can't accept pods
(maybe because it is in maintenance mode), and any positive integer
sets the quota.
""",
allow_none=True,
config=True,
)
per_repo_quota_higher = Integer(
0,
help="""
Maximum number of concurrent users running from a higher-quota repo.
Limits the amount of Binder that can be consumed by a single repo. This
quota is a second limit for repos with special status. See the
`high_quota_specs` parameter of RepoProvider classes for usage.
0 (default) means no quotas.
""",
config=True,
)
log_tail_lines = Integer(
100,
help="""
Limit number of log lines to show when connecting to an already running build.
""",
config=True,
)
push_secret = Unicode(
'binder-build-docker-config',
allow_none=True,
help="""
A kubernetes secret object that provides credentials for pushing built images.
""",
config=True
)
image_prefix = Unicode(
"",
help="""
Prefix for all built docker images.
If you are pushing to gcr.io, this would start with:
gcr.io/<your-project-name>/
Set according to whatever registry you are pushing to.
Defaults to "", which is probably not what you want :)
""",
config=True
)
build_memory_request = ByteSpecification(
0,
help="""
Amount of memory to request when scheduling a build
0 reserves no memory.
This is used as the request for the pod that is spawned to do the building,
even though the pod itself will not be using that much memory
since the docker building is happening outside the pod.
However, it makes kubernetes aware of the resources being used,
and lets it schedule more intelligently.
""",
config=True,
)
build_memory_limit = ByteSpecification(
0,
help="""
Max amount of memory allocated for each image build process.
0 sets no limit.
This is applied to the docker build itself via repo2docker,
though it is also applied to our pod that submits the build,
even though that pod will rarely consume much memory.
Still, it makes it easier to see the resource limits in place via kubernetes.
""",
config=True,
)
debug = Bool(
False,
help="""
Turn on debugging.
""",
config=True
)
build_docker_host = Unicode(
"/var/run/docker.sock",
config=True,
help="""
The docker URL repo2docker should use to build the images.
Currently, only paths are supported, and they are expected to be available on
all the hosts.
"""
)
@validate('build_docker_host')
def docker_build_host_validate(self, proposal):
parts = urlparse(proposal.value)
if parts.scheme != 'unix' or parts.netloc != '':
raise TraitError("Only unix domain sockets on same node are supported for build_docker_host")
return proposal.value
build_docker_config = Dict(
None,
allow_none=True,
help="""
A dict which will be merged into the .docker/config.json of the build container (repo2docker)
Here, you could for example pass proxy settings as described here:
https://docs.docker.com/network/proxy/#configure-the-docker-client
Note: if you provide your own push_secret, this values wont
have an effect, as the push_secrets will overwrite
.docker/config.json
In this case, make sure that you include your config in your push_secret
""",
config=True
)
hub_api_token = Unicode(
help="""API token for talking to the JupyterHub API""",
config=True,
)
    @default('hub_api_token')
    def _default_hub_token(self):
        # Fall back to the token JupyterHub injects into the environment for
        # managed services; empty string when not running under JupyterHub.
        return os.environ.get('JUPYTERHUB_API_TOKEN', '')
hub_url = Unicode(
help="""
The base URL of the JupyterHub instance where users will run.
e.g. https://hub.mybinder.org/
""",
config=True,
)
hub_url_local = Unicode(
help="""
The base URL of the JupyterHub instance for local/internal traffic
If local/internal network connections from the BinderHub process should access
JupyterHub using a different URL than public/external traffic set this, default
is hub_url
""",
config=True,
)
    @default('hub_url_local')
    def _default_hub_url_local(self):
        # Internal traffic uses the public hub_url unless explicitly overridden.
        return self.hub_url
@validate('hub_url', 'hub_url_local')
def _add_slash(self, proposal):
"""trait validator to ensure hub_url ends with a trailing slash"""
if proposal.value is not None and not proposal.value.endswith('/'):
return proposal.value + '/'
return proposal.value
build_namespace = Unicode(
'default',
help="""
Kubernetes namespace to spawn build pods in.
Note that the push_secret must refer to a secret in this namespace.
""",
config=True
)
build_image = Unicode(
'jupyter/repo2docker:2021.01.0',
help="""
The repo2docker image to be used for doing builds
""",
config=True
)
build_node_selector = Dict(
{},
config=True,
help="""
Select the node where build pod runs on.
"""
)
repo_providers = Dict(
{
'gh': GitHubRepoProvider,
'gist': GistRepoProvider,
'git': GitRepoProvider,
'gl': GitLabRepoProvider,
'zenodo': ZenodoProvider,
'figshare': FigshareProvider,
'hydroshare': HydroshareProvider,
'dataverse': DataverseProvider,
},
config=True,
help="""
List of Repo Providers to register and try
"""
)
@validate('repo_providers')
def _validate_repo_providers(self, proposal):
"""trait validator to ensure there is at least one repo provider"""
if not proposal.value:
raise TraitError("Please provide at least one repo provider")
if any([not issubclass(provider, RepoProvider) for provider in proposal.value.values()]):
raise TraitError("Repository providers should inherit from 'binderhub.RepoProvider'")
return proposal.value
concurrent_build_limit = Integer(
32,
config=True,
help="""The number of concurrent builds to allow."""
)
executor_threads = Integer(
5,
config=True,
help="""The number of threads to use for blocking calls
Should generally be a small number because we don't
care about high concurrency here, just not blocking the webserver.
This executor is not used for long-running tasks (e.g. builds).
""",
)
build_cleanup_interval = Integer(
60,
config=True,
help="""Interval (in seconds) for how often stopped build pods will be deleted."""
)
build_max_age = Integer(
3600 * 4,
config=True,
help="""Maximum age of builds
Builds that are still running longer than this
will be killed.
"""
)
build_token_check_origin = Bool(
True,
config=True,
help="""Whether to validate build token origin.
False disables the origin check.
"""
)
build_token_expires_seconds = Integer(
300,
config=True,
help="""Expiry (in seconds) of build tokens
These are generally only used to authenticate a single request
from a page, so should be short-lived.
""",
)
build_token_secret = Union(
[Unicode(), Bytes()],
config=True,
help="""Secret used to sign build tokens
Lightweight validation of same-origin requests
""",
)
@validate("build_token_secret")
def _validate_build_token_secret(self, proposal):
if isinstance(proposal.value, str):
# allow hex string for text-only input formats
return a2b_hex(proposal.value)
return proposal.value
@default("build_token_secret")
def _default_build_token_secret(self):
if os.environ.get("BINDERHUB_BUILD_TOKEN_SECRET"):
return a2b_hex(os.environ["BINDERHUB_BUILD_TOKEN_SECRET"])
app_log.warning(
"Generating random build token secret."
" Set BinderHub.build_token_secret to avoid this warning."
)
return secrets.token_bytes(32)
# FIXME: Come up with a better name for it?
builder_required = Bool(
True,
config=True,
help="""
If binderhub should try to continue to run without a working build infrastructure.
Build infrastructure is kubernetes cluster + docker. This is useful for pure HTML/CSS/JS local development.
"""
)
ban_networks = Dict(
config=True,
help="""
Dict of networks from which requests should be rejected with 403
Keys are CIDR notation (e.g. '1.2.3.4/32'),
values are a label used in log / error messages.
CIDR strings will be parsed with `ipaddress.ip_network()`.
""",
)
@validate("ban_networks")
def _cast_ban_networks(self, proposal):
"""Cast CIDR strings to IPv[4|6]Network objects"""
networks = {}
for cidr, message in proposal.value.items():
networks[ipaddress.ip_network(cidr)] = message
return networks
ban_networks_min_prefix_len = Integer(
1,
help="The shortest prefix in ban_networks",
)
@observe("ban_networks")
def _update_prefix_len(self, change):
if not change.new:
min_len = 1
else:
min_len = min(net.prefixlen for net in change.new)
self.ban_networks_min_prefix_len = min_len or 1
tornado_settings = Dict(
config=True,
help="""
additional settings to pass through to tornado.
can include things like additional headers, etc.
"""
)
template_variables = Dict(
config=True,
help="Extra variables to supply to jinja templates when rendering.",
)
template_path = Unicode(
help="Path to search for custom jinja templates, before using the default templates.",
config=True,
)
@default('template_path')
def _template_path_default(self):
return os.path.join(HERE, 'templates')
extra_static_path = Unicode(
help='Path to search for extra static files.',
config=True,
)
extra_static_url_prefix = Unicode(
'/extra_static/',
help='Url prefix to serve extra static files.',
config=True,
)
normalized_origin = Unicode(
'',
config=True,
help='Origin to use when emitting events. Defaults to hostname of request when empty'
)
@staticmethod
def add_url_prefix(prefix, handlers):
"""add a url prefix to handlers"""
for i, tup in enumerate(handlers):
lis = list(tup)
lis[0] = url_path_join(prefix, tup[0])
handlers[i] = tuple(lis)
return handlers
def init_pycurl(self):
try:
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
except ImportError as e:
self.log.debug("Could not load pycurl: %s\npycurl is recommended if you have a large number of users.", e)
# set max verbosity of curl_httpclient at INFO
# because debug-logging from curl_httpclient
# includes every full request and response
if self.log_level < logging.INFO:
curl_log = logging.getLogger('tornado.curl_httpclient')
curl_log.setLevel(logging.INFO)
def initialize(self, *args, **kwargs):
"""Load configuration settings."""
super().initialize(*args, **kwargs)
self.load_config_file(self.config_file)
# hook up tornado logging
if self.debug:
self.log_level = logging.DEBUG
tornado.options.options.logging = logging.getLevelName(self.log_level)
tornado.log.enable_pretty_logging()
self.log = tornado.log.app_log
self.init_pycurl()
# initialize kubernetes config
if self.builder_required:
try:
kubernetes.config.load_incluster_config()
except kubernetes.config.ConfigException:
kubernetes.config.load_kube_config()
self.tornado_settings["kubernetes_client"] = self.kube_client = kubernetes.client.CoreV1Api()
# times 2 for log + build threads
self.build_pool = ThreadPoolExecutor(self.concurrent_build_limit * 2)
# default executor for asyncifying blocking calls (e.g. to kubernetes, docker).
# this should not be used for long-running requests
self.executor = ThreadPoolExecutor(self.executor_threads)
jinja_options = dict(autoescape=True, )
template_paths = [self.template_path]
base_template_path = self._template_path_default()
if base_template_path not in template_paths:
# add base templates to the end, so they are looked up at last after custom templates
template_paths.append(base_template_path)
loader = ChoiceLoader([
# first load base templates with prefix
PrefixLoader({'templates': FileSystemLoader([base_template_path])}, '/'),
# load all templates
FileSystemLoader(template_paths)
])
jinja_env = Environment(loader=loader, **jinja_options)
if self.use_registry and self.builder_required:
registry = DockerRegistry(parent=self)
else:
registry = None
self.launcher = Launcher(
parent=self,
hub_url=self.hub_url,
hub_url_local=self.hub_url_local,
hub_api_token=self.hub_api_token,
create_user=not self.auth_enabled,
)
self.event_log = EventLog(parent=self)
for schema_file in glob(os.path.join(HERE, 'event-schemas','*.json')):
with open(schema_file) as f:
self.event_log.register_schema(json.load(f))
self.tornado_settings.update(
{
"log_function": log_request,
"push_secret": self.push_secret,
"image_prefix": self.image_prefix,
"debug": self.debug,
"launcher": self.launcher,
"appendix": self.appendix,
"ban_networks": self.ban_networks,
"ban_networks_min_prefix_len": self.ban_networks_min_prefix_len,
"build_namespace": self.build_namespace,
"build_image": self.build_image,
"build_node_selector": self.build_node_selector,
"build_pool": self.build_pool,
"build_token_check_origin": self.build_token_check_origin,
"build_token_secret": self.build_token_secret,
"build_token_expires_seconds": self.build_token_expires_seconds,
"sticky_builds": self.sticky_builds,
"log_tail_lines": self.log_tail_lines,
"pod_quota": self.pod_quota,
"per_repo_quota": self.per_repo_quota,
"per_repo_quota_higher": self.per_repo_quota_higher,
"repo_providers": self.repo_providers,
"rate_limiter": RateLimiter(parent=self),
"use_registry": self.use_registry,
"build_class": self.build_class,
"registry": registry,
"traitlets_config": self.config,
"google_analytics_code": self.google_analytics_code,
"google_analytics_domain": self.google_analytics_domain,
"about_message": self.about_message,
"banner_message": self.banner_message,
"extra_footer_scripts": self.extra_footer_scripts,
"jinja2_env": jinja_env,
"build_memory_limit": self.build_memory_limit,
"build_memory_request": self.build_memory_request,
"build_docker_host": self.build_docker_host,
"build_docker_config": self.build_docker_config,
"base_url": self.base_url,
"badge_base_url": self.badge_base_url,
"static_path": os.path.join(HERE, "static"),
"static_url_prefix": url_path_join(self.base_url, "static/"),
"template_variables": self.template_variables,
"executor": self.executor,
"auth_enabled": self.auth_enabled,
"event_log": self.event_log,
"normalized_origin": self.normalized_origin,
}
)
if self.auth_enabled:
self.tornado_settings['cookie_secret'] = os.urandom(32)
handlers = [
(r'/metrics', MetricsHandler),
(r'/versions', VersionHandler),
(r"/build/([^/]+)/(.+)", BuildHandler),
(r"/v2/([^/]+)/(.+)", ParameterizedMainHandler),
(r"/repo/([^/]+)/([^/]+)(/.*)?", LegacyRedirectHandler),
# for backward-compatible mybinder.org badge URLs
# /assets/images/badge.svg
(r'/assets/(images/badge\.svg)',
tornado.web.StaticFileHandler,
{'path': self.tornado_settings['static_path']}),
# /badge.svg
(r'/(badge\.svg)',
tornado.web.StaticFileHandler,
{'path': os.path.join(self.tornado_settings['static_path'], 'images')}),
# /badge_logo.svg
(r'/(badge\_logo\.svg)',
tornado.web.StaticFileHandler,
{'path': os.path.join(self.tornado_settings['static_path'], 'images')}),
# /logo_social.png
(r'/(logo\_social\.png)',
tornado.web.StaticFileHandler,
{'path': os.path.join(self.tornado_settings['static_path'], 'images')}),
# /favicon_XXX.ico
(r'/(favicon\_fail\.ico)',
tornado.web.StaticFileHandler,
{'path': os.path.join(self.tornado_settings['static_path'], 'images')}),
(r'/(favicon\_success\.ico)',
tornado.web.StaticFileHandler,
{'path': os.path.join(self.tornado_settings['static_path'], 'images')}),
(r'/(favicon\_building\.ico)',
tornado.web.StaticFileHandler,
{'path': os.path.join(self.tornado_settings['static_path'], 'images')}),
(r'/about', AboutHandler),
(r'/health', HealthHandler, {'hub_url': self.hub_url_local}),
(r'/_config', ConfigHandler),
(r'/', MainHandler),
(r'.*', Custom404),
]
handlers = self.add_url_prefix(self.base_url, handlers)
if self.extra_static_path:
handlers.insert(-1, (re.escape(url_path_join(self.base_url, self.extra_static_url_prefix)) + r"(.*)",
tornado.web.StaticFileHandler,
{'path': self.extra_static_path}))
if self.auth_enabled:
oauth_redirect_uri = os.getenv('JUPYTERHUB_OAUTH_CALLBACK_URL') or \
url_path_join(self.base_url, 'oauth_callback')
oauth_redirect_uri = urlparse(oauth_redirect_uri).path
handlers.insert(-1, (re.escape(oauth_redirect_uri), HubOAuthCallbackHandler))
self.tornado_app = tornado.web.Application(handlers, **self.tornado_settings)
def stop(self):
self.http_server.stop()
self.build_pool.shutdown()
async def watch_build_pods(self):
"""Watch build pods
Every build_cleanup_interval:
- delete stopped build pods
- delete running build pods older than build_max_age
"""
while True:
try:
await asyncio.wrap_future(
self.executor.submit(
lambda: Build.cleanup_builds(
self.kube_client,
self.build_namespace,
self.build_max_age,
)
)
)
except Exception:
app_log.exception("Failed to cleanup build pods")
await asyncio.sleep(self.build_cleanup_interval)
def start(self, run_loop=True):
self.log.info("BinderHub starting on port %i", self.port)
self.http_server = HTTPServer(
self.tornado_app,
xheaders=True,
)
self.http_server.listen(self.port)
if self.builder_required:
asyncio.ensure_future(self.watch_build_pods())
if run_loop:
tornado.ioloop.IOLoop.current().start()
main = BinderHub.launch_instance
if __name__ == '__main__':
main()
| [] | [] | [
"JUPYTERHUB_API_TOKEN",
"JUPYTERHUB_OAUTH_CALLBACK_URL",
"BINDERHUB_BUILD_TOKEN_SECRET"
] | [] | ["JUPYTERHUB_API_TOKEN", "JUPYTERHUB_OAUTH_CALLBACK_URL", "BINDERHUB_BUILD_TOKEN_SECRET"] | python | 3 | 0 | |
playbooks/osp/inventory/openstack.py | #!/usr/bin/env python
# Copyright (c) 2012, Marco Vito Moscaritolo <[email protected]>
# Copyright (c) 2013, Jesse Keating <[email protected]>
# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.
# Copyright (c) 2016, Rackspace Australia
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# The OpenStack Inventory module uses os-client-config for configuration.
# https://github.com/openstack/os-client-config
# This means it will either:
# - Respect normal OS_* environment variables like other OpenStack tools
# - Read values from a clouds.yaml file.
# If you want to configure via clouds.yaml, you can put the file in:
# - Current directory
# - ~/.config/openstack/clouds.yaml
# - /etc/openstack/clouds.yaml
# - /etc/ansible/openstack.yml
# The clouds.yaml file can contain entries for multiple clouds and multiple
# regions of those clouds. If it does, this inventory module will by default
# connect to all of them and present them as one contiguous inventory. You
# can limit to one cloud by passing the `--cloud` parameter, or use the
# OS_CLOUD environment variable. If caching is enabled, and a cloud is
# selected, then per-cloud cache folders will be used.
#
# See the adjacent openstack.yml file for an example config file
# There are two ansible inventory specific options that can be set in
# the inventory section.
# expand_hostvars controls whether or not the inventory will make extra API
# calls to fill out additional information about each server
# use_hostnames changes the behavior from registering every host with its UUID
# and making a group of its hostname to only doing this if the
# hostname in question has more than one server
# fail_on_errors causes the inventory to fail and return no hosts if one cloud
# has failed (for example, bad credentials or being offline).
# When set to False, the inventory will return hosts from
# whichever other clouds it can contact. (Default: True)
#
# Also it is possible to pass the correct user by setting an ansible_user: $myuser
# metadata attribute.
import argparse
import collections
import os
import sys
import time
from distutils.version import StrictVersion
try:
import json
except:
import simplejson as json
import os_client_config
import shade
import shade.inventory
CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml']
def get_groups_from_server(server_vars, namegroup=True):
groups = []
region = server_vars['region']
cloud = server_vars['cloud']
metadata = server_vars.get('metadata', {})
# Create a group for the cloud
groups.append(cloud)
# Create a group on region
groups.append(region)
# And one by cloud_region
groups.append("%s_%s" % (cloud, region))
# Check if group metadata key in servers' metadata
if 'group' in metadata:
groups.append(metadata['group'])
for extra_group in metadata.get('groups', '').split(','):
if extra_group:
groups.append(extra_group.strip())
groups.append('instance-%s' % server_vars['id'])
if namegroup:
groups.append(server_vars['name'])
for key in ('flavor', 'image'):
if 'name' in server_vars[key]:
groups.append('%s-%s' % (key, server_vars[key]['name']))
for key, value in iter(metadata.items()):
groups.append('meta-%s_%s' % (key, value))
az = server_vars.get('az', None)
if az:
# Make groups for az, region_az and cloud_region_az
groups.append(az)
groups.append('%s_%s' % (region, az))
groups.append('%s_%s_%s' % (cloud, region, az))
return groups
def get_host_groups(inventory, refresh=False, cloud=None):
(cache_file, cache_expiration_time) = get_cache_settings(cloud)
if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh):
groups = to_json(get_host_groups_from_cloud(inventory))
with open(cache_file, 'w') as f:
f.write(groups)
else:
with open(cache_file, 'r') as f:
groups = f.read()
return groups
def append_hostvars(hostvars, groups, key, server, namegroup=False):
hostvars[key] = dict(
ansible_ssh_host=server['interface_ip'],
ansible_host=server['interface_ip'],
openstack=server)
metadata = server.get('metadata', {})
if 'ansible_user' in metadata:
hostvars[key]['ansible_user'] = metadata['ansible_user']
for group in get_groups_from_server(server, namegroup=namegroup):
groups[group].append(key)
def get_host_groups_from_cloud(inventory):
groups = collections.defaultdict(list)
firstpass = collections.defaultdict(list)
hostvars = {}
list_args = {}
if hasattr(inventory, 'extra_config'):
use_hostnames = inventory.extra_config['use_hostnames']
list_args['expand'] = inventory.extra_config['expand_hostvars']
if StrictVersion(shade.__version__) >= StrictVersion("1.6.0"):
list_args['fail_on_cloud_config'] = \
inventory.extra_config['fail_on_errors']
else:
use_hostnames = False
for server in inventory.list_hosts(**list_args):
if 'interface_ip' not in server:
continue
firstpass[server['name']].append(server)
for name, servers in firstpass.items():
if len(servers) == 1 and use_hostnames:
append_hostvars(hostvars, groups, name, servers[0])
else:
server_ids = set()
# Trap for duplicate results
for server in servers:
server_ids.add(server['id'])
if len(server_ids) == 1 and use_hostnames:
append_hostvars(hostvars, groups, name, servers[0])
else:
for server in servers:
append_hostvars(
hostvars, groups, server['id'], server,
namegroup=True)
groups['_meta'] = {'hostvars': hostvars}
return groups
def is_cache_stale(cache_file, cache_expiration_time, refresh=False):
''' Determines if cache file has expired, or if it is still valid '''
if refresh:
return True
if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0:
mod_time = os.path.getmtime(cache_file)
current_time = time.time()
if (mod_time + cache_expiration_time) > current_time:
return False
return True
def get_cache_settings(cloud=None):
config = os_client_config.config.OpenStackConfig(
config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES)
# For inventory-wide caching
cache_expiration_time = config.get_cache_expiration_time()
cache_path = config.get_cache_path()
if cloud:
cache_path = '{0}_{1}'.format(cache_path, cloud)
if not os.path.exists(cache_path):
os.makedirs(cache_path)
cache_file = os.path.join(cache_path, 'ansible-inventory.cache')
return (cache_file, cache_expiration_time)
def to_json(in_dict):
return json.dumps(in_dict, sort_keys=True, indent=2)
def parse_args():
parser = argparse.ArgumentParser(description='OpenStack Inventory Module')
parser.add_argument('--cloud', default=os.environ.get('OS_CLOUD'),
help='Cloud name (default: None')
parser.add_argument('--private',
action='store_true',
help='Use private address for ansible host')
parser.add_argument('--refresh', action='store_true',
help='Refresh cached information')
parser.add_argument('--debug', action='store_true', default=False,
help='Enable debug output')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true',
help='List active servers')
group.add_argument('--host', help='List details about the specific host')
return parser.parse_args()
def main():
args = parse_args()
try:
config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES
shade.simple_logging(debug=args.debug)
inventory_args = dict(
refresh=args.refresh,
config_files=config_files,
private=args.private,
cloud=args.cloud,
)
if hasattr(shade.inventory.OpenStackInventory, 'extra_config'):
inventory_args.update(dict(
config_key='ansible',
config_defaults={
'use_hostnames': False,
'expand_hostvars': True,
'fail_on_errors': True,
}
))
inventory = shade.inventory.OpenStackInventory(**inventory_args)
if args.list:
output = get_host_groups(inventory, refresh=args.refresh, cloud=args.cloud)
elif args.host:
output = to_json(inventory.get_host(args.host))
print(output)
except shade.OpenStackCloudException as e:
sys.stderr.write('%s\n' % e.message)
sys.exit(1)
sys.exit(0)
if __name__ == '__main__':
main()
| [] | [] | [
"OS_CLOUD"
] | [] | ["OS_CLOUD"] | python | 1 | 0 | |
cmd/cue/cmd/common.go | // Copyright 2018 The CUE Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"bytes"
"io"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/spf13/pflag"
"golang.org/x/text/language"
"golang.org/x/text/message"
"cuelang.org/go/cue"
"cuelang.org/go/cue/ast"
"cuelang.org/go/cue/build"
"cuelang.org/go/cue/errors"
"cuelang.org/go/cue/load"
"cuelang.org/go/cue/parser"
"cuelang.org/go/cue/token"
"cuelang.org/go/internal/encoding"
"cuelang.org/go/internal/filetypes"
"cuelang.org/go/internal/value"
)
// Disallow
// - block comments
// - old-style field comprehensions
// - space separator syntax
const syntaxVersion = -1000 + 100*2 + 1
var requestedVersion = os.Getenv("CUE_SYNTAX_OVERRIDE")
var defaultConfig = config{
loadCfg: &load.Config{
ParseFile: func(name string, src interface{}) (*ast.File, error) {
version := syntaxVersion
if requestedVersion != "" {
switch {
case strings.HasPrefix(requestedVersion, "v0.1"):
version = -1000 + 100
}
}
return parser.ParseFile(name, src,
parser.FromVersion(version),
parser.ParseComments,
)
},
},
}
var runtime = &cue.Runtime{}
var inTest = false
func exitIfErr(cmd *Command, inst *cue.Instance, err error, fatal bool) {
exitOnErr(cmd, err, fatal)
}
func getLang() language.Tag {
loc := os.Getenv("LC_ALL")
if loc == "" {
loc = os.Getenv("LANG")
}
loc = strings.Split(loc, ".")[0]
return language.Make(loc)
}
func exitOnErr(cmd *Command, err error, fatal bool) {
if err == nil {
return
}
// Link x/text as our localizer.
p := message.NewPrinter(getLang())
format := func(w io.Writer, format string, args ...interface{}) {
p.Fprintf(w, format, args...)
}
cwd, _ := os.Getwd()
w := &bytes.Buffer{}
errors.Print(w, err, &errors.Config{
Format: format,
Cwd: cwd,
ToSlash: inTest,
})
b := w.Bytes()
_, _ = cmd.Stderr().Write(b)
if fatal {
exit()
}
}
func loadFromArgs(cmd *Command, args []string, cfg *load.Config) []*build.Instance {
binst := load.Instances(args, cfg)
if len(binst) == 0 {
return nil
}
return binst
}
// A buildPlan defines what should be done based on command line
// arguments and flags.
//
// TODO: allow --merge/-m to mix in other packages.
type buildPlan struct {
cmd *Command
insts []*build.Instance
// instance is a pre-compiled instance, which exists if value files are
// being processed, which may require a schema to decode.
instance *cue.Instance
cfg *config
// If orphanFiles are mixed with CUE files and/or if placement flags are used,
// the instance is also included in insts.
importing bool
mergeData bool // do not merge individual data files.
orphaned []*decoderInfo
orphanInstance *build.Instance
// imported files are files that were orphaned in the build instance, but
// were placed in the instance by using one the --files, --list or --path
// flags.
imported []*ast.File
expressions []ast.Expr // only evaluate these expressions within results
schema ast.Expr // selects schema in instance for orphaned values
// orphan placement flags.
perFile bool
useList bool
path []ast.Label
useContext bool
// outFile defines the file to output to. Default is CUE stdout.
outFile *build.File
encConfig *encoding.Config
}
// instances iterates either over a list of instances, or a list of
// data files. In the latter case, there must be either 0 or 1 other
// instance, with which the data instance may be merged.
func (b *buildPlan) instances() iterator {
var i iterator
switch {
case len(b.orphaned) > 0:
i = newStreamingIterator(b)
case len(b.insts) > 0:
i = &instanceIterator{
inst: b.instance,
a: buildInstances(b.cmd, b.insts),
i: -1,
}
default:
i = &instanceIterator{
inst: b.instance,
a: []*cue.Instance{b.instance},
i: -1,
}
b.instance = nil
}
if len(b.expressions) > 0 {
return &expressionIter{
iter: i,
expr: b.expressions,
i: len(b.expressions),
}
}
return i
}
type iterator interface {
scan() bool
value() cue.Value
instance() *cue.Instance // may return nil
file() *ast.File // may return nil
err() error
close()
id() string
}
type instanceIterator struct {
inst *cue.Instance
a []*cue.Instance
i int
e error
}
func (i *instanceIterator) scan() bool {
i.i++
return i.i < len(i.a) && i.e == nil
}
func (i *instanceIterator) close() {}
func (i *instanceIterator) err() error { return i.e }
func (i *instanceIterator) value() cue.Value {
v := i.a[i.i].Value()
if i.inst != nil {
v = v.Unify(i.inst.Value())
}
return v
}
func (i *instanceIterator) instance() *cue.Instance {
if i.i >= len(i.a) {
return nil
}
return i.a[i.i]
}
func (i *instanceIterator) file() *ast.File { return nil }
func (i *instanceIterator) id() string { return i.a[i.i].Dir }
type streamingIterator struct {
r *cue.Runtime
b *buildPlan
cfg *encoding.Config
a []*decoderInfo
dec *encoding.Decoder
v cue.Value
f *ast.File
e error
}
func newStreamingIterator(b *buildPlan) *streamingIterator {
i := &streamingIterator{
cfg: b.encConfig,
a: b.orphaned,
b: b,
}
// TODO: use orphanedSchema
i.r = &cue.Runtime{}
if v := b.encConfig.Schema; v.Exists() {
i.r = value.ConvertToRuntime(v.Context())
}
return i
}
func (i *streamingIterator) file() *ast.File { return i.f }
func (i *streamingIterator) value() cue.Value { return i.v }
func (i *streamingIterator) instance() *cue.Instance { return nil }
func (i *streamingIterator) id() string {
return ""
}
func (i *streamingIterator) scan() bool {
if i.e != nil {
return false
}
// advance to next value
if i.dec != nil && !i.dec.Done() {
i.dec.Next()
}
// advance to next stream if necessary
for i.dec == nil || i.dec.Done() {
if i.dec != nil {
i.dec.Close()
i.dec = nil
}
if len(i.a) == 0 {
return false
}
i.dec = i.a[0].dec(i.b)
if i.e = i.dec.Err(); i.e != nil {
return false
}
i.a = i.a[1:]
}
// compose value
i.f = i.dec.File()
inst, err := i.r.CompileFile(i.f)
if err != nil {
i.e = err
return false
}
i.v = inst.Value()
if schema := i.b.encConfig.Schema; schema.Exists() {
i.e = schema.Err()
if i.e == nil {
i.v = i.v.Unify(schema) // TODO(required fields): don't merge in schema
i.e = i.v.Err()
}
i.f = nil
}
return i.e == nil
}
func (i *streamingIterator) close() {
if i.dec != nil {
i.dec.Close()
i.dec = nil
}
}
func (i *streamingIterator) err() error {
if i.dec != nil {
if err := i.dec.Err(); err != nil {
return err
}
}
return i.e
}
type expressionIter struct {
iter iterator
expr []ast.Expr
i int
}
func (i *expressionIter) err() error { return i.iter.err() }
func (i *expressionIter) close() { i.iter.close() }
func (i *expressionIter) id() string { return i.iter.id() }
func (i *expressionIter) scan() bool {
i.i++
if i.i < len(i.expr) {
return true
}
if !i.iter.scan() {
return false
}
i.i = 0
return true
}
func (i *expressionIter) file() *ast.File { return nil }
func (i *expressionIter) instance() *cue.Instance { return nil }
func (i *expressionIter) value() cue.Value {
if len(i.expr) == 0 {
return i.iter.value()
}
v := i.iter.value()
path := ""
if inst := i.iter.instance(); inst != nil {
path = inst.ID()
}
return v.Context().BuildExpr(i.expr[i.i],
cue.Scope(v),
cue.InferBuiltins(true),
cue.ImportPath(path),
)
}
type config struct {
outMode filetypes.Mode
fileFilter string
reFile *regexp.Regexp
encoding build.Encoding
interpretation build.Interpretation
overrideDefault bool
noMerge bool // do not merge individual data files.
loadCfg *load.Config
}
func newBuildPlan(cmd *Command, args []string, cfg *config) (p *buildPlan, err error) {
if cfg == nil {
cfg = &defaultConfig
}
if cfg.loadCfg == nil {
cfg.loadCfg = defaultConfig.loadCfg
}
cfg.loadCfg.Stdin = cmd.InOrStdin()
p = &buildPlan{cfg: cfg, cmd: cmd, importing: cfg.loadCfg.DataFiles}
if err := p.parseFlags(); err != nil {
return nil, err
}
re, err := regexp.Compile(p.cfg.fileFilter)
if err != nil {
return nil, err
}
cfg.reFile = re
if err := setTags(cmd.Flags(), cfg.loadCfg); err != nil {
return nil, err
}
return p, nil
}
func (p *buildPlan) matchFile(file string) bool {
return p.cfg.reFile.MatchString(file)
}
func setTags(f *pflag.FlagSet, cfg *load.Config) error {
tags, _ := f.GetStringArray(string(flagInject))
cfg.Tags = tags
if b, _ := f.GetBool(string(flagInjectVars)); b {
cfg.TagVars = load.DefaultTagVars()
}
return nil
}
type decoderInfo struct {
file *build.File
d *encoding.Decoder // may be nil if delayed
}
func (d *decoderInfo) dec(b *buildPlan) *encoding.Decoder {
if d.d == nil {
d.d = encoding.NewDecoder(d.file, b.encConfig)
}
return d.d
}
func (d *decoderInfo) close() {
if d.d != nil {
d.d.Close()
}
}
// getDecoders takes the orphaned files of the given instance and splits them in
// schemas and values, saving the build.File and encoding.Decoder in the
// returned slices. It is up to the caller to Close any of the decoders that are
// returned.
func (p *buildPlan) getDecoders(b *build.Instance) (schemas, values []*decoderInfo, err error) {
files := b.OrphanedFiles
if p.cfg.overrideDefault {
files = append(files, b.UnknownFiles...)
}
for _, f := range files {
if !p.matchFile(f.Filename) && f.Filename != "-" {
continue
}
if p.cfg.overrideDefault {
f.Encoding = p.cfg.encoding
f.Interpretation = p.cfg.interpretation
}
switch f.Encoding {
case build.Protobuf, build.YAML, build.JSON, build.JSONL,
build.Text, build.Binary:
if f.Interpretation == build.ProtobufJSON {
// Need a schema.
values = append(values, &decoderInfo{f, nil})
continue
}
case build.TextProto:
if p.importing {
return schemas, values, errors.Newf(token.NoPos,
"cannot import textproto files")
}
// Needs to be decoded after any schema.
values = append(values, &decoderInfo{f, nil})
continue
default:
return schemas, values, errors.Newf(token.NoPos,
"unsupported encoding %q", f.Encoding)
}
// We add the module root to the path if there is a module defined.
c := *p.encConfig
if b.Module != "" {
c.ProtoPath = append(c.ProtoPath, b.Root)
}
d := encoding.NewDecoder(f, &c)
fi, err := filetypes.FromFile(f, p.cfg.outMode)
if err != nil {
return schemas, values, err
}
switch {
// case !fi.Schema: // TODO: value/schema/auto
// values = append(values, d)
case fi.Form != build.Schema && fi.Form != build.Final:
values = append(values, &decoderInfo{f, d})
case f.Interpretation != build.Auto:
schemas = append(schemas, &decoderInfo{f, d})
case d.Interpretation() == "":
values = append(values, &decoderInfo{f, d})
default:
schemas = append(schemas, &decoderInfo{f, d})
}
}
return schemas, values, nil
}
// importFiles imports orphan files for existing instances. Note that during
// import, both schemas and non-schemas are placed (TODO: should we allow schema
// mode here as well? It seems that the existing package should have enough
// typing to allow for schemas).
//
// It is a separate call to allow closing decoders between processing each
// package.
func (p *buildPlan) importFiles(b *build.Instance) error {
// TODO: assume textproto is imported at top-level or just ignore them.
schemas, values, err := p.getDecoders(b)
for _, d := range append(schemas, values...) {
defer d.close()
}
if err != nil {
return err
}
return p.placeOrphans(b, append(schemas, values...))
}
func parseArgs(cmd *Command, args []string, cfg *config) (p *buildPlan, err error) {
p, err = newBuildPlan(cmd, args, cfg)
if err != nil {
return nil, err
}
builds := loadFromArgs(cmd, args, cfg.loadCfg)
if builds == nil {
return nil, errors.Newf(token.NoPos, "invalid args")
}
if err := p.parsePlacementFlags(); err != nil {
return nil, err
}
for _, b := range builds {
if b.Err != nil {
return nil, b.Err
}
switch {
case !b.User:
if p.importing {
if err := p.importFiles(b); err != nil {
return nil, err
}
}
p.insts = append(p.insts, b)
case p.orphanInstance != nil:
return nil, errors.Newf(token.NoPos,
"builds contain two file packages")
default:
p.orphanInstance = b
}
}
if b := p.orphanInstance; b != nil {
schemas, values, err := p.getDecoders(b)
for _, d := range append(schemas, values...) {
defer d.close()
}
if err != nil {
return nil, err
}
if values == nil {
values, schemas = schemas, values
}
for _, di := range schemas {
d := di.dec(p)
for ; !d.Done(); d.Next() {
if err := b.AddSyntax(d.File()); err != nil {
return nil, err
}
}
if err := d.Err(); err != nil {
return nil, err
}
}
if len(p.insts) > 1 && p.schema != nil {
return nil, errors.Newf(token.NoPos,
"cannot use --schema/-d with flag more than one schema")
}
var schema *build.Instance
switch n := len(p.insts); n {
default:
return nil, errors.Newf(token.NoPos,
"too many packages defined (%d) in combination with files", n)
case 1:
if len(schemas) > 0 {
return nil, errors.Newf(token.NoPos,
"cannot combine packages with individual schema files")
}
schema = p.insts[0]
p.insts = nil
case 0:
bb := *b
schema = &bb
b.BuildFiles = nil
b.Files = nil
}
if schema != nil && len(schema.Files) > 0 {
inst := buildInstances(p.cmd, []*build.Instance{schema})[0]
if inst.Err != nil {
return nil, err
}
p.instance = inst
p.encConfig.Schema = inst.Value()
if p.schema != nil {
v := inst.Eval(p.schema)
if err := v.Err(); err != nil {
return nil, err
}
p.encConfig.Schema = v
}
}
switch {
default:
fallthrough
case p.schema != nil:
p.orphaned = values
case p.mergeData, p.usePlacement(), p.importing:
if err = p.placeOrphans(b, values); err != nil {
return nil, err
}
}
if len(b.Files) > 0 {
p.insts = append(p.insts, b)
}
}
if len(p.expressions) > 1 {
p.encConfig.Stream = true
}
return p, nil
}
func (b *buildPlan) parseFlags() (err error) {
b.mergeData = !b.cfg.noMerge && flagMerge.Bool(b.cmd)
out := flagOut.String(b.cmd)
outFile := flagOutFile.String(b.cmd)
if strings.Contains(out, ":") && strings.Contains(outFile, ":") {
return errors.Newf(token.NoPos,
"cannot specify qualifier in both --out and --outfile")
}
if outFile == "" {
outFile = "-"
}
if out != "" {
outFile = out + ":" + outFile
}
b.outFile, err = filetypes.ParseFile(outFile, b.cfg.outMode)
if err != nil {
return err
}
for _, e := range flagExpression.StringArray(b.cmd) {
expr, err := parser.ParseExpr("--expression", e)
if err != nil {
return err
}
b.expressions = append(b.expressions, expr)
}
if s := flagSchema.String(b.cmd); s != "" {
b.schema, err = parser.ParseExpr("--schema", s)
if err != nil {
return err
}
}
if s := flagGlob.String(b.cmd); s != "" {
// Set a default file filter to only include json and yaml files
b.cfg.fileFilter = s
}
b.encConfig = &encoding.Config{
Force: flagForce.Bool(b.cmd),
Mode: b.cfg.outMode,
Stdin: b.cmd.InOrStdin(),
Stdout: b.cmd.OutOrStdout(),
ProtoPath: flagProtoPath.StringArray(b.cmd),
AllErrors: flagAllErrors.Bool(b.cmd),
PkgName: flagPackage.String(b.cmd),
Strict: flagStrict.Bool(b.cmd),
}
return nil
}
func buildInstances(cmd *Command, binst []*build.Instance) []*cue.Instance {
// TODO:
// If there are no files and User is true, then use those?
// Always use all files in user mode?
instances := cue.Build(binst)
for _, inst := range instances {
// TODO: consider merging errors of multiple files, but ensure
// duplicates are removed.
exitIfErr(cmd, inst, inst.Err, true)
}
if flagIgnore.Bool(cmd) {
return instances
}
// TODO check errors after the fact in case of ignore.
for _, inst := range instances {
// TODO: consider merging errors of multiple files, but ensure
// duplicates are removed.
exitIfErr(cmd, inst, inst.Value().Validate(), !flagIgnore.Bool(cmd))
}
return instances
}
func buildToolInstances(cmd *Command, binst []*build.Instance) ([]*cue.Instance, error) {
instances := cue.Build(binst)
for _, inst := range instances {
if inst.Err != nil {
return nil, inst.Err
}
}
// TODO check errors after the fact in case of ignore.
for _, inst := range instances {
if err := inst.Value().Validate(); err != nil {
return nil, err
}
}
return instances, nil
}
func buildTools(cmd *Command, args []string) (*cue.Instance, error) {
cfg := &load.Config{
Tools: true,
}
f := cmd.cmd.Flags()
if err := setTags(f, cfg); err != nil {
return nil, err
}
binst := loadFromArgs(cmd, args, cfg)
if len(binst) == 0 {
return nil, nil
}
included := map[string]bool{}
ti := binst[0].Context().NewInstance(binst[0].Root, nil)
for _, inst := range binst {
k := 0
for _, f := range inst.Files {
if strings.HasSuffix(f.Filename, "_tool.cue") {
if !included[f.Filename] {
_ = ti.AddSyntax(f)
included[f.Filename] = true
}
continue
}
inst.Files[k] = f
k++
}
inst.Files = inst.Files[:k]
}
insts, err := buildToolInstances(cmd, binst)
if err != nil {
return nil, err
}
inst := insts[0]
if len(insts) > 1 {
inst = cue.Merge(insts...)
}
r := value.ConvertToRuntime(inst.Value().Context())
for _, b := range binst {
for _, i := range b.Imports {
if _, err := r.Build(i); err != nil {
return nil, err
}
}
}
// Set path equal to the package from which it is loading.
ti.ImportPath = binst[0].ImportPath
inst = inst.Build(ti)
return inst, inst.Err
}
func shortFile(root string, f *build.File) string {
dir, _ := filepath.Rel(root, f.Filename)
if dir == "" {
return f.Filename
}
if !filepath.IsAbs(dir) {
dir = "." + string(filepath.Separator) + dir
}
return dir
}
| [
"\"CUE_SYNTAX_OVERRIDE\"",
"\"LC_ALL\"",
"\"LANG\""
] | [] | [
"CUE_SYNTAX_OVERRIDE",
"LC_ALL",
"LANG"
] | [] | ["CUE_SYNTAX_OVERRIDE", "LC_ALL", "LANG"] | go | 3 | 0 | |
Sav_s_nb_30_m/setrun.py | # encoding: utf-8
"""
Module to set up run time parameters for Clawpack.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
from __future__ import absolute_import
from __future__ import print_function
# import netCDF4
import os
import datetime
import shutil
import gzip
import numpy as np
from clawpack.geoclaw.surge.storm import Storm
import clawpack.clawutil as clawutil
# Time Conversions
def days2seconds(days):
return days * 60.0**2 * 24.0
# Scratch directory for storing topo and storm files:
scratch_dir = os.path.join(os.environ["CLAW"], 'geoclaw', 'scratch')
# ------------------------------
def setrun(claw_pkg='geoclaw'):
"""
Define the parameters used for running Clawpack.
INPUT:
claw_pkg expected to be "geoclaw" for this setrun.
OUTPUT:
rundata - object of class ClawRunData
"""
from clawpack.clawutil import data
assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"
num_dim = 2
rundata = data.ClawRunData(claw_pkg, num_dim)
# ------------------------------------------------------------------
# Standard Clawpack parameters to be written to claw.data:
# (or to amr2ez.data for AMR)
# ------------------------------------------------------------------
clawdata = rundata.clawdata # initialized when rundata instantiated
# Set single grid parameters first.
# See below for AMR parameters.
# ---------------
# Spatial domain:
# ---------------
# Number of space dimensions:
clawdata.num_dim = num_dim
# Lower and upper edge of computational domain:
clawdata.lower[0] = 0
clawdata.upper[0] = 5000000.0
clawdata.lower[1] = 0
clawdata.upper[1] = 5000000.0
# Number of grid cells: Coarsest grid
clawdata.num_cells[0] = 200 #25km grids
clawdata.num_cells[1] = 200
# ---------------
# Size of system:
# ---------------
# Number of equations in the system:
clawdata.num_eqn = 3
# Number of auxiliary variables in the aux array (initialized in setaux)
# First three are from shallow GeoClaw, fourth is friction and last 3 are
# storm fields
clawdata.num_aux = 3 + 1 + 3
# Index of aux array corresponding to capacity function, if there is one:
clawdata.capa_index = 0
# -------------
# Initial time:
# -------------
#clawdata.t0 = -days2seconds(7)
clawdata.t0 = 0
# Restart from checkpoint file of a previous run?
# If restarting, t0 above should be from original run, and the
# restart_file 'fort.chkNNNNN' specified below should be in
# the OUTDIR indicated in Makefile.
clawdata.restart = False # True to restart from prior results
clawdata.restart_file = 'fort.chk00006' # File to use for restart data
# -------------
# Output times:
# --------------
# Specify at what times the results should be written to fort.q files.
# Note that the time integration stops after the final output time.
# The solution at initial time t0 is always written in addition.
clawdata.output_style = 1
if clawdata.output_style == 1:
# Output nout frames at equally spaced times up to tfinal:
clawdata.tfinal = days2seconds(8)
recurrence = 1
clawdata.num_output_times = int((clawdata.tfinal - clawdata.t0) *
recurrence / (60**2 * 24))
clawdata.output_t0 = True # output at initial (or restart) time?
elif clawdata.output_style == 2:
# Specify a list of output times.
clawdata.output_times = [0.5, 1.0]
elif clawdata.output_style == 3:
# Output every iout timesteps with a total of ntot time steps:
clawdata.output_step_interval = 1
clawdata.total_steps = 1
clawdata.output_t0 = True
clawdata.output_format = 'ascii' # 'ascii' or 'binary'
clawdata.output_q_components = 'all' # could be list such as [True,True]
clawdata.output_aux_components = 'all'
clawdata.output_aux_onlyonce = False # output aux arrays only at t0
# ---------------------------------------------------
# Verbosity of messages to screen during integration:
# ---------------------------------------------------
# The current t, dt, and cfl will be printed every time step
# at AMR levels <= verbosity. Set verbosity = 0 for no printing.
# (E.g. verbosity == 2 means print only on levels 1 and 2.)
clawdata.verbosity = 0
# --------------
# Time stepping:
# --------------
# if dt_variable==1: variable time steps used based on cfl_desired,
# if dt_variable==0: fixed time steps dt = dt_initial will always be used.
clawdata.dt_variable = True
# Initial time step for variable dt.
# If dt_variable==0 then dt=dt_initial for all steps:
clawdata.dt_initial = 0.016
# Max time step to be allowed if variable dt used:
clawdata.dt_max = 1e+99
# Desired Courant number if variable dt used, and max to allow without
# retaking step with a smaller dt:
clawdata.cfl_desired = 0.75
clawdata.cfl_max = 1.0
# Maximum number of time steps to allow between output times:
clawdata.steps_max = 9000
# ------------------
# Method to be used:
# ------------------
# Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
clawdata.order = 2
# Use dimensional splitting? (not yet available for AMR)
clawdata.dimensional_split = 'unsplit'
# For unsplit method, transverse_waves can be
# 0 or 'none' ==> donor cell (only normal solver used)
# 1 or 'increment' ==> corner transport of waves
# 2 or 'all' ==> corner transport of 2nd order corrections too
clawdata.transverse_waves = 2
# Number of waves in the Riemann solution:
clawdata.num_waves = 3
# List of limiters to use for each wave family:
# Required: len(limiter) == num_waves
# Some options:
# 0 or 'none' ==> no limiter (Lax-Wendroff)
# 1 or 'minmod' ==> minmod
# 2 or 'superbee' ==> superbee
# 3 or 'mc' ==> MC limiter
# 4 or 'vanleer' ==> van Leer
clawdata.limiter = ['mc', 'mc', 'mc']
clawdata.use_fwaves = True # True ==> use f-wave version of algorithms
# Source terms splitting:
# src_split == 0 or 'none'
# ==> no source term (src routine never called)
# src_split == 1 or 'godunov'
# ==> Godunov (1st order) splitting used,
# src_split == 2 or 'strang'
# ==> Strang (2nd order) splitting used, not recommended.
clawdata.source_split = 'godunov'
# --------------------
# Boundary conditions:
# --------------------
# Number of ghost cells (usually 2)
clawdata.num_ghost = 2
# Choice of BCs at xlower and xupper:
# 0 => user specified (must modify bcN.f to use this option)
# 1 => extrapolation (non-reflecting outflow)
# 2 => periodic (must specify this at both boundaries)
# 3 => solid wall for systems where q(2) is normal velocity
clawdata.bc_lower[0] = 'extrap'
clawdata.bc_upper[0] = 'extrap'
clawdata.bc_lower[1] = 'extrap'
clawdata.bc_upper[1] = 'extrap'
# Specify when checkpoint files should be created that can be
# used to restart a computation.
clawdata.checkpt_style = 0
if clawdata.checkpt_style == 0:
# Do not checkpoint at all
pass
elif np.abs(clawdata.checkpt_style) == 1:
# Checkpoint only at tfinal.
pass
elif np.abs(clawdata.checkpt_style) == 2:
# Specify a list of checkpoint times.
clawdata.checkpt_times = [0.1, 0.15]
elif np.abs(clawdata.checkpt_style) == 3:
# Checkpoint every checkpt_interval timesteps (on Level 1)
# and at the final time.
clawdata.checkpt_interval = 5
# ---------------
# AMR parameters:
# ---------------
amrdata = rundata.amrdata
# max number of refinement levels:
amrdata.amr_levels_max = 4
# List of refinement ratios at each level (length at least mxnest-1)
amrdata.refinement_ratios_x = [10,20,5] # 25km -> 2.5 km -> 125 m -> 25 m
amrdata.refinement_ratios_y = [10,20,5]
amrdata.refinement_ratios_t = [10,20,5]
# max number of refinement levels:
# THIS WAS TOO FAST AND THE HIGHEST RESOLUTION AMR WAS NOT USED
# amrdata.amr_levels_max = 5
# # List of refinement ratios at each level (length at least mxnest-1)
# amrdata.refinement_ratios_x = [10,5,4,5] # 25km -> 2.5 km -> 500m -> 125 m -> 25 m
# amrdata.refinement_ratios_y = [10,5,4,5]
# amrdata.refinement_ratios_t = [10,5,4,5]
# # THIS WAS ABOUT 30-40 min but some areas were not refined enough.
# # max number of refinement levels:
# amrdata.amr_levels_max = 4
# # List of refinement ratios at each level (length at least mxnest-1)
# amrdata.refinement_ratios_x = [10,20,5] # 25km -> 2.5 km -> 125 m -> 25 m
# amrdata.refinement_ratios_y = [10,20,5]
# amrdata.refinement_ratios_t = [10,20,5]
# # THIS TAKES A LONG TIME (>16 hours) TO RUN! Killed this
# amrdata.refinement_ratios_x = [10,10,50] # 25km -> 2.5 km -> 250 m -> 5 m
# amrdata.refinement_ratios_y = [10,10,50]
# amrdata.refinement_ratios_t = [10,10,50]
# Specify type of each aux variable in amrdata.auxtype.
# This must be a list of length maux, each element of which is one of:
# 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
amrdata.aux_type = ['center', 'center', 'yleft', 'center', 'center',
'center', 'center']
# Flag using refinement routine flag2refine rather than richardson error
amrdata.flag_richardson = False # use Richardson?
amrdata.flag2refine = True
# steps to take on each level L between regriddings of level L+1:
amrdata.regrid_interval = 3
# width of buffer zone around flagged points:
# (typically the same as regrid_interval so waves don't escape):
amrdata.regrid_buffer_width = 2
# clustering alg. cutoff for (# flagged pts) / (total # of cells refined)
# (closer to 1.0 => more small grids may be needed to cover flagged cells)
amrdata.clustering_cutoff = 0.700000
# print info about each regridding up to this level:
amrdata.verbosity_regrid = 0
# ----- For developers -----
# Toggle debugging print statements:
amrdata.dprint = False # print domain flags
amrdata.eprint = False # print err est flags
amrdata.edebug = False # even more err est flags
amrdata.gprint = False # grid bisection/clustering
amrdata.nprint = False # proper nesting output
amrdata.pprint = False # proj. of tagged points
amrdata.rprint = False # print regridding summary
amrdata.sprint = False # space/memory output
amrdata.tprint = False # time step reporting each level
amrdata.uprint = False # update/upbnd reporting
# More AMR parameters can be set -- see the defaults in pyclaw/data.py
# == setregions.data values ==
regions = rundata.regiondata.regions
# to specify regions of refinement append lines of the form
# [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]
# Regions for all topographies
regions.append([1, 1, 0., 1.e10, 0, 5000000., 0.,5000000.]) #Whole region
regions.append([1, 2, 0., 1.e10, (2500-25)*1000., (2500+25)*1000., (5000-300)*1000., 5000*1000.]) #50 km wide and 300 km long
regions.append([1, 3, 0., 1.e10, (2500-15)*1000., (2500+15)*1000., (5000-30)*1000., 5000*1000.]) # 30 km wide and 30 km long
# # For Mel_s_b
# regions.append([1, 4, 0., 1.e10, (2500-15)*1000., (2500.5)*1000., (5000-17)*1000., (5000-7)*1000.]) # area around barrier island
# regions.append([1, 4, 0., 1.e10, (2500-4)*1000., (2500.5)*1000., (5000-17)*1000., (5000)*1000.]) # area around bay
# # For Sav_s_b_3_m.txt
# regions.append([1, 4, 0., 1.e10, (2500-15)*1000., (2500.5)*1000., (5000-30+6)*1000., (5000-30+15)*1000.]) # area around barrier island
# regions.append([1, 4, 0., 1.e10, (2500-4)*1000., (2500.5)*1000., (5000-30+6)*1000., (5000)*1000.]) # area around bay
# # For Sav_s_b_30_m.txt
# regions.append([1, 5, 0., 1.e10, (2500-15)*1000., (2500.5)*1000., (5000-30+6)*1000., (5000-30+15)*1000.]) # area around barrier island
# regions.append([1, 5, 0., 1.e10, (2500-5)*1000., (2500.5)*1000., (5000-30+6)*1000., (5000)*1000.]) # area around bay
# For Sav_s_b_30_m.txt
regions.append([3, 4, 0., 1.e10, (2500-15)*1000., (2500.5)*1000., (5000-30+6)*1000., (5000-30+15)*1000.]) # area around barrier island
# regions.append([3, 4, 0., 1.e10, (2500-7)*1000., (2500.5)*1000., (5000-30+6)*1000., (5000)*1000.]) # area around bay
# Gauges from Ike AWR paper (2011 Dawson et al)
# Gauges from Path of storm 2
# # To make grid of 50 gauges in study area of Mel_s_b
# shore_gauges_y = 4985*1000
# shore_guages_x = np.linspace(2485*1000,2500*1000,num = 10)
# channel_gauges_y = 4988*1000
# channel_guages_x = np.linspace(2485*1000,2500*1000,num = 10)
# bay_gauges_y = np.linspace(4985*1000,5000*1000,num = 10)
# bay_guages_x = 2500*1000
# gauges_y = np.linspace(4985*1000,5000*1000,num = 10)
# gauges_x = np.linspace(2485*1000,2500*1000,num = 5)
# gauge_num=1
# for iy in range(len(gauges_y)):
# for ix in range(len(gauges_x)):
# rundata.gaugedata.gauges.append([gauge_num, gauges_x[ix], gauges_y[iy],
# rundata.clawdata.t0,
# rundata.clawdata.tfinal])
# gauge_num+=1
# # For Mel_s_b
# import numpy as np
# gauge_points = np.array([[(5000-30)*1000+15700,(5000-30)*1000+16300,(5000-30)*1000+19800,(5000-30)*1000+24300,(5000-30)*1000+29800,(5000-30)*1000+18000,
# (5000-30)*1000+24300,(5000-30)*1000+24300],
# [2492.5*1000,2492.5*1000,2492.5*1000,2492.5*1000,2492.5*1000,2492.5*1000,2500*1000,(2500-1.5)*1000]])
# gauge_points = np.transpose(gauge_points)
# gauge_points = np.flip(gauge_points,1)
# for row in range(len(gauge_points)):
# rundata.gaugedata.gauges.append([int(row+1), gauge_points[row,0], gauge_points[row,1],rundata.clawdata.t0,rundata.clawdata.tfinal])
# For Sav_s_b_3_m.txt
import numpy as np
# gauge_points = np.array([[(5000-30)*1000+8000,(5000-30)*1000+10000,(5000-30)*1000+13000,(5000-30)*1000+15000,(5000-30)*1000+22000,(5000-30)*1000+24500,
# (5000-30)*1000+24300,(5000-30)*1000+24300,(5000-30)*1000+24300,(5000-30)*1000+10000],
# [2492.5*1000,2492.5*1000,2492.5*1000,2492.5*1000,2492.5*1000,2492.5*1000,2500*1000,(2500-1.5)*1000,(2500+1.5)*1000,2507.5*1000]])
gauge_points = np.array([[(5000-30)*1000+8000,
(5000-30)*1000+14500,(5000-30)*1000+14500,(5000-30)*1000+14500,
(5000-30)*1000+16000,(5000-30)*1000+16000,(5000-30)*1000+16000,
(5000-30)*1000+19400,(5000-30)*1000+19400,(5000-30)*1000+19400,
(5000-30)*1000+24300,(5000-30)*1000+24300,
(5000-30)*1000+8000,(5000-30)*1000+10000,(5000-30)*1000+13000,
(5000-30)*1000+14500,(5000-30)*1000+16000,(5000-30)*1000+19400,(5000-30)*1000+22000],
[(2500-4)*1000,
(2500-4)*1000,(2500-2)*1000,(2500-6)*1000,
(2500-4)*1000,(2500-2)*1000,(2500-6)*1000,
(2500-4)*1000,(2500-2)*1000,(2500-6)*1000,
2500*1000,(2500-1.5)*1000,
(2500)*1000,(2500)*1000,(2500)*1000,(2500)*1000,(2500)*1000,(2500)*1000,(2500)*1000]])
gauge_points = np.transpose(gauge_points)
gauge_points = np.flip(gauge_points,1)
for row in range(len(gauge_points)):
rundata.gaugedata.gauges.append([int(row+1), gauge_points[row,0], gauge_points[row,1],rundata.clawdata.t0,rundata.clawdata.tfinal])
# # To set individual gauges
# rundata.gaugedata.gauges.append([1, 2500*1000, 4990*1000,
# rundata.clawdata.t0,
# rundata.clawdata.tfinal])
# rundata.gaugedata.gauges.append([2, 2500*1000, 4991*1000,
# rundata.clawdata.t0,
# rundata.clawdata.tfinal])
# rundata.gaugedata.gauges.append([3, 2495*1000, 4990*1000,
# rundata.clawdata.t0,
# rundata.clawdata.tfinal])
# rundata.gaugedata.gauges.append([2, 1850000, 2000000,
# rundata.clawdata.t0,
# rundata.clawdata.tfinal])
# rundata.gaugedata.gauges.append([3, 2400000, 2900000,
# rundata.clawdata.t0,
# rundata.clawdata.tfinal])
# rundata.gaugedata.gauges.append([4, 2575000, 4500000,
# rundata.clawdata.t0,
# rundata.clawdata.tfinal])
# Force the gauges to also record the wind and pressure fields
rundata.gaugedata.aux_out_fields = [4, 5, 6]
# ------------------------------------------------------------------
# GeoClaw specific parameters:
# ------------------------------------------------------------------
rundata = setgeo(rundata)
return rundata
# end of function setrun
# ----------------------
# -------------------
def setgeo(rundata):
"""
Set GeoClaw specific runtime parameters.
For documentation see ....
"""
geo_data = rundata.geo_data
# == Physics ==
geo_data.gravity = 9.81
geo_data.coordinate_system = 1
geo_data.earth_radius = 6367.5e3
geo_data.rho = 1025.0
geo_data.rho_air = 1.15
geo_data.ambient_pressure = 101.3e3
# == Forcing Options
geo_data.coriolis_forcing = True
geo_data.friction_forcing = True
geo_data.friction_depth = 1e10
# == Algorithm and Initial Conditions ==
# Note that in the original paper due to gulf summer swelling this was set
# to 0.28
geo_data.sea_level = 0.0
geo_data.dry_tolerance = 1.e-2
# Refinement Criteria
refine_data = rundata.refinement_data
refine_data.wave_tolerance = 1.0
refine_data.speed_tolerance = [1.0, 2.0, 3.0, 4.0]
refine_data.variable_dt_refinement_ratios = True
# == settopo.data values ==
topo_data = rundata.topo_data
topo_data.topofiles = []
# for topography, append lines of the form
# [topotype, fname]
# See regions for control over these regions, need better bathy data for
# the smaller domains
# clawutil.data.get_remote_file(
# "http://www.columbia.edu/~ktm2132/bathy/gulf_caribbean.tt3.tar.bz2")
# topo_hydro_dir = '/home/jovyan/data/topo_files_output/'
# topo_fine_path = os.path.join(topo_hydro_dir, 'Mel_s_b_3_m.txt')
# topo_coarse_path = os.path.join(topo_hydro_dir, 'Mel_s_b_coarse_m.txt')
# topo_data.topofiles.append([3, topo_fine_path])
# topo_data.topofiles.append([3, topo_coarse_path])
# topo_fine_path = os.path.join(topo_hydro_dir, 'Melbourne_FL_m.txt')
# topo_fine_path_27 = os.path.join(topo_hydro_dir, 'Melbourne_FL_27_m.txt')
# topo_coarse_path = os.path.join(topo_hydro_dir, 'Melbourne_FL_coarse_m.txt')
# #topo_data.topofiles.append([3, topo_fine_path])
# topo_data.topofiles.append([3, topo_fine_path_27])
# topo_data.topofiles.append([3, topo_coarse_path])
# Savannah_GA_s_b_3m.txt
topo_hydro_dir = '/home/jovyan/data/topo_files_output/'
# topo_fine_path = os.path.join(topo_hydro_dir, 'Sav_s_b_3_m.txt')
topo_fine_path_30 = os.path.join(topo_hydro_dir, 'Sav_s_nb_30_m.txt')
topo_coarse_path = os.path.join(topo_hydro_dir, 'Sav_s_nb_coarse_m.txt')
# topo_data.topofiles.append([3, topo_fine_path])
topo_data.topofiles.append([3, topo_fine_path_30])
topo_data.topofiles.append([3, topo_coarse_path])
# == setfixedgrids.data values ==
rundata.fixed_grid_data.fixedgrids = []
# for fixed grids append lines of the form
# [t1,t2,noutput,x1,x2,y1,y2,xpoints,ypoints,\
# ioutarrivaltimes,ioutsurfacemax]
# ================
# Set Surge Data
# ================
data = rundata.surge_data
# Source term controls
data.wind_forcing = True
data.drag_law = 1
data.pressure_forcing = True
data.display_landfall_time = True
# AMR parameters, m/s and m respectively
data.wind_refine = [20.0, 40.0, 60.0]
data.R_refine = [60.0e3, 40e3, 20e3]
# Storm parameters - Parameterized storm (Holland 1980)
data.storm_specification_type = 'holland80' # (type 1)
storm_dir = '/home/jovyan/data/hydroinformatics/syn_storm/'
storm_path = os.path.join(storm_dir, 'Path1.storm')
data.storm_file = storm_path
# # Convert ATCF data to GeoClaw format
# clawutil.data.get_remote_file(
# "http://ftp.nhc.noaa.gov/atcf/archive/2008/bal092008.dat.gz")
# atcf_path = os.path.join(scratch_dir, "bal092008.dat")
# # Note that the get_remote_file function does not support gzip files which
# # are not also tar files. The following code handles this
# with gzip.open(".".join((atcf_path, 'gz')), 'rb') as atcf_file, \
# open(atcf_path, 'w') as atcf_unzipped_file:
# atcf_unzipped_file.write(atcf_file.read().decode('ascii'))
# # Uncomment/comment out to use the old version of the Ike storm file
# # ike = Storm(path="old_ike.storm", file_format="ATCF")
# ike = Storm(path=atcf_path, file_format="ATCF")
# # Calculate landfall time - Need to specify as the file above does not
# # include this info (9/13/2008 ~ 7 UTC)
# ike.time_offset = datetime.datetime(2008, 9, 13, 7)
# ike.write(data.storm_file, file_format='geoclaw')
# =======================
# Set Variable Friction
# =======================
data = rundata.friction_data
# Variable friction
data.variable_friction = True
# Region based friction
# Entire domain
data.friction_regions.append([rundata.clawdata.lower,
rundata.clawdata.upper,
[np.infty, 0.0, -np.infty],
[0.030, 0.022]])
# # La-Tex Shelf
# data.friction_regions.append([(-98, 25.25), (-90, 30),
# [np.infty, -10.0, -200.0, -np.infty],
# [0.030, 0.012, 0.022]])
return rundata
# end of function setgeo
# ----------------------
if __name__ == '__main__':
# Set up run-time parameters and write all data files.
import sys
if len(sys.argv) == 2:
rundata = setrun(sys.argv[1])
else:
rundata = setrun()
rundata.write()
| [] | [] | [
"CLAW"
] | [] | ["CLAW"] | python | 1 | 0 | |
compiler/core/src/main/java/com/asakusafw/compiler/batch/experimental/DumpEnvironmentProcessor.java | /**
* Copyright 2011-2021 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.compiler.batch.experimental;
import java.io.Closeable;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.asakusafw.compiler.batch.AbstractWorkflowProcessor;
import com.asakusafw.compiler.batch.WorkDescriptionProcessor;
import com.asakusafw.compiler.batch.Workflow;
import com.asakusafw.compiler.batch.processor.JobFlowWorkDescriptionProcessor;
/**
* Dumps environment variables: {@code ASAKUSA_*} and system properties: {@code com.asakusafw.*}.
*/
public class DumpEnvironmentProcessor extends AbstractWorkflowProcessor {
static final Logger LOG = LoggerFactory.getLogger(DumpEnvironmentProcessor.class);
static final Charset ENCODING = StandardCharsets.UTF_8;
/**
* The output path.
*/
public static final String PATH = "etc/build.log"; //$NON-NLS-1$
/**
* The name prefix of target environment variables.
*/
public static final String PREFIX_ENV = "ASAKUSA_"; //$NON-NLS-1$
/**
* The key prefix of target system properties.
*/
public static final String PREFIX_SYSPROP = "com.asakusafw."; //$NON-NLS-1$
@Override
public Collection<Class<? extends WorkDescriptionProcessor<?>>> getDescriptionProcessors() {
List<Class<? extends WorkDescriptionProcessor<?>>> results = new ArrayList<>();
results.add(JobFlowWorkDescriptionProcessor.class);
return results;
}
@Override
public void process(Workflow workflow) throws IOException {
try (Context context = new Context(getEnvironment().openResource(PATH))) {
dumpInternal(context);
dumpEnv(context);
dumpSystemProperties(context);
}
}
private void dumpInternal(Context context) {
context.put("core.batchId = {0}", getEnvironment().getConfiguration().getBatchId()); //$NON-NLS-1$
context.put("core.buildId = {0}", getEnvironment().getBuildId()); //$NON-NLS-1$
}
private void dumpEnv(Context context) {
try {
SortedMap<String, String> map = sortFilter(System.getenv(), PREFIX_ENV);
for (Map.Entry<String, ?> entry : map.entrySet()) {
context.put("{0} = {1}", entry.getKey(), entry.getValue()); //$NON-NLS-1$
}
} catch (SecurityException e) {
LOG.warn(Messages.getString(
"DumpEnvironmentProcessor.warnFailedToObtainEnvironmentVariable"), e); //$NON-NLS-1$
}
}
private void dumpSystemProperties(Context context) {
try {
SortedMap<String, Object> map = sortFilter(System.getProperties(), PREFIX_SYSPROP);
for (Map.Entry<String, ?> entry : map.entrySet()) {
context.put("{0} = {1}", entry.getKey(), entry.getValue()); //$NON-NLS-1$
}
} catch (SecurityException e) {
LOG.warn(Messages.getString(
"DumpEnvironmentProcessor.warnFailedToObtainSystemProperty"), e); //$NON-NLS-1$
}
}
private <T> SortedMap<String, T> sortFilter(Map<?, T> map, String prefix) {
assert map != null;
assert prefix != null;
SortedMap<String, T> results = new TreeMap<>();
for (Map.Entry<?, T> entry : map.entrySet()) {
String key = String.valueOf(entry.getKey());
if (key.startsWith(prefix)) {
results.put(key, entry.getValue());
}
}
return results;
}
private static class Context implements Closeable {
private final PrintWriter writer;
Context(OutputStream output) {
assert output != null;
writer = new PrintWriter(new OutputStreamWriter(output, ENCODING));
}
public void put(String pattern, Object... arguments) {
assert pattern != null;
assert arguments != null;
String text;
if (arguments.length == 0) {
text = pattern;
} else {
text = MessageFormat.format(pattern, arguments);
}
writer.println(text);
LOG.debug(text);
}
@Override
public void close() throws IOException {
writer.close();
}
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
refprice/chainlink.go | package refprice
import (
"context"
"errors"
"fmt"
"github.com/KyberNetwork/cache/libs/contracts"
etherCommon "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/ethclient"
"log"
"math/big"
"os"
"time"
)
type ChainlinkFetcher struct {
storage *ChainlinkContractStorage
client *ethclient.Client
}
func NewChainlinkFetcher() *ChainlinkFetcher {
clientIns, err := ethclient.Dial(os.Getenv("NODE_ENDPOINT"))
if err != nil {
log.Fatal("missing `NODE_ENDPOINT` env")
}
return &ChainlinkFetcher{
storage: NewChainlinkContractStorage(),
client: clientIns,
}
}
func (f *ChainlinkFetcher) GetRefPrice(base, quote string) (*big.Float, error) {
kyberEnv := os.Getenv("KYBER_ENV")
if kyberEnv != "production" {
return nil, errors.New(fmt.Sprintf("cannot get price from chainlink for env %v", kyberEnv))
}
contract := f.storage.GetContract(base, quote)
if contract.Address == "" {
return nil, errors.New(fmt.Sprintf("cannot get chainlink contract for %v_%v", base, quote))
}
price, err := f.fetchPrice(contract.Address)
if err != nil {
return nil, err
}
tokenPrice := GetTokenPrice(price, contract.Multiply)
return tokenPrice, nil
}
// fetchPrice fetch price from contract
func (f *ChainlinkFetcher) fetchPrice(address string) (*big.Int, error) {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
opts := &bind.CallOpts{
Context: ctx,
}
aggrator, err := contracts.NewAggregator(etherCommon.HexToAddress(address), f.client)
if err != nil {
log.Print(err)
return nil, err
}
return aggrator.LatestAnswer(opts)
}
func GetTokenPrice(price *big.Int, multiplier *big.Int) *big.Float {
priceF := new(big.Float).SetInt(price)
mulF := new(big.Float).SetInt(multiplier)
result := new(big.Float).Quo(priceF, mulF)
return result
}
| [
"\"NODE_ENDPOINT\"",
"\"KYBER_ENV\""
] | [] | [
"KYBER_ENV",
"NODE_ENDPOINT"
] | [] | ["KYBER_ENV", "NODE_ENDPOINT"] | go | 2 | 0 | |
cmd/examples/main.go | // Copyright © 2018 Banzai Cloud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"log"
"os"
database "github.com/banzaicloud/bank-vaults/pkg/sdk/db"
"github.com/banzaicloud/bank-vaults/pkg/sdk/vault"
"github.com/hashicorp/vault/api"
)
func vaultExample() {
vaultPath := "kubernetes"
if path := os.Getenv("VAULT_PATH"); path != "" {
vaultPath = path
}
config := api.DefaultConfig()
if config.Error != nil {
log.Fatal(config.Error)
}
client, err := vault.NewClientFromConfig(
config,
vault.ClientAuthPath(vaultPath),
)
if err != nil {
log.Fatal(err)
}
log.Println("Created Vault client")
secret, err := client.RawClient().Logical().List("secret/metadata/accounts")
if err != nil {
log.Fatal(err)
}
if secret != nil {
for _, v := range secret.Data {
log.Printf("-> %+v", v)
for _, v := range v.([]interface{}) {
log.Printf(" -> %+v", v)
}
}
log.Println("Finished reading Vault")
} else {
log.Fatal("Found no data in vault")
}
}
func gormExample() {
secretSource, err := database.DynamicSecretDataSource("mysql", "my-role@tcp(127.0.0.1:3306)/sparky?charset=utf8&parseTime=True&loc=Local")
if err != nil {
log.Fatal(err)
}
log.Printf("use this with GORM:\ndb, err := gorm.Open(\"mysql\", \"%s\")", secretSource)
}
// REQUIRED to start a Vault dev server with:
// vault server -dev &
func main() {
	// Demo-only setup: point the client at the in-cluster Vault and skip TLS
	// certificate verification. Do not do this in production.
	// NOTE(review): gormExample is defined above but never invoked here.
	os.Setenv("VAULT_ADDR", "https://vault.default:8200")
	os.Setenv("VAULT_SKIP_VERIFY", "true")
	vaultExample()
}
| [
"\"VAULT_PATH\""
] | [] | [
"VAULT_PATH"
] | [] | ["VAULT_PATH"] | go | 1 | 0 | |
publish-results-pr/internal/appargs/args_githubaction_test.go | package appargs
import (
"os"
"path"
"reflect"
"strings"
"testing"
)
// TestGitHubActionArgs_ErrorWhenMissingEnv verifies that GetArgs fails with an
// error naming the missing variable whenever any single required GitHub
// Actions environment variable is absent.
func TestGitHubActionArgs_ErrorWhenMissingEnv(t *testing.T) {
	if os.Getenv("GITHUB_ACTIONS") != "" {
		// In a real Actions run these variables are already set and cannot
		// be unset per-subtest, so the scenario only works locally.
		// FIX: removed the unreachable `return` after t.Skip — Skip calls
		// SkipNow, which stops the test via runtime.Goexit.
		t.Skip("Skipping test in GitHub Actions")
	}

	envs := map[string]string{
		"GITHUB_ACTIONS":     "true",
		"GITHUB_REPOSITORY":  "org/repo",
		"GITHUB_REF":         "main",
		"GITHUB_SHA":         "aa11bbcc33",
		"GITHUB_EVENT_PATH":  "path/to.json",
		"INPUT_GITHUB_TOKEN": "ghtokghtok",
	}

	for envToIgnore := range envs {
		t.Run(envToIgnore, func(t *testing.T) {
			// Set every required variable except the one under test.
			for key, value := range envs {
				if key != envToIgnore {
					t.Setenv(key, value)
				}
			}

			r := NewGitHubActionArgsReader()
			_, err := r.GetArgs()
			if err == nil || !strings.Contains(err.Error(), envToIgnore) {
				t.Errorf("argsFromGitHubAction should have failed with missing env variable %v", envToIgnore)
			}
		})
	}
}
// TestGitHubActionArgs_ArgsReadFromEnv verifies that GetArgs assembles a
// complete AppArgs from the GitHub Actions environment, including the pull
// request number parsed out of the event payload JSON file.
func TestGitHubActionArgs_ArgsReadFromEnv(t *testing.T) {
	if os.Getenv("GITHUB_ACTIONS") != "" {
		// FIX: removed the unreachable `return` after t.Skip.
		t.Skip("Skipping test in GitHub Actions")
	}

	// Fake event payload carrying the PR number the reader must extract.
	tempDir := t.TempDir()
	tempJsonFile := path.Join(tempDir, "ghe.json")
	t.Setenv("GITHUB_EVENT_PATH", tempJsonFile)
	// FIX: the os.WriteFile error was silently ignored; a failed write would
	// previously surface later as a confusing JSON parse failure.
	if err := os.WriteFile(tempJsonFile, []byte(`{"pull_request":{"number":123}}`), 0666); err != nil {
		t.Fatalf("failed to write event payload file: %v", err)
	}

	envs := map[string]string{
		"GITHUB_ACTIONS":      "true",
		"GITHUB_REPOSITORY":   "org/repo",
		"GITHUB_REF":          "main",
		"GITHUB_SHA":          "aa11bbcc33",
		"GITHUB_RUN_ID":       "2112121212",
		"INPUT_GITHUB_TOKEN":  "ghtokghtok",
		"INPUT_AHK_APPTOKEN":  "tokentoken",
		"INPUT_AHK_IMAGEEXT":  ".ext",
		"INPUT_AHK_APPSECRET": "secretsecret",
	}

	expected := AppArgs{
		GitHubRepoFullName:   "org/repo",
		GitHubRepoOwner:      "org",
		GitHubRepoName:       "repo",
		GitHubBranch:         "main",
		GitCommitHash:        "aa11bbcc33",
		GitHubActionRunId:    "2112121212",
		GitHubPullRequestNum: 123,
		GitHubToken:          "ghtokghtok",
		NeptunFileName:       "neptun.txt",
		ImageExtension:       ".ext",
		ResultFile:           "result.txt",
		AhkAppUrl:            `https://ahk-grade-management.azurewebsites.net/api/evaluation-result`,
		AhkAppToken:          "tokentoken",
		AhkAppSecret:         "secretsecret",
	}

	for key, value := range envs {
		t.Setenv(key, value)
	}

	r := NewGitHubActionArgsReader()
	actual, err := r.GetArgs()
	if err != nil {
		t.Errorf("argsFromGitHubAction failed with %v", err)
		return
	}
	if !reflect.DeepEqual(*actual, expected) {
		t.Errorf("argsFromGitHubAction expected %+v got %+v", expected, *actual)
	}
}
| [
"\"GITHUB_ACTIONS\"",
"\"GITHUB_ACTIONS\""
] | [] | [
"GITHUB_ACTIONS"
] | [] | ["GITHUB_ACTIONS"] | go | 1 | 0 | |
kcontainer/order-query-quarkus-ms/src/main/java/ibm/gse/orderqueryms/infrastructure/kafka/OrderAgent.java | package ibm.gse.orderqueryms.infrastructure.kafka;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Optional;
import java.util.Properties;
import java.util.function.Consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.gson.Gson;

import ibm.gse.orderqueryms.app.AppRegistry;
import ibm.gse.orderqueryms.domain.model.Cancellation;
import ibm.gse.orderqueryms.domain.model.ContainerAssignment;
import ibm.gse.orderqueryms.domain.model.Order;
import ibm.gse.orderqueryms.domain.model.Rejection;
import ibm.gse.orderqueryms.domain.model.Spoil;
import ibm.gse.orderqueryms.domain.model.VoyageAssignment;
import ibm.gse.orderqueryms.domain.model.order.QueryOrder;
import ibm.gse.orderqueryms.domain.model.order.history.OrderHistory;
import ibm.gse.orderqueryms.domain.model.order.history.OrderHistoryInfo;
import ibm.gse.orderqueryms.infrastructure.events.AbstractEvent;
import ibm.gse.orderqueryms.infrastructure.events.EventListener;
import ibm.gse.orderqueryms.infrastructure.events.order.AssignContainerEvent;
import ibm.gse.orderqueryms.infrastructure.events.order.AssignOrderEvent;
import ibm.gse.orderqueryms.infrastructure.events.order.CancelOrderEvent;
import ibm.gse.orderqueryms.infrastructure.events.order.ContainerDeliveredEvent;
import ibm.gse.orderqueryms.infrastructure.events.order.CreateOrderEvent;
import ibm.gse.orderqueryms.infrastructure.events.order.OrderCompletedEvent;
import ibm.gse.orderqueryms.infrastructure.events.order.OrderEvent;
import ibm.gse.orderqueryms.infrastructure.events.order.RejectOrderEvent;
import ibm.gse.orderqueryms.infrastructure.events.order.SpoilOrderEvent;
import ibm.gse.orderqueryms.infrastructure.events.order.UpdateOrderEvent;
import ibm.gse.orderqueryms.infrastructure.repository.OrderDAO;
import ibm.gse.orderqueryms.infrastructure.repository.OrderHistoryDAO;
/**
 * Kafka consumer agent that applies order events to the two read models of the
 * query service: the current-state store (OrderDAO) and the per-order history
 * store (OrderHistoryDAO).
 *
 * FIX: the original handle() contained a no-op self cast, an always-true null
 * check after instanceof, System.out.println duplicates of the logger output,
 * and seventeen near-identical synchronized switch cases. The lookup/mutate/
 * persist boilerplate is now factored into mutateOrder and mutateHistory.
 */
public class OrderAgent implements EventListener {

    private static final Logger logger = LoggerFactory.getLogger(OrderAgent.class.getName());

    private final KafkaConsumer<String, String> kafkaConsumer;
    private final OrderDAO orderRepository;
    private final OrderHistoryDAO orderHistoryRepository;

    /**
     * Wires the agent from the application registry. The Kafka consumer is only
     * created (and subscribed to the order topic) when KAFKA_BROKERS is set, so
     * the class stays constructible in environments without a broker.
     */
    public OrderAgent() {
        if (System.getenv("KAFKA_BROKERS") != null) {
            Properties properties = ApplicationConfig.getOrderConsumerProperties("orderquery-orders-consumer");
            kafkaConsumer = new KafkaConsumer<String, String>(properties);
            this.kafkaConsumer.subscribe(Collections.singletonList(ApplicationConfig.getOrderTopic()));
        } else {
            kafkaConsumer = null;
        }
        orderRepository = AppRegistry.getInstance().orderRepository();
        orderHistoryRepository = AppRegistry.getInstance().orderHistoryRepository();
    }

    /** Test-friendly constructor with explicit collaborators. */
    public OrderAgent(KafkaConsumer<String, String> kafkaConsumer, OrderDAO orderRepository, OrderHistoryDAO orderHistoryRepository) {
        this.kafkaConsumer = kafkaConsumer;
        this.orderRepository = orderRepository;
        this.orderHistoryRepository = orderHistoryRepository;
    }

    /**
     * Polls the order topic once and deserializes every received record.
     *
     * @return the order events read in this poll cycle (possibly empty)
     */
    public List<OrderEvent> poll() {
        ConsumerRecords<String, String> recs = kafkaConsumer.poll(ApplicationConfig.CONSUMER_POLL_TIMEOUT);
        List<OrderEvent> result = new ArrayList<>();
        for (ConsumerRecord<String, String> rec : recs) {
            result.add(OrderEvent.deserialize(rec.value()));
        }
        return result;
    }

    /** Closes the Kafka consumer, logging (but not propagating) any failure. */
    public void safeClose() {
        try {
            kafkaConsumer.close(ApplicationConfig.CONSUMER_CLOSE_TIMEOUT);
        } catch (Exception e) {
            logger.warn("Failed closing Consumer", e);
        }
    }

    /**
     * Applies an incoming event to both read models. Non-OrderEvent events are
     * ignored. As before, any failure (including an unknown order id) is caught
     * and logged here, and an exception in the current-state update skips the
     * history update for that event.
     */
    @Override
    public void handle(AbstractEvent orderEvent) {
        try {
            logger.info("@@@@ in handle " + new Gson().toJson(orderEvent));
            if (orderEvent instanceof OrderEvent) {
                OrderEvent event = (OrderEvent) orderEvent;
                updateCurrentState(event);
                updateHistory(event);
            }
        } catch (Exception e) {
            logger.error((new Date()).toString() + " " + e.getMessage(), e);
        }
    }

    /** Routes an event to the matching mutation of the current-state store. */
    private void updateCurrentState(OrderEvent event) {
        switch (event.getType()) {
        case OrderEvent.TYPE_CREATED:
            synchronized (orderRepository) {
                Order order = ((CreateOrderEvent) event).getPayload();
                orderRepository.add(QueryOrder.newFromOrder(order));
            }
            break;
        case OrderEvent.TYPE_UPDATED: {
            Order order = ((UpdateOrderEvent) event).getPayload();
            mutateOrder(order.getOrderID(), qo -> qo.update(order));
            break;
        }
        case OrderEvent.TYPE_ASSIGNED: {
            VoyageAssignment voyageAssignment = ((AssignOrderEvent) event).getPayload();
            mutateOrder(voyageAssignment.getOrderID(), qo -> qo.assign(voyageAssignment));
            break;
        }
        case OrderEvent.TYPE_REJECTED: {
            Rejection rejection = ((RejectOrderEvent) event).getPayload();
            mutateOrder(rejection.getOrderID(), qo -> qo.reject(rejection));
            break;
        }
        case OrderEvent.TYPE_CONTAINER_ALLOCATED: {
            ContainerAssignment container = ((AssignContainerEvent) event).getPayload();
            mutateOrder(container.getOrderID(), qo -> qo.assignContainer(container));
            break;
        }
        case OrderEvent.TYPE_CONTAINER_DELIVERED: {
            ContainerAssignment container = ((ContainerDeliveredEvent) event).getPayload();
            mutateOrder(container.getOrderID(), qo -> qo.containerDelivered(container));
            break;
        }
        case OrderEvent.TYPE_CANCELLED: {
            Cancellation cancellation = ((CancelOrderEvent) event).getPayload();
            mutateOrder(cancellation.getOrderID(), qo -> qo.cancel(cancellation));
            break;
        }
        case OrderEvent.TYPE_COMPLETED: {
            Order order = ((OrderCompletedEvent) event).getPayload();
            mutateOrder(order.getOrderID(), qo -> qo.orderCompleted(order));
            break;
        }
        case OrderEvent.TYPE_SPOILT: {
            Spoil spoil = ((SpoilOrderEvent) event).getPayload();
            mutateOrder(spoil.getOrderID(), qo -> qo.spoilOrder(spoil));
            break;
        }
        default:
            logger.warn("Unknown event type: " + event);
        }
    }

    /** Routes an event to the matching mutation of the history store. */
    private void updateHistory(OrderEvent event) {
        switch (event.getType()) {
        case OrderEvent.TYPE_CREATED:
            // Creation is the only case that adds rather than updates.
            synchronized (orderHistoryRepository) {
                CreateOrderEvent createEvent = (CreateOrderEvent) event;
                OrderHistoryInfo item = OrderHistoryInfo.newFromOrder(createEvent.getPayload());
                OrderHistory history = OrderHistory.newFromOrder(item, createEvent.getTimestampMillis(), createEvent.getType());
                orderHistoryRepository.addOrder(history);
                orderHistoryRepository.orderHistory(history);
            }
            break;
        case OrderEvent.TYPE_UPDATED: {
            UpdateOrderEvent e = (UpdateOrderEvent) event;
            Order order = e.getPayload();
            mutateHistory(order.getOrderID(), e.getTimestampMillis(), e.getType(), item -> item.update(order));
            break;
        }
        case OrderEvent.TYPE_ASSIGNED: {
            AssignOrderEvent e = (AssignOrderEvent) event;
            VoyageAssignment voyageAssignment = e.getPayload();
            mutateHistory(voyageAssignment.getOrderID(), e.getTimestampMillis(), e.getType(), item -> item.assign(voyageAssignment));
            break;
        }
        case OrderEvent.TYPE_REJECTED: {
            RejectOrderEvent e = (RejectOrderEvent) event;
            Rejection rejection = e.getPayload();
            mutateHistory(rejection.getOrderID(), e.getTimestampMillis(), e.getType(), item -> item.reject(rejection));
            break;
        }
        case OrderEvent.TYPE_CONTAINER_ALLOCATED: {
            AssignContainerEvent e = (AssignContainerEvent) event;
            ContainerAssignment container = e.getPayload();
            mutateHistory(container.getOrderID(), e.getTimestampMillis(), e.getType(), item -> item.assignContainer(container));
            break;
        }
        case OrderEvent.TYPE_CONTAINER_DELIVERED: {
            ContainerDeliveredEvent e = (ContainerDeliveredEvent) event;
            ContainerAssignment container = e.getPayload();
            mutateHistory(container.getOrderID(), e.getTimestampMillis(), e.getType(), item -> item.containerDelivered(container));
            break;
        }
        case OrderEvent.TYPE_CANCELLED: {
            CancelOrderEvent e = (CancelOrderEvent) event;
            Cancellation cancellation = e.getPayload();
            mutateHistory(cancellation.getOrderID(), e.getTimestampMillis(), e.getType(), item -> item.cancel(cancellation));
            break;
        }
        case OrderEvent.TYPE_COMPLETED: {
            OrderCompletedEvent e = (OrderCompletedEvent) event;
            Order order = e.getPayload();
            mutateHistory(order.getOrderID(), e.getTimestampMillis(), e.getType(), item -> item.orderCompleted(order));
            break;
        }
        case OrderEvent.TYPE_SPOILT: {
            SpoilOrderEvent e = (SpoilOrderEvent) event;
            Spoil spoil = e.getPayload();
            // Note: the history model uses spoil(), the query model spoilOrder().
            mutateHistory(spoil.getOrderID(), e.getTimestampMillis(), e.getType(), item -> item.spoil(spoil));
            break;
        }
        default:
            logger.warn("Unknown event type: " + event);
        }
    }

    /**
     * Looks up the order in the current-state store, applies the mutation and
     * persists it, all under the repository lock.
     *
     * @throws IllegalStateException when the order id is unknown
     */
    private void mutateOrder(String orderID, Consumer<QueryOrder> mutation) {
        synchronized (orderRepository) {
            Optional<QueryOrder> queryOrder = orderRepository.getById(orderID);
            if (!queryOrder.isPresent()) {
                throw new IllegalStateException("Cannot update - Unknown order Id " + orderID);
            }
            QueryOrder qo = queryOrder.get();
            mutation.accept(qo);
            orderRepository.update(qo);
        }
    }

    /**
     * Looks up the order history entry, applies the mutation and persists both
     * the updated entry and the new history record, under the repository lock.
     *
     * @throws IllegalStateException when the order id is unknown
     */
    private void mutateHistory(String orderID, long timestampMillis, String action, Consumer<OrderHistoryInfo> mutation) {
        synchronized (orderHistoryRepository) {
            Optional<OrderHistoryInfo> info = orderHistoryRepository.getByOrderId(orderID);
            if (!info.isPresent()) {
                throw new IllegalStateException("Cannot update - Unknown order Id " + orderID);
            }
            OrderHistoryInfo item = info.get();
            mutation.accept(item);
            OrderHistory history = OrderHistory.newFromOrder(item, timestampMillis, action);
            orderHistoryRepository.updateOrder(history);
            orderHistoryRepository.orderHistory(history);
        }
    }
}
| [
"\"KAFKA_BROKERS\""
] | [] | [
"KAFKA_BROKERS"
] | [] | ["KAFKA_BROKERS"] | java | 1 | 0 | |
qa/rpc-tests/maxblocksinflight.py | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import logging
'''
In this test we connect to one node over p2p, send it numerous inv's, and
compare the resulting number of getdata requests to a max allowed value. We
test for exceeding 128 blocks in flight, which was the limit an 0.9 client will
reach. [0.10 clients shouldn't request more than 16 from a single peer.]
'''
MAX_REQUESTS = 128
class TestManager(NodeConnCB):
    # P2P callback harness: counts every getdata block request the node sends
    # and asserts it never re-requests a block nor exceeds MAX_REQUESTS.
    # set up NodeConnCB callbacks, overriding base class
    def on_getdata(self, conn, message):
        self.log.debug("got getdata %s" % repr(message))
        # Log the requests
        for inv in message.inv:
            if inv.hash not in self.blockReqCounts:
                self.blockReqCounts[inv.hash] = 0
            self.blockReqCounts[inv.hash] += 1

    def on_close(self, conn):
        # A disconnect is only acceptable after run() has finished.
        if not self.disconnectOkay:
            raise EarlyDisconnectError(0)

    def __init__(self):
        NodeConnCB.__init__(self)
        self.log = logging.getLogger("BlockRelayTest")

    def add_new_connection(self, connection):
        # Attach the p2p connection and reset per-run bookkeeping.
        self.connection = connection
        self.blockReqCounts = {}
        self.disconnectOkay = False

    def run(self):
        self.connection.rpc.generate(1) # Leave IBD
        # Send batches of random (non-existent) block invs of increasing size
        # and verify the node's getdata volume stays within bounds.
        numBlocksToGenerate = [8, 16, 128, 1024]
        for count in range(len(numBlocksToGenerate)):
            current_invs = []
            for i in range(numBlocksToGenerate[count]):
                current_invs.append(CInv(2, random.randrange(0, 1 << 256)))
                # Flush before hitting the protocol's per-message inv limit.
                if len(current_invs) >= 50000:
                    self.connection.send_message(msg_inv(current_invs))
                    current_invs = []
            if len(current_invs) > 0:
                self.connection.send_message(msg_inv(current_invs))

            # Wait and see how many blocks were requested
            time.sleep(2)

            total_requests = 0
            with mininode_lock:
                for key in self.blockReqCounts:
                    total_requests += self.blockReqCounts[key]
                    if self.blockReqCounts[key] > 1:
                        raise AssertionError("Error, test failed: block %064x requested more than once" % key)
            if total_requests > MAX_REQUESTS:
                raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
            print "Round %d: success (total requests: %d)" % (count, total_requests)

        self.disconnectOkay = True
        self.connection.disconnect_node()
class MaxBlocksInFlightTest(BitcoinTestFramework):
    # Framework wrapper: starts one whitelisted node and drives TestManager
    # over a direct p2p connection.
    def add_options(self, parser):
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("CRIPTOREALD", "criptoreald"),
                          help="Binary to test max block requests behavior")

    def setup_chain(self):
        print "Initializing test directory "+self.options.tmpdir
        initialize_chain_clean(self.options.tmpdir, 1)

    def setup_network(self):
        # -whitelist keeps the node from penalizing our test peer.
        self.nodes = start_nodes(1, self.options.tmpdir,
                                 extra_args=[['-debug', '-whitelist=127.0.0.1']],
                                 binary=[self.options.testbinary])

    def run_test(self):
        test = TestManager()
        test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
        NetworkThread().start() # Start up network handling in another thread
        test.run()
if __name__ == '__main__':
MaxBlocksInFlightTest().main()
| [] | [] | [
"CRIPTOREALD"
] | [] | ["CRIPTOREALD"] | python | 1 | 0 | |
client/BrocConfig.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# Copyright (c) 2015 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
Description : manage config file $HOME/.broc.rc
Authors : zhousongsong([email protected])
Date : 2015-09-18 10:28:23
"""
import os
import sys
import ConfigParser
broc_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
sys.path.insert(0, broc_dir)
from dependency import BrocModule_pb2
class BrocConfig(object):
    """
    Manages the per-user configuration file $HOME/.broc.rc, which stores the
    svn/git repository domains and the svn branch/tag path postfixes.
    """
    def __init__(self):
        """
        Initialize built-in defaults; load() overwrites them from disk.
        """
        # NOTE(review): raises KeyError if HOME is unset -- assumed to run in
        # a login environment, TODO confirm.
        self._file = os.path.join(os.environ['HOME'], '.broc.rc')
        self._svn_repo_domain = 'https://svn.github.com'
        self._git_repo_domain = 'https://github.com'
        self._svn_postfix_branch = "BRANCH"
        self._svn_postfix_tag = "PD_BL"

    def __str__(self):
        """
        Human-readable dump of the four configuration values.
        """
        return "svn repo domain: %s\ngit repo domain: %s\n \
svn postfix branch: %s\nsvn postfix tag: %s"% (self._svn_repo_domain,
                                               self._git_repo_domain,
                                               self._svn_postfix_branch,
                                               self._svn_postfix_tag)

    def load(self):
        """
        load broc configurations; if the file does not exist yet it is
        created with the built-in defaults
        Returns:
            return (ret, msg)
            if load successfully, ret is True
            if load failed, ret is False and msg is detail content
        """
        try:
            # if configuration file does not exists in $HOME, create one
            if not os.path.isfile(self._file):
                conf = ConfigParser.ConfigParser()
                conf.add_section('repo')
                conf.set('repo', 'svn_repo_domain', self._svn_repo_domain)
                conf.set('repo', 'git_repo_domain', self._git_repo_domain)
                conf.set('repo', 'svn_postfix_branch', 'BRANCH')
                conf.set('repo', 'svn_postfix_tag', 'PD_BL')
                # FIX: use a context manager so the handle is closed even if
                # conf.write() raises.
                with open(self._file, 'w') as cfgfile:
                    conf.write(cfgfile)
            else:
                # FIX: the previous code opened the file for reading here and
                # never closed it; ConfigParser.read() opens the path itself,
                # so the extra open was both a leak and unnecessary.
                conf = ConfigParser.ConfigParser()
                conf.read(self._file)
                self._svn_repo_domain = conf.get('repo', 'svn_repo_domain')
                self._git_repo_domain = conf.get('repo', 'git_repo_domain')
                self._svn_postfix_branch = conf.get('repo', 'svn_postfix_branch')
                self._svn_postfix_tag = conf.get('repo', 'svn_postfix_tag')
        except ConfigParser.Error as e:
            return (False, str(e))
        return (True, '')

    def RepoDomain(self, repo_type):
        """
        return repository domain for the given repository kind
        Args:
            repo_type : BrocMode_pb2.Module.EnumRepo
        """
        # NOTE(review): returns None implicitly for unknown repo types.
        if repo_type == BrocModule_pb2.Module.SVN:
            return self._svn_repo_domain
        elif repo_type == BrocModule_pb2.Module.GIT:
            return self._git_repo_domain

    def SVNPostfixBranch(self):
        """
        return postfix of svn branch
        """
        return self._svn_postfix_branch

    def SVNPostfixTag(self):
        """
        return postfix of svn tag
        """
        return self._svn_postfix_tag

    def Dump(self):
        """
        print broc config to stdout
        """
        print("-- svn domain : %s" % self._svn_repo_domain)
        print("-- git domain : %s" % self._git_repo_domain)
        print("-- svn branch posfix : %s" % self._svn_postfix_branch)
        print("-- svn tag postfix : %s" % self._svn_postfix_tag)
| [] | [] | [
"HOME"
] | [] | ["HOME"] | python | 1 | 0 | |
runnable-petri-server/src/it/java/com/wixpress/petri/RunnableServerIT.java | package com.wixpress.petri;
import com.ning.http.client.AsyncHttpClient;
import com.wixpress.guineapig.drivers.PetriBackofficeUiDriver;
import com.wixpress.petri.petri.FullPetriClient;
import com.wixpress.petri.test.TestBuilders;
import org.apache.commons.io.FileUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.File;
import java.net.ConnectException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.hasSize;
public class RunnableServerIT {
private static final int SERVER_STARTUP_RETRIES = 20;
private static int petriServerPort = 9011;
private static final String BASE_SERVER_ADDRESS = "http://localhost:" + petriServerPort + "/";
private static final String BASE_PETRI_API_ADDRESS = BASE_SERVER_ADDRESS + "petri";
private FullPetriClient fullPetriClient;
private Process petriServerProcess;
private PetriBackofficeUiDriver petriBackofficeUiDriver;
@Before
public void setUp() throws Exception {
removeDBFile();
fullPetriClient = PetriRPCClient.makeFullClientFor(BASE_PETRI_API_ADDRESS);
petriBackofficeUiDriver = new PetriBackofficeUiDriver(petriServerPort);
runServer();
}
@After
public void tearDown() throws Exception {
stopServer();
}
@Test
public void testServerInit() throws Exception {
assertThat(fullPetriClient.fetchAllExperiments(), hasSize(0));
petriBackofficeUiDriver.numberOfActiveExperimentsIs(0);
}
@Test
public void testServerPersistence() throws Exception {
fullPetriClient.addSpecs(Collections.singletonList(TestBuilders.abSpecBuilder("someSpec").build()));
assertThat(fullPetriClient.fetchSpecs(), hasSize(1));
restartServer();
assertThat(fullPetriClient.fetchSpecs(), hasSize(1));
}
public static void removeDBFile() throws Exception {
FileUtils.deleteQuietly(new File("petri.db.trace.db"));
FileUtils.deleteQuietly(new File("petri.db.mv.db"));
}
private void runServer() throws Exception {
runPetriServer();
waitForServer(ServerState.STARTED);
}
private void stopServer() throws Exception {
stopPetriServer();
waitForServer(ServerState.STOPPED);
}
private void restartServer() throws Exception {
stopServer();
runServer();
}
private void runPetriServer() throws Exception {
Map<String,String> d = System.getenv();
List<File> runnableJarFiles = FileUtils
.listFiles(new File("target"), new String[]{"jar"}, false)
.stream()
.filter(file -> !file.getName().startsWith("original")
&& !file.getName().endsWith("javadoc.jar")
&& !file.getName().endsWith("sources.jar"))
.collect(Collectors.toList());
assertThat(runnableJarFiles.size(), is(1));
String[] command = ("java -jar target/" + runnableJarFiles.get(0).getName()).split(" ");
petriServerProcess = new ProcessBuilder(command).redirectError(ProcessBuilder.Redirect.INHERIT).redirectOutput(ProcessBuilder.Redirect.INHERIT).start();
}
private void stopPetriServer() throws Exception {
petriServerProcess.destroy();
petriServerProcess = null;
}
private void waitForServer(ServerState expected) throws Exception {
final AsyncHttpClient.BoundRequestBuilder request = new AsyncHttpClient().prepareGet(BASE_SERVER_ADDRESS);
for (int i = 0; i < SERVER_STARTUP_RETRIES; i++) {
ServerState res;
try {
request.execute().get();
res = ServerState.STARTED;
} catch (Exception e) {
if (e.getCause() instanceof ConnectException) res = ServerState.STOPPED;
else throw new Exception("waiting for server got unexpected error: " + e.getMessage(), e);
}
if (res == expected) {
System.out.println("good, petri server started");
return;
}
System.out.println("still waiting for petri server to start..");
Thread.sleep(1000);
}
throw new Exception("Server did not change to " + expected + " state");
}
}
enum ServerState {STARTED, STOPPED} | [] | [] | [] | [] | [] | java | 0 | 0 | |
exl_env/lib/python3.6/site-packages/IPython/core/tests/test_magic.py | # -*- coding: utf-8 -*-
"""Tests for various magic functions.
Needs to be run by nose (to make ipython session available).
"""
import io
import os
import re
import sys
import warnings
from unittest import TestCase
from importlib import invalidate_caches
from io import StringIO
import nose.tools as nt
import shlex
from IPython import get_ipython
from IPython.core import magic
from IPython.core.error import UsageError
from IPython.core.magic import (Magics, magics_class, line_magic,
cell_magic,
register_line_magic, register_cell_magic)
from IPython.core.magics import execution, script, code, logging, osm
from IPython.testing import decorators as dec
from IPython.testing import tools as tt
from IPython.utils.io import capture_output
from IPython.utils.tempdir import (TemporaryDirectory,
TemporaryWorkingDirectory)
from IPython.utils.process import find_cmd
_ip = get_ipython()
# Minimal Magics subclass used as a harness for the parse_options tests below.
@magic.magics_class
class DummyMagics(magic.Magics): pass
def test_extract_code_ranges():
    # Each token in instr maps to a half-open (start, end) 0-based range.
    instr = "1 3 5-6 7-9 10:15 17: :10 10- -13 :"
    expected = [(0, 1),
                (2, 3),
                (4, 6),
                (6, 9),
                (9, 14),
                (16, None),
                (None, 9),
                (9, None),
                (None, 13),
                (None, None)]
    actual = list(code.extract_code_ranges(instr))
    nt.assert_equal(actual, expected)

def test_extract_symbols():
    # extract_symbols returns (matched definition blocks, leftover names).
    source = """import foo\na = 10\ndef b():\n return 42\n\n\nclass A: pass\n\n\n"""
    symbols_args = ["a", "b", "A", "A,b", "A,a", "z"]
    expected = [([], ['a']),
                (["def b():\n return 42\n"], []),
                (["class A: pass\n"], []),
                (["class A: pass\n", "def b():\n return 42\n"], []),
                (["class A: pass\n"], ['a']),
                ([], ['z'])]
    for symbols, exp in zip(symbols_args, expected):
        nt.assert_equal(code.extract_symbols(source, symbols), exp)

def test_extract_symbols_raises_exception_with_non_python_code():
    # Non-Python input must surface as a SyntaxError, not a silent miss.
    source = ("=begin A Ruby program :)=end\n"
              "def hello\n"
              "puts 'Hello world'\n"
              "end")
    with nt.assert_raises(SyntaxError):
        code.extract_symbols(source, "hello")
def test_magic_not_found():
    # magic not found raises UsageError
    with nt.assert_raises(UsageError):
        _ip.magic('doesntexist')

    # ensure result isn't success when a magic isn't found
    result = _ip.run_cell('%doesntexist')
    assert isinstance(result.error_in_exec, UsageError)

def test_cell_magic_not_found():
    # Same contract as above but for cell (%%) magics.
    # magic not found raises UsageError
    with nt.assert_raises(UsageError):
        _ip.run_cell_magic('doesntexist', 'line', 'cell')

    # ensure result isn't success when a magic isn't found
    result = _ip.run_cell('%%doesntexist')
    assert isinstance(result.error_in_exec, UsageError)

def test_magic_error_status():
    # An exception raised inside a magic must be reported via error_in_exec.
    def fail(shell):
        1/0
    _ip.register_magic_function(fail)
    result = _ip.run_cell('%fail')
    assert isinstance(result.error_in_exec, ZeroDivisionError)
def test_config():
    """ test that config magic does not raise
    can happen if Configurable init is moved too early into
    Magics.__init__ as then a Config object will be registered as a
    magic.
    """
    ## should not raise.
    _ip.magic('config')

def test_config_available_configs():
    """ test that config magic prints available configs in unique and
    sorted order. """
    with capture_output() as captured:
        _ip.magic('config')

    stdout = captured.stdout
    # Skip the header line; the remainder is the class listing.
    config_classes = stdout.strip().split('\n')[1:]
    nt.assert_list_equal(config_classes, sorted(set(config_classes)))

def test_config_print_class():
    """ test that config with a classname prints the class's options. """
    with capture_output() as captured:
        _ip.magic('config TerminalInteractiveShell')

    stdout = captured.stdout
    # Only the first line's format is asserted; dump stdout on mismatch to
    # make the failure diagnosable.
    if not re.match("TerminalInteractiveShell.* options", stdout.splitlines()[0]):
        print(stdout)
        raise AssertionError("1st line of stdout not like "
                             "'TerminalInteractiveShell.* options'")
def test_rehashx():
# clear up everything
_ip.alias_manager.clear_aliases()
del _ip.db['syscmdlist']
_ip.magic('rehashx')
# Practically ALL ipython development systems will have more than 10 aliases
nt.assert_true(len(_ip.alias_manager.aliases) > 10)
for name, cmd in _ip.alias_manager.aliases:
# we must strip dots from alias names
nt.assert_not_in('.', name)
# rehashx must fill up syscmdlist
scoms = _ip.db['syscmdlist']
nt.assert_true(len(scoms) > 10)
def test_magic_parse_options():
    """Test that we don't mangle paths when parsing magic options."""
    ip = get_ipython()
    path = 'c:\\x'
    magics = DummyMagics(ip)
    parsed = magics.parse_options('-f %s' % path, 'f:')[0]
    # argv splitting is os-dependent: POSIX shells consume the backslash.
    expected = 'c:x' if os.name == 'posix' else path
    nt.assert_equal(parsed['f'], expected)
def test_magic_parse_long_options():
    """Magic.parse_options can handle --foo=bar long options"""
    ip = get_ipython()
    magics = DummyMagics(ip)
    # Both the bare and the =value long-option forms must parse.
    parsed, _ = magics.parse_options('--foo --bar=bubble', 'a', 'foo', 'bar=')
    nt.assert_in('foo', parsed)
    nt.assert_in('bar', parsed)
    nt.assert_equal(parsed['bar'], "bubble")
@dec.skip_without('sqlite3')
def doctest_hist_f():
"""Test %hist -f with temporary filename.
In [9]: import tempfile
In [10]: tfile = tempfile.mktemp('.py','tmp-ipython-')
In [11]: %hist -nl -f $tfile 3
In [13]: import os; os.unlink(tfile)
"""
@dec.skip_without('sqlite3')
def doctest_hist_r():
"""Test %hist -r
XXX - This test is not recording the output correctly. For some reason, in
testing mode the raw history isn't getting populated. No idea why.
Disabling the output checking for now, though at least we do run it.
In [1]: 'hist' in _ip.lsmagic()
Out[1]: True
In [2]: x=1
In [3]: %hist -rl 2
x=1 # random
%hist -r 2
"""
@dec.skip_without('sqlite3')
def doctest_hist_op():
"""Test %hist -op
In [1]: class b(float):
...: pass
...:
In [2]: class s(object):
...: def __str__(self):
...: return 's'
...:
In [3]:
In [4]: class r(b):
...: def __repr__(self):
...: return 'r'
...:
In [5]: class sr(s,r): pass
...:
In [6]:
In [7]: bb=b()
In [8]: ss=s()
In [9]: rr=r()
In [10]: ssrr=sr()
In [11]: 4.5
Out[11]: 4.5
In [12]: str(ss)
Out[12]: 's'
In [13]:
In [14]: %hist -op
>>> class b:
... pass
...
>>> class s(b):
... def __str__(self):
... return 's'
...
>>>
>>> class r(b):
... def __repr__(self):
... return 'r'
...
>>> class sr(s,r): pass
>>>
>>> bb=b()
>>> ss=s()
>>> rr=r()
>>> ssrr=sr()
>>> 4.5
4.5
>>> str(ss)
's'
>>>
"""
def test_hist_pof():
    """%history -pof writes history to the named file."""
    ip = get_ipython()
    ip.run_cell(u"1+2", store_history=True)
    with TemporaryDirectory() as td:
        target = os.path.join(td, 'hist.py')
        ip.run_line_magic('history', '-pof %s' % target)
        # The magic must have created the output file.
        assert os.path.isfile(target)
@dec.skip_without('sqlite3')
def test_macro():
ip = get_ipython()
ip.history_manager.reset() # Clear any existing history.
cmds = ["a=1", "def b():\n return a**2", "print(a,b())"]
for i, cmd in enumerate(cmds, start=1):
ip.history_manager.store_inputs(i, cmd)
ip.magic("macro test 1-3")
nt.assert_equal(ip.user_ns["test"].value, "\n".join(cmds)+"\n")
# List macros
nt.assert_in("test", ip.magic("macro"))
@dec.skip_without('sqlite3')
def test_macro_run():
"""Test that we can run a multi-line macro successfully."""
ip = get_ipython()
ip.history_manager.reset()
cmds = ["a=10", "a+=1", "print(a)", "%macro test 2-3"]
for cmd in cmds:
ip.run_cell(cmd, store_history=True)
nt.assert_equal(ip.user_ns["test"].value, "a+=1\nprint(a)\n")
with tt.AssertPrints("12"):
ip.run_cell("test")
with tt.AssertPrints("13"):
ip.run_cell("test")
def test_magic_magic():
"""Test %magic"""
ip = get_ipython()
with capture_output() as captured:
ip.magic("magic")
stdout = captured.stdout
nt.assert_in('%magic', stdout)
nt.assert_in('IPython', stdout)
nt.assert_in('Available', stdout)
@dec.skipif_not_numpy
def test_numpy_reset_array_undec():
"Test '%reset array' functionality"
_ip.ex('import numpy as np')
_ip.ex('a = np.empty(2)')
nt.assert_in('a', _ip.user_ns)
_ip.magic('reset -f array')
nt.assert_not_in('a', _ip.user_ns)
def test_reset_out():
"Test '%reset out' magic"
_ip.run_cell("parrot = 'dead'", store_history=True)
# test '%reset -f out', make an Out prompt
_ip.run_cell("parrot", store_history=True)
nt.assert_true('dead' in [_ip.user_ns[x] for x in ('_','__','___')])
_ip.magic('reset -f out')
nt.assert_false('dead' in [_ip.user_ns[x] for x in ('_','__','___')])
nt.assert_equal(len(_ip.user_ns['Out']), 0)
def test_reset_in():
"Test '%reset in' magic"
# test '%reset -f in'
_ip.run_cell("parrot", store_history=True)
nt.assert_true('parrot' in [_ip.user_ns[x] for x in ('_i','_ii','_iii')])
_ip.magic('%reset -f in')
nt.assert_false('parrot' in [_ip.user_ns[x] for x in ('_i','_ii','_iii')])
nt.assert_equal(len(set(_ip.user_ns['In'])), 1)
def test_reset_dhist():
"Test '%reset dhist' magic"
_ip.run_cell("tmp = [d for d in _dh]") # copy before clearing
_ip.magic('cd ' + os.path.dirname(nt.__file__))
_ip.magic('cd -')
nt.assert_true(len(_ip.user_ns['_dh']) > 0)
_ip.magic('reset -f dhist')
nt.assert_equal(len(_ip.user_ns['_dh']), 0)
_ip.run_cell("_dh = [d for d in tmp]") #restore
def test_reset_in_length():
"Test that '%reset in' preserves In[] length"
_ip.run_cell("print 'foo'")
_ip.run_cell("reset -f in")
nt.assert_equal(len(_ip.user_ns['In']), _ip.displayhook.prompt_count+1)
def test_tb_syntaxerror():
    """test %tb after a SyntaxError"""
    ip = get_ipython()
    ip.run_cell("for")
    # Capture stdout by hand so we can inspect what %tb printed.
    orig_stdout = sys.stdout
    try:
        sys.stdout = StringIO()
        ip.run_cell("%tb")
        printed = sys.stdout.getvalue()
    finally:
        sys.stdout = orig_stdout
    # Only the final line of the traceback matters here.
    final = printed.rstrip().splitlines()[-1].strip()
    nt.assert_equal(final, "SyntaxError: invalid syntax")
def test_time():
ip = get_ipython()
with tt.AssertPrints("Wall time: "):
ip.run_cell("%time None")
ip.run_cell("def f(kmjy):\n"
" %time print (2*kmjy)")
with tt.AssertPrints("Wall time: "):
with tt.AssertPrints("hihi", suppress=False):
ip.run_cell("f('hi')")
@dec.skip_win32
def test_time2():
ip = get_ipython()
with tt.AssertPrints("CPU times: user "):
ip.run_cell("%time None")
def test_time3():
"""Erroneous magic function calls, issue gh-3334"""
ip = get_ipython()
ip.user_ns.pop('run', None)
with tt.AssertNotPrints("not found", channel='stderr'):
ip.run_cell("%%time\n"
"run = 0\n"
"run += 1")
def test_doctest_mode():
"Toggle doctest_mode twice, it should be a no-op and run without error"
_ip.magic('doctest_mode')
_ip.magic('doctest_mode')
def test_parse_options():
    """Tests for basic options parsing in magics."""
    # Minimal smoke tests: byte-string and unicode calls both round-trip
    # the remaining argument string unchanged.
    magics = DummyMagics(_ip)
    nt.assert_equal(magics.parse_options('foo', '')[1], 'foo')
    nt.assert_equal(magics.parse_options(u'foo', '')[1], u'foo')
def test_dirops():
"""Test various directory handling operations."""
# curpath = lambda :os.path.splitdrive(os.getcwd())[1].replace('\\','/')
curpath = os.getcwd
startdir = os.getcwd()
ipdir = os.path.realpath(_ip.ipython_dir)
try:
_ip.magic('cd "%s"' % ipdir)
nt.assert_equal(curpath(), ipdir)
_ip.magic('cd -')
nt.assert_equal(curpath(), startdir)
_ip.magic('pushd "%s"' % ipdir)
nt.assert_equal(curpath(), ipdir)
_ip.magic('popd')
nt.assert_equal(curpath(), startdir)
finally:
os.chdir(startdir)
def test_cd_force_quiet():
"""Test OSMagics.cd_force_quiet option"""
_ip.config.OSMagics.cd_force_quiet = True
osmagics = osm.OSMagics(shell=_ip)
startdir = os.getcwd()
ipdir = os.path.realpath(_ip.ipython_dir)
try:
with tt.AssertNotPrints(ipdir):
osmagics.cd('"%s"' % ipdir)
with tt.AssertNotPrints(startdir):
osmagics.cd('-')
finally:
os.chdir(startdir)
def test_xmode():
    # Cycle %xmode with no argument repeatedly and check we end up back at
    # the starting mode.
    # NOTE(review): the loop runs four times, but an older comment here said
    # "three times" -- confirm the count matches the number of traceback modes.
    xmode = _ip.InteractiveTB.mode
    for i in range(4):
        _ip.magic("xmode")
    nt.assert_equal(_ip.InteractiveTB.mode, xmode)
def test_reset_hard():
monitor = []
class A(object):
def __del__(self):
monitor.append(1)
def __repr__(self):
return "<A instance>"
_ip.user_ns["a"] = A()
_ip.run_cell("a")
nt.assert_equal(monitor, [])
_ip.magic("reset -f")
nt.assert_equal(monitor, [1])
class TestXdel(tt.TempFileMixin):
def test_xdel(self):
"""Test that references from %run are cleared by xdel."""
src = ("class A(object):\n"
" monitor = []\n"
" def __del__(self):\n"
" self.monitor.append(1)\n"
"a = A()\n")
self.mktmp(src)
# %run creates some hidden references...
_ip.magic("run %s" % self.fname)
# ... as does the displayhook.
_ip.run_cell("a")
monitor = _ip.user_ns["A"].monitor
nt.assert_equal(monitor, [])
_ip.magic("xdel a")
# Check that a's __del__ method has been called.
nt.assert_equal(monitor, [1])
def doctest_who():
"""doctest for %who
In [1]: %reset -f
In [2]: alpha = 123
In [3]: beta = 'beta'
In [4]: %who int
alpha
In [5]: %who str
beta
In [6]: %whos
Variable Type Data/Info
----------------------------
alpha int 123
beta str beta
In [7]: %who_ls
Out[7]: ['alpha', 'beta']
"""
def test_whos():
"""Check that whos is protected against objects where repr() fails."""
class A(object):
def __repr__(self):
raise Exception()
_ip.user_ns['a'] = A()
_ip.magic("whos")
def doctest_precision():
"""doctest for %precision
In [1]: f = get_ipython().display_formatter.formatters['text/plain']
In [2]: %precision 5
Out[2]: '%.5f'
In [3]: f.float_format
Out[3]: '%.5f'
In [4]: %precision %e
Out[4]: '%e'
In [5]: f(3.1415927)
Out[5]: '3.141593e+00'
"""
def test_psearch():
with tt.AssertPrints("dict.fromkeys"):
_ip.run_cell("dict.fr*?")
def test_timeit_shlex():
"""test shlex issues with timeit (#1109)"""
_ip.ex("def f(*a,**kw): pass")
_ip.magic('timeit -n1 "this is a bug".count(" ")')
_ip.magic('timeit -r1 -n1 f(" ", 1)')
_ip.magic('timeit -r1 -n1 f(" ", 1, " ", 2, " ")')
_ip.magic('timeit -r1 -n1 ("a " + "b")')
_ip.magic('timeit -r1 -n1 f("a " + "b")')
_ip.magic('timeit -r1 -n1 f("a " + "b ")')
def test_timeit_special_syntax():
"Test %%timeit with IPython special syntax"
@register_line_magic
def lmagic(line):
ip = get_ipython()
ip.user_ns['lmagic_out'] = line
# line mode test
_ip.run_line_magic('timeit', '-n1 -r1 %lmagic my line')
nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line')
# cell mode test
_ip.run_cell_magic('timeit', '-n1 -r1', '%lmagic my line2')
nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line2')
def test_timeit_return():
"""
test whether timeit -o return object
"""
res = _ip.run_line_magic('timeit','-n10 -r10 -o 1')
assert(res is not None)
def test_timeit_quiet():
"""
test quiet option of timeit magic
"""
with tt.AssertNotPrints("loops"):
_ip.run_cell("%timeit -n1 -r1 -q 1")
def test_timeit_return_quiet():
with tt.AssertNotPrints("loops"):
res = _ip.run_line_magic('timeit', '-n1 -r1 -q -o 1')
assert (res is not None)
def test_timeit_invalid_return():
with nt.assert_raises_regex(SyntaxError, "outside function"):
_ip.run_line_magic('timeit', 'return')
@dec.skipif(execution.profile is None)
def test_prun_special_syntax():
"Test %%prun with IPython special syntax"
@register_line_magic
def lmagic(line):
ip = get_ipython()
ip.user_ns['lmagic_out'] = line
# line mode test
_ip.run_line_magic('prun', '-q %lmagic my line')
nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line')
# cell mode test
_ip.run_cell_magic('prun', '-q', '%lmagic my line2')
nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line2')
@dec.skipif(execution.profile is None)
def test_prun_quotes():
"Test that prun does not clobber string escapes (GH #1302)"
_ip.magic(r"prun -q x = '\t'")
nt.assert_equal(_ip.user_ns['x'], '\t')
def test_extension():
# Debugging information for failures of this test
print('sys.path:')
for p in sys.path:
print(' ', p)
print('CWD', os.getcwd())
nt.assert_raises(ImportError, _ip.magic, "load_ext daft_extension")
daft_path = os.path.join(os.path.dirname(__file__), "daft_extension")
sys.path.insert(0, daft_path)
try:
_ip.user_ns.pop('arq', None)
invalidate_caches() # Clear import caches
_ip.magic("load_ext daft_extension")
nt.assert_equal(_ip.user_ns['arq'], 185)
_ip.magic("unload_ext daft_extension")
assert 'arq' not in _ip.user_ns
finally:
sys.path.remove(daft_path)
def test_notebook_export_json():
_ip = get_ipython()
_ip.history_manager.reset() # Clear any existing history.
cmds = [u"a=1", u"def b():\n return a**2", u"print('noël, été', b())"]
for i, cmd in enumerate(cmds, start=1):
_ip.history_manager.store_inputs(i, cmd)
with TemporaryDirectory() as td:
outfile = os.path.join(td, "nb.ipynb")
_ip.magic("notebook -e %s" % outfile)
class TestEnv(TestCase):
def test_env(self):
env = _ip.magic("env")
self.assertTrue(isinstance(env, dict))
def test_env_get_set_simple(self):
env = _ip.magic("env var val1")
self.assertEqual(env, None)
self.assertEqual(os.environ['var'], 'val1')
self.assertEqual(_ip.magic("env var"), 'val1')
env = _ip.magic("env var=val2")
self.assertEqual(env, None)
self.assertEqual(os.environ['var'], 'val2')
def test_env_get_set_complex(self):
env = _ip.magic("env var 'val1 '' 'val2")
self.assertEqual(env, None)
self.assertEqual(os.environ['var'], "'val1 '' 'val2")
self.assertEqual(_ip.magic("env var"), "'val1 '' 'val2")
env = _ip.magic('env var=val2 val3="val4')
self.assertEqual(env, None)
self.assertEqual(os.environ['var'], 'val2 val3="val4')
def test_env_set_bad_input(self):
self.assertRaises(UsageError, lambda: _ip.magic("set_env var"))
def test_env_set_whitespace(self):
self.assertRaises(UsageError, lambda: _ip.magic("env var A=B"))
class CellMagicTestCase(TestCase):
def check_ident(self, magic):
# Manually called, we get the result
out = _ip.run_cell_magic(magic, 'a', 'b')
nt.assert_equal(out, ('a','b'))
# Via run_cell, it goes into the user's namespace via displayhook
_ip.run_cell('%%' + magic +' c\nd\n')
nt.assert_equal(_ip.user_ns['_'], ('c','d\n'))
def test_cell_magic_func_deco(self):
"Cell magic using simple decorator"
@register_cell_magic
def cellm(line, cell):
return line, cell
self.check_ident('cellm')
def test_cell_magic_reg(self):
"Cell magic manually registered"
def cellm(line, cell):
return line, cell
_ip.register_magic_function(cellm, 'cell', 'cellm2')
self.check_ident('cellm2')
def test_cell_magic_class(self):
"Cell magics declared via a class"
@magics_class
class MyMagics(Magics):
@cell_magic
def cellm3(self, line, cell):
return line, cell
_ip.register_magics(MyMagics)
self.check_ident('cellm3')
def test_cell_magic_class2(self):
"Cell magics declared via a class, #2"
@magics_class
class MyMagics2(Magics):
@cell_magic('cellm4')
def cellm33(self, line, cell):
return line, cell
_ip.register_magics(MyMagics2)
self.check_ident('cellm4')
# Check that nothing is registered as 'cellm33'
c33 = _ip.find_cell_magic('cellm33')
nt.assert_equal(c33, None)
def test_file():
    """Basic %%writefile"""
    ip = get_ipython()
    with TemporaryDirectory() as td:
        target = os.path.join(td, 'file1')
        body = u'\n'.join([
            'line1',
            'line2',
        ])
        ip.run_cell_magic("writefile", target, body)
        # Both lines must land on disk, newline-joined.
        with open(target) as f:
            written = f.read()
        nt.assert_in('line1\n', written)
        nt.assert_in('line2', written)
@dec.skip_win32
def test_file_single_quote():
"""Basic %%writefile with embedded single quotes"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, '\'file1\'')
ip.run_cell_magic("writefile", fname, u'\n'.join([
'line1',
'line2',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line2', s)
@dec.skip_win32
def test_file_double_quote():
"""Basic %%writefile with embedded double quotes"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, '"file1"')
ip.run_cell_magic("writefile", fname, u'\n'.join([
'line1',
'line2',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line2', s)
def test_file_var_expand():
    """%%writefile $filename"""
    ip = get_ipython()
    with TemporaryDirectory() as td:
        target = os.path.join(td, 'file1')
        ip.user_ns['filename'] = target
        # The magic line should expand $filename from the user namespace.
        ip.run_cell_magic("writefile", '$filename', u'\n'.join([
            'line1',
            'line2',
        ]))
        with open(target) as f:
            contents = f.read()
        nt.assert_in('line1\n', contents)
        nt.assert_in('line2', contents)
def test_file_unicode():
"""%%writefile with unicode cell"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, 'file1')
ip.run_cell_magic("writefile", fname, u'\n'.join([
u'liné1',
u'liné2',
]))
with io.open(fname, encoding='utf-8') as f:
s = f.read()
nt.assert_in(u'liné1\n', s)
nt.assert_in(u'liné2', s)
def test_file_amend():
"""%%writefile -a amends files"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, 'file2')
ip.run_cell_magic("writefile", fname, u'\n'.join([
'line1',
'line2',
]))
ip.run_cell_magic("writefile", "-a %s" % fname, u'\n'.join([
'line3',
'line4',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line3\n', s)
def test_file_spaces():
    """%%file with spaces in filename"""
    ip = get_ipython()
    with TemporaryWorkingDirectory() as td:
        fname = "file name"
        # Quote the name so the magic treats it as a single argument.
        ip.run_cell_magic("file", '"%s"' % fname, u'\n'.join([
            'line1',
            'line2',
        ]))
        with open(fname) as f:
            data = f.read()
        nt.assert_in('line1\n', data)
        nt.assert_in('line2', data)
def test_script_config():
ip = get_ipython()
ip.config.ScriptMagics.script_magics = ['whoda']
sm = script.ScriptMagics(shell=ip)
nt.assert_in('whoda', sm.magics['cell'])
@dec.skip_win32
def test_script_out():
ip = get_ipython()
ip.run_cell_magic("script", "--out output sh", "echo 'hi'")
nt.assert_equal(ip.user_ns['output'], 'hi\n')
@dec.skip_win32
def test_script_err():
ip = get_ipython()
ip.run_cell_magic("script", "--err error sh", "echo 'hello' >&2")
nt.assert_equal(ip.user_ns['error'], 'hello\n')
@dec.skip_win32
def test_script_out_err():
ip = get_ipython()
ip.run_cell_magic("script", "--out output --err error sh", "echo 'hi'\necho 'hello' >&2")
nt.assert_equal(ip.user_ns['output'], 'hi\n')
nt.assert_equal(ip.user_ns['error'], 'hello\n')
@dec.skip_win32
def test_script_bg_out():
ip = get_ipython()
ip.run_cell_magic("script", "--bg --out output sh", "echo 'hi'")
nt.assert_equal(ip.user_ns['output'].read(), b'hi\n')
ip.user_ns['output'].close()
@dec.skip_win32
def test_script_bg_err():
ip = get_ipython()
ip.run_cell_magic("script", "--bg --err error sh", "echo 'hello' >&2")
nt.assert_equal(ip.user_ns['error'].read(), b'hello\n')
ip.user_ns['error'].close()
@dec.skip_win32
def test_script_bg_out_err():
ip = get_ipython()
ip.run_cell_magic("script", "--bg --out output --err error sh", "echo 'hi'\necho 'hello' >&2")
nt.assert_equal(ip.user_ns['output'].read(), b'hi\n')
nt.assert_equal(ip.user_ns['error'].read(), b'hello\n')
ip.user_ns['output'].close()
ip.user_ns['error'].close()
def test_script_defaults():
ip = get_ipython()
for cmd in ['sh', 'bash', 'perl', 'ruby']:
try:
find_cmd(cmd)
except Exception:
pass
else:
nt.assert_in(cmd, ip.magics_manager.magics['cell'])
@magics_class
class FooFoo(Magics):
"""class with both %foo and %%foo magics"""
@line_magic('foo')
def line_foo(self, line):
"I am line foo"
pass
@cell_magic("foo")
def cell_foo(self, line, cell):
"I am cell foo, not line foo"
pass
def test_line_cell_info():
"""%%foo and %foo magics are distinguishable to inspect"""
ip = get_ipython()
ip.magics_manager.register(FooFoo)
oinfo = ip.object_inspect('foo')
nt.assert_true(oinfo['found'])
nt.assert_true(oinfo['ismagic'])
oinfo = ip.object_inspect('%%foo')
nt.assert_true(oinfo['found'])
nt.assert_true(oinfo['ismagic'])
nt.assert_equal(oinfo['docstring'], FooFoo.cell_foo.__doc__)
oinfo = ip.object_inspect('%foo')
nt.assert_true(oinfo['found'])
nt.assert_true(oinfo['ismagic'])
nt.assert_equal(oinfo['docstring'], FooFoo.line_foo.__doc__)
def test_multiple_magics():
ip = get_ipython()
foo1 = FooFoo(ip)
foo2 = FooFoo(ip)
mm = ip.magics_manager
mm.register(foo1)
nt.assert_true(mm.magics['line']['foo'].__self__ is foo1)
mm.register(foo2)
nt.assert_true(mm.magics['line']['foo'].__self__ is foo2)
def test_alias_magic():
"""Test %alias_magic."""
ip = get_ipython()
mm = ip.magics_manager
# Basic operation: both cell and line magics are created, if possible.
ip.run_line_magic('alias_magic', 'timeit_alias timeit')
nt.assert_in('timeit_alias', mm.magics['line'])
nt.assert_in('timeit_alias', mm.magics['cell'])
# --cell is specified, line magic not created.
ip.run_line_magic('alias_magic', '--cell timeit_cell_alias timeit')
nt.assert_not_in('timeit_cell_alias', mm.magics['line'])
nt.assert_in('timeit_cell_alias', mm.magics['cell'])
# Test that line alias is created successfully.
ip.run_line_magic('alias_magic', '--line env_alias env')
nt.assert_equal(ip.run_line_magic('env', ''),
ip.run_line_magic('env_alias', ''))
# Test that line alias with parameters passed in is created successfully.
ip.run_line_magic('alias_magic', '--line history_alias history --params ' + shlex.quote('3'))
nt.assert_in('history_alias', mm.magics['line'])
def test_save():
"""Test %save."""
ip = get_ipython()
ip.history_manager.reset() # Clear any existing history.
cmds = [u"a=1", u"def b():\n return a**2", u"print(a, b())"]
for i, cmd in enumerate(cmds, start=1):
ip.history_manager.store_inputs(i, cmd)
with TemporaryDirectory() as tmpdir:
file = os.path.join(tmpdir, "testsave.py")
ip.run_line_magic("save", "%s 1-10" % file)
with open(file) as f:
content = f.read()
nt.assert_equal(content.count(cmds[0]), 1)
nt.assert_in('coding: utf-8', content)
ip.run_line_magic("save", "-a %s 1-10" % file)
with open(file) as f:
content = f.read()
nt.assert_equal(content.count(cmds[0]), 2)
nt.assert_in('coding: utf-8', content)
def test_store():
"""Test %store."""
ip = get_ipython()
ip.run_line_magic('load_ext', 'storemagic')
# make sure the storage is empty
ip.run_line_magic('store', '-z')
ip.user_ns['var'] = 42
ip.run_line_magic('store', 'var')
ip.user_ns['var'] = 39
ip.run_line_magic('store', '-r')
nt.assert_equal(ip.user_ns['var'], 42)
ip.run_line_magic('store', '-d var')
ip.user_ns['var'] = 39
ip.run_line_magic('store' , '-r')
nt.assert_equal(ip.user_ns['var'], 39)
def _run_edit_test(arg_s, exp_filename=None,
exp_lineno=-1,
exp_contents=None,
exp_is_temp=None):
ip = get_ipython()
M = code.CodeMagics(ip)
last_call = ['','']
opts,args = M.parse_options(arg_s,'prxn:')
filename, lineno, is_temp = M._find_edit_target(ip, args, opts, last_call)
if exp_filename is not None:
nt.assert_equal(exp_filename, filename)
if exp_contents is not None:
with io.open(filename, 'r', encoding='utf-8') as f:
contents = f.read()
nt.assert_equal(exp_contents, contents)
if exp_lineno != -1:
nt.assert_equal(exp_lineno, lineno)
if exp_is_temp is not None:
nt.assert_equal(exp_is_temp, is_temp)
def test_edit_interactive():
"""%edit on interactively defined objects"""
ip = get_ipython()
n = ip.execution_count
ip.run_cell(u"def foo(): return 1", store_history=True)
try:
_run_edit_test("foo")
except code.InteractivelyDefined as e:
nt.assert_equal(e.index, n)
else:
raise AssertionError("Should have raised InteractivelyDefined")
def test_edit_cell():
"""%edit [cell id]"""
ip = get_ipython()
ip.run_cell(u"def foo(): return 1", store_history=True)
# test
_run_edit_test("1", exp_contents=ip.user_ns['In'][1], exp_is_temp=True)
def test_bookmark():
ip = get_ipython()
ip.run_line_magic('bookmark', 'bmname')
with tt.AssertPrints('bmname'):
ip.run_line_magic('bookmark', '-l')
ip.run_line_magic('bookmark', '-d bmname')
def test_ls_magic():
ip = get_ipython()
json_formatter = ip.display_formatter.formatters['application/json']
json_formatter.enabled = True
lsmagic = ip.magic('lsmagic')
with warnings.catch_warnings(record=True) as w:
j = json_formatter(lsmagic)
nt.assert_equal(sorted(j), ['cell', 'line'])
nt.assert_equal(w, []) # no warnings
def test_strip_initial_indent():
    """strip_initial_indent drops the leading indent of the opening lines."""
    def sii(s):
        # Join the per-line results back into a single string for comparison.
        return '\n'.join(code.strip_initial_indent(s.splitlines()))
    nt.assert_equal(sii(" a = 1\nb = 2"), "a = 1\nb = 2")
    nt.assert_equal(sii(" a\n b\nc"), "a\n b\nc")
    nt.assert_equal(sii("a\n b"), "a\n b")
def test_logging_magic_quiet_from_arg():
_ip.config.LoggingMagics.quiet = False
lm = logging.LoggingMagics(shell=_ip)
with TemporaryDirectory() as td:
try:
with tt.AssertNotPrints(re.compile("Activating.*")):
lm.logstart('-q {}'.format(
os.path.join(td, "quiet_from_arg.log")))
finally:
_ip.logger.logstop()
def test_logging_magic_quiet_from_config():
_ip.config.LoggingMagics.quiet = True
lm = logging.LoggingMagics(shell=_ip)
with TemporaryDirectory() as td:
try:
with tt.AssertNotPrints(re.compile("Activating.*")):
lm.logstart(os.path.join(td, "quiet_from_config.log"))
finally:
_ip.logger.logstop()
def test_logging_magic_not_quiet():
_ip.config.LoggingMagics.quiet = False
lm = logging.LoggingMagics(shell=_ip)
with TemporaryDirectory() as td:
try:
with tt.AssertPrints(re.compile("Activating.*")):
lm.logstart(os.path.join(td, "not_quiet.log"))
finally:
_ip.logger.logstop()
def test_time_no_var_expand():
    """%time must not apply {}-style variable expansion to its statement."""
    _ip.user_ns['b'] = []
    _ip.user_ns['a'] = 5
    _ip.magic('time b.append("{a}")')
    # The literal "{a}" is appended, not the value of a.
    expected = ['{a}']
    assert _ip.user_ns['b'] == expected
# this is slow, put at the end for local testing.
def test_timeit_arguments():
"Test valid timeit arguments, should not cause SyntaxError (GH #1269)"
if sys.version_info < (3,7):
_ip.magic("timeit ('#')")
else:
# 3.7 optimize no-op statement like above out, and complain there is
# nothing in the for loop.
_ip.magic("timeit a=('#')")
| [] | [] | [
"var"
] | [] | ["var"] | python | 1 | 0 | |
cmd/Run/run.go | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"log"
"os"
"os/exec"
"fmt"
"time"
"path"
"strings"
"strconv"
"syscall"
"code.google.com/p/goplan9/plan9/acme"
)
var _ = fmt.Printf
// main implements the acme "Run" command: it saves the current source
// window, runs "go run" on the window's file, and streams the command's
// output into a dedicated /go/run/<name> acme window that offers Kill and
// Stack buttons while the command is alive.
func main() {
	log.SetFlags(0)
	log.SetPrefix("Run: ")
	// $samfile is set by acme for the window the command was invoked from.
	file := os.Getenv("samfile")
	if file == "" {
		log.Fatal("not running in acme")
	}
	id, _ := strconv.Atoi(os.Getenv("winid"))
	wfile, err := acme.Open(id, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Save the source window before running it.
	wfile.Ctl("put")
	wfile.CloseFiles()
	// Reuse an existing output window named /go/run/<base-without-.go> if
	// one is open, clearing its body; otherwise create a fresh window.
	wname := "/go/run/" + strings.TrimSuffix(path.Base(file), ".go")
	windows, _ := acme.Windows()
	var w *acme.Win
	for _, info := range windows {
		if info.Name == wname {
			ww, err := acme.Open(info.ID, nil)
			if err != nil {
				log.Fatal(err)
			}
			// Select the whole body and overwrite it with nothing.
			ww.Addr(",")
			ww.Write("data", nil)
			w = ww
			break
		}
	}
	if w == nil {
		ww, err := acme.New()
		if err != nil {
			log.Fatal(err)
		}
		ww.Name(wname)
		w = ww
	}
	w.Ctl("clean")
	defer w.Ctl("clean")
	// Start "go run <file> <extra args>" in its own process group so that
	// the signals below can target the whole group via the negative pid.
	cmd := exec.Command("go", append([]string{"run", os.Getenv("samfile")}, os.Args[1:]...)...)
	cmd.Stdout = bodyWriter{w}
	cmd.Stderr = cmd.Stdout
	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
	err = cmd.Start()
	if err != nil {
		w.Fprintf("body", "error starting command: %v\n", err)
		return
	}
	//stop := blinker(w)
	w.Ctl("cleartag")
	w.Fprintf("tag", " Kill Stack")
	done := make(chan bool)
	// Reaper goroutine: wait for the command and report any failure
	// into the window body, then signal completion.
	go func() {
		err := cmd.Wait()
		if err != nil {
			w.Fprintf("body", "\nerror running command: %v\n", err)
		}
		//stop <- true
		done <- true
	}()
	deleted := make(chan bool, 1)
	// Event loop: intercept button-2 executions of Del/Kill/Stack.
	//   Del   - remember the delete request, then signal 2 (SIGINT) to the group.
	//   Kill  - signal 2 (SIGINT) to the group.
	//   Stack - signal 3 (SIGQUIT) so the Go runtime dumps goroutine stacks.
	// Other executed text is handed back to acme unchanged.
	go func() {
		for e := range w.EventChan() {
			if e.C2 == 'x' || e.C2 == 'X' {
				switch string(e.Text) {
				case "Del":
					// Non-blocking send: only the first Del needs recording.
					select {
					case deleted <- true:
					default:
					}
					syscall.Kill(-cmd.Process.Pid, 2)
					continue
				case "Kill":
					syscall.Kill(-cmd.Process.Pid, 2)
					continue
				case "Stack":
					syscall.Kill(-cmd.Process.Pid, 3)
					continue
				}
				w.WriteEvent(e)
			}
		}
	}()
	<-done
	w.Ctl("cleartag")
	// If the user pressed Del while the command ran, delete the window now
	// that the process has exited.
	select {
	case <-deleted:
		w.Ctl("delete")
	default:
	}
}
// bodyWriter adapts an acme window so it can serve as an io.Writer:
// every Write appends its bytes to the window's body.
type bodyWriter struct {
	w *acme.Win
}

// Write appends b to the window body, satisfying io.Writer.
func (w bodyWriter) Write(b []byte) (int, error) {
	return w.w.Write("body", b)
}
// blinker toggles the window's dirty marker every 300ms until a value is
// sent on the returned channel, at which point it restores the clean state
// (if needed) and exits.
func blinker(w *acme.Win) chan bool {
	stop := make(chan bool)
	go func() {
		ticker := time.NewTicker(300 * time.Millisecond)
		defer ticker.Stop()
		showing := false
		for {
			select {
			case <-ticker.C:
				// Flip between the dirty and clean markers.
				showing = !showing
				state := "clean"
				if showing {
					state = "dirty"
				}
				w.Ctl(state)
			case <-stop:
				// Leave the window clean on the way out.
				if showing {
					w.Ctl("clean")
				}
				return
			}
		}
	}()
	return stop
}
| [
"\"samfile\"",
"\"winid\"",
"\"samfile\""
] | [] | [
"samfile",
"winid"
] | [] | ["samfile", "winid"] | go | 2 | 0 | |
tests/vitriolic/wsgi.py | """
WSGI config for vitriolic project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already
# names a settings module.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "vitriolic.settings")

# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) look up.
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
dued/config.py | import copy
import json
import os
import types
from os.path import join, splitext, expanduser
from .entorno import Entorno
from .excepciones import TipoDeArchivoDesconocido, MiembroDeConfigNoSeleccionable
from .corredores import Local
from .terminales import WINDOWS
from .util import debug, six, yaml
if six.PY3:
    try:
        from importlib.machinery import SourceFileLoader
    except ImportError:  # PyPy3
        from importlib._bootstrap import _SourceFileLoader as SourceFileLoader

    def load_source(nombre, ruta):
        # Load a Python source file as a module and return its namespace as
        # a dict; a missing file yields an empty dict rather than raising.
        # (nombre is unused; kept for signature parity with the py2 branch.)
        if not os.path.exists(ruta):
            return {}
        return vars(SourceFileLoader("mod", ruta).load_module())
else:
    import imp

    def load_source(nombre, ruta):
        # Python 2 equivalent, using the (deprecated) imp module.
        if not os.path.exists(ruta):
            return {}
        return vars(imp.load_source("mod", ruta))
class DataProxy(object):
"""
Clase de ayuda que implementa el acceso anidado dict+atrib para `.Config`.
Específicamente, se usa tanto para `.Config` como para envolver cualquier
otro dicts asignado como valores de configuración (recursivamente).
.. warning::
Todos los métodos (de este objeto o en subclases) deben tener cuidado
de inicializar nuevos atributos a través de
``self._set(nombre='valor')``, ¡o se producirán errores de
recursividad!
.. versionadded:: 1.0
"""
# Atributos que se proxean (transfieren) a través del obj de config interno de dict-fusionado.
_proxies = (
tuple(
"""
get
has_key
items
iteritems
iterkeys
itervalues
claves
valores
""".split()
)
+ tuple(
"__{}__".format(x)
for x in """
cmp
contains
iter
sizeof
""".split()
)
)
@classmethod
def datos_desde(cls, datos, raiz=None, rutaclave=tuple()):
"""
Constructor alternativo para DataProxies 'baby' usado como valores de
sub-dict.
Permite la creación de objetos DataProxy independientes y también
permite que las subclases como `.Config` definan su propio
``__init__`` sin confundir las dos.
:param dict datos:
Los datos personales de este DataProxy en particular. Requerido,
son los datos que se están transfiriendo (proxeando).
:param raiz:
Manejador opcional en un DataProxy/Config raíz que
necesita notificación sobre actualizaciones de datos.
:param tuple rutaclave:
Tupla opcional que describe la ruta de las claves que conducen a
la ubicación de este DataProxy dentro de la estructura ``raíz``
(raiz) Requerido si se dio ``raiz`` (y viceversa).
.. versionadded:: 1.0
"""
obj = cls()
obj._set(_config=datos)
obj._set(_raiz=raiz)
obj._set(_rutaclave=rutaclave)
return obj
def __getattr__(self, clave):
    # NOTE: due to Python's default attribute-lookup semantics, "real"
    # attributes always win on attribute access and this method is skipped
    # for them. That behavior suits us: it is more intuitive than letting a
    # config key accidentally shadow a real attribute or method.
    try:
        return self._get(clave)
    except KeyError:
        # Proxy the dict-protocol special names to the wrapped config dict.
        if clave in self._proxies:
            return getattr(self._config, clave)
        # Otherwise raise a helpful AttributeError, listing the valid config
        # keys and real attributes, to honor the getattr protocol.
        err = "No attribute or config clave found for {!r}".format(clave)
        attrs = [x for x in dir(self.__class__) if not x.startswith("_")]
        err += "\n\nValid claves: {!r}".format(
            sorted(list(self._config.claves()))
        )
        err += "\n\nValid real attributes: {!r}".format(attrs)
        raise AttributeError(err)
def __setattr__(self, clave, valor):
    # Turn attribute assignment into a config update whenever there is no
    # real attribute with the given name.
    tiene_atributo_real = clave in dir(self)
    if not tiene_atributo_real:
        # Go through item assignment so our own __setitem__ (and its
        # modification tracking) runs, instead of touching the internal
        # dict/cache directly.
        self[clave] = valor
    else:
        super(DataProxy, self).__setattr__(clave, valor)
def __iter__(self):
# Por alguna razón, Python ignora nuestro __hasattr__ al determinar
# si admitimos __iter__. QUE MAL
return iter(self._config)
def __eq__(self, otro):
    """
    Compare equal to other proxy-like objects and to plain dicts.

    NOTE: __eq__ cannot simply be proxied, because the RHS will always
    be an object of the current class rather than the proxy class,
    which yields NotImplemented. Instead we compare our ``_config``
    against the other object's ``_config`` (falling back to an
    incomparable value, None, so the comparison just fails), or against
    the other object itself when it is a vanilla dict.
    """
    if isinstance(otro, dict):
        valor_comparable = otro
    else:
        valor_comparable = getattr(otro, "_config", None)
    return self._config == valor_comparable

# Deliberately unhashable: this type's whole reason for being is
# mutability. Subclasses with immutable semantics may override.
# NOTE: mostly a Python 2 concession; v3 does this automatically once
# __eq__ is defined.
__hash__ = None
def __len__(self):
    """Return the number of keys in the merged config."""
    return len(self._config)
def __setitem__(self, clave, valor):
    """Store ``valor`` under ``clave`` and record the modification."""
    self._config[clave] = valor
    self._trazar_modificacion_de(clave, valor)
def __getitem__(self, clave):
    """Dict-style lookup; delegates to `_get`."""
    return self._get(clave)
def _get(self, clave):
    """
    Core lookup helper behind ``__getitem__`` and ``__getattr__``.

    Returns the raw value stored under ``clave``; dict values are
    wrapped in a new leaf `DataProxy` that knows its key path and root
    object so nested writes can be tracked.

    :raises AttributeError:
        for pickle/copy protocol probes such as ``__setstate__``, which
        are asked for before ``__init__`` runs (avoids a RecursionError
        catch-22).
    :raises KeyError: if ``clave`` is absent from the merged config.
    """
    # Short-circuit pickle/copy machinery probes.
    if clave in ("__setstate__",):
        raise AttributeError(clave)
    # From this point on, a self._config may be assumed.
    valor = self._config[clave]
    if not isinstance(valor, dict):
        return valor
    # Wrap sub-dicts: the new proxy's key path is our own path (when we
    # have one) plus this key; its root is our root, or ourselves if we
    # have no _raiz and therefore are the root.
    ruta = getattr(self, "_rutaclave", tuple()) + (clave,)
    raiz = getattr(self, "_raiz", self)
    return DataProxy.datos_desde(datos=valor, raiz=raiz, rutaclave=ruta)
def _set(self, *args, **kwargs):
    """
    Convenience workaround for the default "attributes are config keys"
    behavior.

    Uses ``object.__setattr__`` to bypass this class's normal proxying,
    while being less verbose than calling it directly. Two modes, which
    may be combined if you really want to:

    - ``self._set('attrname', valor)``, exactly like ``__setattr__``
    - ``self._set(attname=valor)`` (i.e. kwargs), even less typing.
    """
    if args:
        object.__setattr__(self, *args)
    for clave, valor in kwargs.items():
        object.__setattr__(self, clave, valor)
def __repr__(self):
    """Debug representation: class name plus the merged config dict."""
    return "<{}: {}>".format(self.__class__.__name__, self._config)
def __contains__(self, clave):
    """``clave in proxy`` membership test against the merged config."""
    return clave in self._config
@property
def _es_hoja(self):
    """True when this proxy is a leaf, i.e. it references a root proxy."""
    return hasattr(self, "_raiz")
@property
def _es_raiz(self):
    """True when this proxy is the root, i.e. it tracks modifications."""
    return hasattr(self, "_modificar")
def _trazar_eliminacion_de(self, clave):
    """
    Notify the object responsible for tracking removals that ``clave``
    was deleted.

    The tracker is either the referenced root (when we are a leaf) or
    ourselves (when we are the root). Intermediate nodes only ever have
    __getitem__ invoked on them and are otherwise, by definition,
    treated as leaves.
    """
    if self._es_hoja:
        objetivo = self._raiz
    elif self._es_raiz:
        objetivo = self
    else:
        objetivo = None
    if objetivo is not None:
        objetivo._eliminar(getattr(self, "_rutaclave", tuple()), clave)
def _trazar_modificacion_de(self, clave, valor):
    """
    Notify the responsible tracker (root proxy, or ourselves when we
    are the root) that ``clave`` was set to ``valor``.
    """
    if self._es_hoja:
        objetivo = self._raiz
    elif self._es_raiz:
        objetivo = self
    else:
        objetivo = None
    if objetivo is not None:
        objetivo._modificar(getattr(self, "_rutaclave", tuple()), clave, valor)
def __delitem__(self, clave):
    """Dict-style deletion; removes the key and records the removal."""
    del self._config[clave]
    self._trazar_eliminacion_de(clave)
def __delattr__(self, nombre):
    """
    Delete a config key via attribute syntax — unless ``nombre`` is a
    real attribute (uncommon, but legitimate), in which case defer to
    normal object deletion semantics.
    """
    if nombre not in self:
        object.__delattr__(self, nombre)
    else:
        del self[nombre]
def limpiar(self):
    """Remove every key, tracking each deletion (dict.clear analogue)."""
    for clave in list(self.claves()):
        del self[clave]
def pop(self, *args):
    """
    ``dict.pop`` analogue that also tracks a successful removal.

    Accepts the same signatures as ``dict.pop`` (key, or key plus a
    default); arity handling, default handling and KeyError raising are
    all delegated to the underlying dict.
    """
    # Must test presence BEFORE (possibly) mutating self._config.
    existia = bool(args) and args[0] in self._config
    # We always have a _config (a real dict, or a merged-levels cache),
    # so let it handle all the corner cases.
    resultado = self._config.pop(*args)
    # Only track the deletion when a pop actually happened; otherwise
    # the caller presumably supplied a default and nothing mutated.
    if existia:
        self._trazar_eliminacion_de(args[0])
    # In every case, return the popped value.
    return resultado
def popitem(self):
    """``dict.popitem`` analogue; tracks removal of the popped key."""
    clave, valor = self._config.popitem()
    self._trazar_eliminacion_de(clave)
    return (clave, valor)
def setdefault(self, *args):
    """
    ``dict.setdefault`` analogue that also tracks an insertion.

    Mirrors ``dict.setdefault(clave[, default])``: returns the existing
    value when ``clave`` is present, otherwise inserts ``default`` (or
    None when omitted, matching dict semantics) and returns it.
    """
    # Must test up front whether the key already existed.
    clave_existio = args and args[0] in self._config
    # Operate locally; the underlying dict handles arity/TypeError rules.
    ret = self._config.setdefault(*args)
    # Key already existed -> nothing mutated, short-circuit.
    if clave_existio:
        return ret
    # BUG FIX: dict.setdefault may legally be called with only a key,
    # in which case it inserts None; the old unconditional
    # ``clave, default = args`` unpacking raised ValueError for that
    # legitimate single-argument call.
    clave = args[0]
    default = args[1] if len(args) > 1 else None
    self._trazar_modificacion_de(clave, default)
    return ret
def actualizar(self, *args, **kwargs):
    """
    ``dict.update`` analogue routed through ``__setitem__`` so every
    assignment is tracked.

    Accepts the same call shapes as ``dict.update``: a mapping, an
    iterable of (key, value) pairs, and/or keyword arguments.

    :raises TypeError: if more than one positional argument is given.
    """
    # BUG FIX: args and kwargs were previously mutually exclusive
    # (``elif``), silently dropping positional data whenever kwargs
    # were also given; real dict.update applies both. The old TODO
    # about arity > 1 is also resolved with an explicit check.
    if len(args) > 1:
        err = "update expected at most 1 argument, got {}".format(len(args))
        raise TypeError(err)
    if args:
        arg = args[0]
        if isinstance(arg, dict):
            for clave in arg:
                self[clave] = arg[clave]
        else:
            # An iterable of (clave, valor) pairs.
            for par in arg:
                self[par[0]] = par[1]
    for clave, valor in kwargs.items():
        self[clave] = valor
class Config(DataProxy):
    """
    Dued's primary configuration-handling class.

    See :doc:`/kh_general/configuracion` for details on the config
    system this class implements, including the config hierarchy
    (:ref:`jerarquía-de-config`). The rest of this docstring assumes
    familiarity with that document.

    **Access**

    Values may be read or updated with dict syntax (``config['foo']``)
    or attribute syntax (``config.foo``). Nesting works the same way:
    dict config values become objects honoring both the dictionary
    protocol and attribute access (``config['foo']['bar']`` /
    ``config.foo.bar``).

    **A note on methods vs attribute access**

    This class implements the whole dict protocol — ``claves``,
    ``valores``, ``items``, ``pop`` etc. behave as on regular dicts —
    plus config-specific methods such as `cargar_sistema`,
    `cargar_coleccion`, `combinar`, `clonar`, etc.

    .. warning::
        Consequently, config options sharing names with these methods
        **must** be accessed with dict syntax (e.g.
        ``miconfig['claves']``).

    **Lifecycle**

    At instantiation time, `.Config`:

    - creates the per-level data structures;
    - stores the levels supplied to `__init__` (defaults, overrides)
      plus the various path/filename patterns;
    - and loads config files, where found (normally just the system-
      and user-level files, since project- and runtime-level files need
      more information before they can be sought and loaded).
    - This step can be skipped by specifying ``lento=True``.

    At that point `.Config` is fully usable — and because it preloads
    some config files, those files may affect everything that follows,
    such as CLI parsing or loading of task collections.

    In the CLI use case, further processing happens after instantiation
    via the ``load_*``-style methods (`cargar_anulaciones`,
    `cargar_proyecto`, etc.):

    - argument/option parse results are applied to the overrides level;
    - a project-level config file is loaded (it depends on a loaded
      task collection);
    - a runtime config file is loaded, if its flag was supplied;
    - then, for each task being executed:

      - per-collection data is loaded (only possible once collection
        and task are in hand);
      - shell environment data is loaded (done last because the rest of
        the config is used as a guide for interpreting env var names).

    The config object is then handed to the executing task as part of
    its `.Contexto`. Any modification made directly to the `.Config`
    after that point ends up stored in its own (topmost) config level,
    easing debugging of final values. Finally, any *deletions* (dict
    mutators like ``pop``, ``limpiar``, etc.) are also tracked in their
    own structure, letting the object honor such calls without mutating
    the underlying source data.

    **Special class attributes**

    The following class-level attributes configure the config system
    itself (e.g. which file paths to load) and are primarily meant to
    be overridden by subclasses; see each attribute's comment below.

    .. versionadded:: 1.0
    """

    # Default base value: used directly for archivo_prefijo, and
    # uppercased for entorno_prefijo (see below).
    prefijo = "dued"
    # Config-file 'basename' (not a literal basename — it may contain
    # path parts). Appended to the configured sistema/ususario/etc.
    # prefixes to build final (pre-extension) file paths; e.g. the
    # system prefix ``/etc/`` plus ``dued`` yields ``/etc/dued.json``.
    # None means: use the value of ``prefijo``.
    archivo_prefijo = None
    # Prefix (joined with an underscore) selecting which environment
    # variables are loaded as the env-var config level. Its default is
    # the uppercased value of ``prefijo``, so env vars like
    # ``DUED_CORRER_ECHO`` are sought by default. None means: use the
    # value of ``prefijo``.
    entorno_prefijo = None
@staticmethod
def global_defaults():
    """
    Return Dued's core default settings.

    Generally only for internal use by `.Config`. For descriptions of
    these values, see :ref:`valores-predeterminados`.

    Subclasses may choose to override this method, calling
    ``Config.global_defaults`` and applying `.fusionar_dics` to the
    result, in order to add to or modify these values.

    .. versionadded:: 1.0
    """
    # On Windows there is no /bin/bash; look for a configured COMSPEC
    # env var (https://en.wikipedia.org/wiki/COMSPEC) or otherwise fall
    # back to an unqualified cmd.exe.
    if WINDOWS:
        shell = os.environ.get("COMSPEC", "cmd.exe")
    # Otherwise assume Unix; most distros have /bin/bash available.
    # TODO: consider an automatic fallback to /bin/sh for systems
    # lacking /bin/bash; then again, users can set correr.shell easily
    # enough, so...
    else:
        shell = "/bin/bash"
    return {
        # TODO: 'debug' is documented but not really implemented beyond
        # the env var and CLI flag. To honor it here we would need to
        # figure out where to call `util.habilita_logs`:
        # - as a fallback default for arg parsing it is of little use,
        #   since at that point the config holds nothing but defaults
        #   and CLI flag values
        # - at file load time it may be somewhat useful, though when
        #   that happens may be subject to change soon
        # - at env var load time it seems a little silly given the
        #   existing at-startup support for DUED_DEBUG
        # 'debug': False,
        # TODO: these feel inconsistent re: defaults stored here vs
        # 'stored' as logic where referenced; there are probably bits
        # of "if None -> default" logic that could move here.
        # Alternatively, should _more_ of these defaults be None?
        "correr": {
            "asincrono": False,
            "rechazado": False,
            "seco": False,
            "echo": False,
            "echo_stdin": None,
            "codificacion": None,
            "entorno": {},
            "err_stream": None,
            "retroceder": True,
            "ocultar": None,
            "ing_stream": None,
            "sal_stream": None,
            "pty": False,
            "reemplazar_ent": False,
            "shell": shell,
            "alarma": False,
            "centinelas": [],
        },
        # This lives outside the 'correr' tree; otherwise it would be
        # harder to extend/override in Fabric 2, which has a split
        # local/remote runner situation.
        "corredores": {"local": Local},
        "sudo": {
            "password": None,
            "prompt": "[sudo] password: ",
            "usuario": None,
        },
        "artefactos": {
            "nombre_auto_guion": True,
            "nombre_de_coleccion": "artefactos",
            "dedupe": True,
            "clase_ejecutor": None,
            "dir_raiz": None,
        },
        "tiempo_de_descanso": {"comando": None},
    }
def __init__(
    self,
    anulaciones=None,
    defaults=None,
    sistema_prefijo=None,
    ususario_prefijo=None,
    dir_de_py=None,
    acte_ruta=None,
    lento=False,
):
    """
    Create a new config object.

    :param dict defaults:
        A dict containing default (lowest-level) config data.
        Default: `global_defaults`.
    :param dict anulaciones:
        A dict containing override-level config data. Default: ``{}``.
    :param str sistema_prefijo:
        Base path for the global config file location; combined with
        the prefix and file suffixes to arrive at the final file path
        candidates. Default: ``/etc/`` (e.g. ``/etc/dued.yaml`` or
        ``/etc/dued.json``).
    :param str ususario_prefijo:
        Like ``sistema_prefijo``, but for the per-user config file.
        These variables are joined as strings, not via path-style
        joins, so they may contain partial file paths; for the per-user
        file this often means a leading dot, making the final result a
        hidden file on most systems. Default: ``~/.``
        (e.g. ``~/.dued.yaml``).
    :param str dir_de_py:
        Optional directory path of the currently loaded `.Coleccion`
        (as loaded by `.Cargador`). When non-empty, it triggers seeking
        of per-project config files in this directory.
    :param str acte_ruta:
        Optional path to a runtime configuration file, occupying the
        penultimate slot in the config hierarchy. Should be a full file
        path to an existing file, not a directory path or prefix.
    :param bool lento:
        Whether to skip automatic loading of the lower config levels.
        By default (``lento=False``), ``__init__`` automatically calls
        `cargar_sistema` and `cargar_usuario`. For more control over
        what is loaded and when, say ``lento=True`` and no automatic
        loading is performed.

        .. note::
            ``defaults`` and/or ``anulaciones`` given as ``__init__``
            kwargs *do* end up loaded immediately either way.
    """
    # Technically an implementation detail — not part of the public
    # API. Stores merged configs; accessed via DataProxy.
    self._set(_config={})
    # Config file suffixes to search, in order of preference.
    self._set(_sufijos_de_archivo=("yaml", "yml", "json", "py"))
    # Default config values, normally a copy of `global_defaults`.
    if defaults is None:
        defaults = copiar_dic(self.global_defaults())
    self._set(_defaults=defaults)
    # Collection-driven config data, gathered from the collection tree
    # containing the running task.
    self._set(_coleccion={})
    # Path prefix searched for the system config file.
    # NOTE: there is no default system prefix on Windows.
    if sistema_prefijo is None and not WINDOWS:
        sistema_prefijo = "/etc/"
    self._set(_sistema_prefijo=sistema_prefijo)
    # Path to the loaded system config file, if any.
    self._set(_sistema_ruta=None)
    # Whether the system config file has been loaded or not (``None``
    # if no load has been attempted yet).
    self._set(_sistema_seteado=None)
    # Data loaded from the system config file.
    self._set(_sistema={})
    # Path prefix searched for per-user config files.
    if ususario_prefijo is None:
        ususario_prefijo = "~/."
    self._set(_ususario_prefijo=ususario_prefijo)
    # Path to the loaded user config file, if any.
    self._set(_ususario_ruta=None)
    # Whether the user config file has been loaded or not (``None`` if
    # no load has been attempted yet).
    self._set(_ususario_seteado=None)
    # Data loaded from the per-user config file.
    self._set(_ususario={})
    # Project-config-file attributes are initialized/overwritten via a
    # dedicated setter, since they may need setting post-init.
    self.setea_proyecto_ruta(dir_de_py)
    # Environment variable name prefix.
    entorno_prefijo = self.entorno_prefijo
    if entorno_prefijo is None:
        entorno_prefijo = self.prefijo
    entorno_prefijo = "{}_".format(entorno_prefijo.upper())
    self._set(_entorno_prefijo=entorno_prefijo)
    # Config data loaded from the shell environment.
    # BUG FIX: cargar_entorno_shell() stores the env data as _varent
    # and combinar() reads self._varent, but this holder used to be
    # initialized as _entorno — so every combinar() call raised
    # AttributeError until the shell env had been loaded.
    self._set(_varent={})
    # Runtime-config-file attributes likewise live behind their own
    # setter, as they may need setting post-init.
    self.setea_acte_ruta(acte_ruta)
    # Overrides — the highest normal config level. Typically filled
    # with command-line flags.
    if anulaciones is None:
        anulaciones = {}
    self._set(_anula=anulaciones)
    # Absolute highest level: user modifications.
    self._set(_modificaciones={})
    # And its sibling: user deletions. (Stored as a flat dict of key
    # paths mapped to dummy values, for constant-time membership
    # testing/removal without messy recursion.
    # TODO: maybe redo _everything_ that way? In _modificaciones and
    # other levels the values would of course matter, not just be None.)
    self._set(_eliminaciones={})
    # Convenience loading of user & system files, since they require no
    # other levels in order to function.
    if not lento:
        self._cargar_archivo_de_conf_base()
    # Always merge; otherwise defaults etc. are unusable until the
    # creator or a subroutine does so.
    self.combinar()
def _cargar_archivo_de_conf_base(self):
    """Load system- and user-level config files, without merging."""
    # Just a refactoring of work done in non-lazy init or in clonar().
    self.cargar_sistema(combinar=False)
    self.cargar_usuario(combinar=False)
def cargar_defaults(self, datos, combinar=True):
    """
    Set or replace the 'defaults' config level, from ``datos``.

    :param dict datos: config data to load as the defaults level.
    :param bool combinar:
        Whether to merge the loaded data into the central config.
        Default: ``True``.
    :returns: ``None``.

    .. versionadded:: 1.0
    """
    self._set(_defaults=datos)
    if combinar:
        self.combinar()
def cargar_anulaciones(self, datos, combinar=True):
    """
    Set or replace the 'overrides' config level, from ``datos``.

    :param dict datos: config data to load as the overrides level.
    :param bool combinar:
        Whether to merge the loaded data into the central config.
        Default: ``True``.
    :returns: ``None``.

    .. versionadded:: 1.0
    """
    self._set(_anula=datos)
    if combinar:
        self.combinar()
def cargar_sistema(self, combinar=True):
    """
    Load a system-level config file, if possible.

    Checks the configured ``_sistema_prefijo`` path — defaulting to
    ``/etc`` — and thus loads files such as ``/etc/dued.yml``.

    :param bool combinar:
        Whether to merge the loaded data into the central config.
        Default: ``True``.
    :returns: ``None``.

    .. versionadded:: 1.0
    """
    self._cargar_archivo(prefijo="sistema", combinar=combinar)
def cargar_usuario(self, combinar=True):
    """
    Load a user-level config file, if possible.

    Checks the configured per-user prefix path — defaulting to ``~/.``
    — and thus loads files such as ``~/.dued.yml``.

    :param bool combinar:
        Whether to merge the loaded data into the central config.
        Default: ``True``.
    :returns: ``None``.

    .. versionadded:: 1.0
    """
    # BUG FIX: the per-user attributes are named _ususario_prefijo,
    # _ususario_ruta, _ususario_seteado and _ususario (see __init__ and
    # combinar()), so the prefix handed down must be "ususario";
    # passing "usuario" made _cargar_archivo look up attributes that
    # do not exist.
    self._cargar_archivo(prefijo="ususario", combinar=combinar)
def cargar_proyecto(self, combinar=True):
    """
    Load a project-level config file, if possible.

    Checks the configured ``_proyecto_prefijo`` value derived from the
    path given to `setea_proyecto_ruta`, which is typically set to the
    directory containing the loaded task collection. So if the CLI tool
    were run against a collection under ``/home/myuser/code``,
    `cargar_proyecto` would seek files like
    ``/home/myuser/code/dued.yml``.

    :param bool combinar:
        Whether to merge the loaded data into the central config.
        Default: ``True``.
    :returns: ``None``.

    .. versionadded:: 1.0
    """
    self._cargar_archivo(prefijo="proyecto", combinar=combinar)
def setea_acte_ruta(self, ruta):
    """
    Set the runtime config file path.

    Stores ``ruta`` and (re)initializes the runtime-level data holders;
    no file is loaded here — see `cargar_acte` for that.

    .. versionadded:: 1.0
    """
    # Path to the user-specified runtime configuration file.
    # BUG FIX: _cargar_archivo(prefijo="acte", absoluto=True) and
    # _fusionar_archivo("acte", ...) both derive and read the attribute
    # name "_acte_ruta", but this used to be stored as "_ruta_al_acte",
    # so the runtime config file could never be found or merged.
    self._set(_acte_ruta=ruta)
    # Data loaded from the runtime config file.
    self._set(_acte={})
    # Whether the runtime config file has been loaded or not (``None``
    # if no load has been attempted yet).
    self._set(_acte_seteado=None)
def cargar_acte(self, combinar=True):
    """
    Load a runtime-level config file, if one was specified.

    When the CLI framework creates a `Config`, it records the full path
    to the requested runtime config file (see `setea_acte_ruta`); this
    method attempts to load that file.

    :param bool combinar:
        Whether to merge the loaded data into the central config.
        Default: ``True``.
    :returns: ``None``.

    .. versionadded:: 1.0
    """
    self._cargar_archivo(prefijo="acte", absoluto=True, combinar=combinar)
def cargar_entorno_shell(self):
    """
    Load values from the shell environment.

    `.cargar_entorno_shell` is designed to run late in a `.Config`
    object's lifecycle, once all other sources (such as a runtime
    config file or per-collection config) have been loaded. Loading
    from the shell is not hugely expensive, but it must happen at a
    specific time so that the "only known config keys are loaded from
    the environment" behavior works correctly.

    See :ref:`entorno-vars` for details on that design decision and
    other information on how environment variables are scanned and
    loaded.

    .. versionadded:: 1.0
    """
    # Force a merge of existing data first, to guarantee we act on an
    # up-to-date picture.
    debug("Ejecutando pre-combinar para carga de entorno shell...")
    self.combinar()
    debug("Hecho con pre-combinar.")
    cargador = Entorno(config=self._config, prefijo=self._entorno_prefijo)
    self._set(_varent=cargador.cargar())
    debug("Entorno shell cargado, desencadena la fusión final")
    self.combinar()
def cargar_coleccion(self, datos, combinar=True):
    """
    Update collection-driven config data.

    Intended for use by the core task-execution machinery, which is
    responsible for deriving collection-based data. See
    :ref:`coleccion-configuration` for details.

    .. versionadded:: 1.0
    """
    debug("Cargando configuración de coleccion")
    self._set(_coleccion=datos)
    if combinar:
        self.combinar()
def setea_proyecto_ruta(self, ruta):
    """
    Set the directory path where a project-level config file may live.

    Does not load any file by itself; for that, see `cargar_proyecto`.

    .. versionadded:: 1.0
    """
    # 'Prefijo' naming, to match the other attribute sets.
    proyecto_prefijo = None
    if ruta is not None:
        # Ensure the prefix is normalized to a directory-like path
        # string.
        proyecto_prefijo = join(ruta, "")
    self._set(_proyecto_prefijo=proyecto_prefijo)
    # Path to the loaded per-project config file, if any.
    self._set(_proyecto_ruta=None)
    # BUG FIX: these two holders used to be stored as _py_seteado and
    # _py, but _cargar_archivo(prefijo="proyecto") and
    # _fusionar_archivo("proyecto", ...) derive the attribute names
    # _proyecto_seteado and _proyecto from the prefix, so project
    # config data could never be stored or merged.
    # Whether the project config file has been loaded or not (``None``
    # if no load has been attempted yet).
    self._set(_proyecto_seteado=None)
    # Data loaded from the per-project config file.
    self._set(_proyecto={})
def _cargar_archivo(self, prefijo, absoluto=False, combinar=True):
    """
    Generic config-file loader backing the ``cargar_*`` methods.

    :param str prefijo:
        Level name ("sistema", "ususario", "proyecto", "acte"); used to
        derive the ``_<prefijo>_seteado``, ``_<prefijo>_ruta``,
        ``_<prefijo>_prefijo`` and ``_<prefijo>`` attribute names.
    :param bool absoluto:
        When True, ``_<prefijo>_ruta`` already holds a full file path
        (the runtime-file case); otherwise, candidate paths are built
        from the level's path prefix plus each supported suffix.
    :param bool combinar:
        Whether to merge loaded data into the central config.
    """
    # Setup.
    # BUG FIX: the "has this been loaded yet" flags are initialized as
    # _<prefijo>_seteado (see __init__ / the setea_* methods), not
    # _<prefijo>_ubicado, which never existed.
    ubicado = "_{}_seteado".format(prefijo)
    ruta = "_{}_ruta".format(prefijo)
    datos = "_{}".format(prefijo)
    arreglo = self.archivo_prefijo
    if arreglo is None:
        arreglo = self.prefijo
    # Short-circuit if loading appears to have already happened.
    if getattr(self, ubicado) is not None:
        return
    # Moar configuration!
    if absoluto:
        ruta_absoluta = getattr(self, ruta)
        # None -> absolute path expected but none set; short-circuit.
        if ruta_absoluta is None:
            return
        rutas = [ruta_absoluta]
    else:
        prefijo_de_ruta = getattr(self, "_{}_prefijo".format(prefijo))
        # Short-circuit if loading seems unnecessary (e.g. for project
        # config files when not running out of a project).
        if prefijo_de_ruta is None:
            return
        rutas = [
            ".".join((prefijo_de_ruta + arreglo, x))
            for x in self._sufijos_de_archivo
        ]
    # Poke 'em.
    for rutadearchivo in rutas:
        # Normalize.
        rutadearchivo = expanduser(rutadearchivo)
        try:
            try:
                tipo_ = splitext(rutadearchivo)[1].lstrip(".")
                # BUG FIX: the per-type loader methods are named
                # _cargar_yaml / _cargar_yml / _cargar_json /
                # _cargar_py — the old "_load_{}" lookup always raised
                # AttributeError and wrongly reported every file type
                # as unsupported.
                cargador = getattr(self, "_cargar_{}".format(tipo_))
            except AttributeError:
                msj = "Los archivod de config tipo {!r} (del arch. {!r}) no son compatibles! Use uno de: {!r}"  # noqa
                raise TipoDeArchivoDesconocido(
                    msj.format(tipo_, rutadearchivo, self._sufijos_de_archivo)
                )
            # Store the data, the path it was found at, and the fact
            # that it was found.
            self._set(datos, cargador(rutadearchivo))
            self._set(ruta, rutadearchivo)
            self._set(ubicado, True)
            break
        # Usually means 'no such file'; just note it and skip ahead.
        except IOError as e:
            if e.errno == 2:
                err = "No vi ninguno {}, ignorando!."
                debug(err.format(rutadearchivo))
            else:
                raise
    # Still None -> no suffixed path was located; record that fact.
    if getattr(self, ruta) is None:
        self._set(ubicado, False)
    # Merge loaded data in, if any was found.
    elif combinar:
        self.combinar()
def _cargar_yaml(self, ruta):
    # Per-type loader dispatched to by _cargar_archivo for .yaml files.
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects when fed untrusted input; config files
    # are usually trusted, but consider safe_load (a behavior change
    # for files relying on python tags) — TODO confirm which yaml
    # implementation/module is bound to `yaml` here.
    with open(ruta) as fd:
        return yaml.load(fd)
def _cargar_yml(self, ruta):
    """Alias loader: .yml files are parsed exactly like .yaml files."""
    return self._cargar_yaml(ruta)
def _cargar_json(self, ruta):
    """Per-type loader: parse the file at ``ruta`` as JSON."""
    with open(ruta) as fd:
        return json.load(fd)
def _cargar_py(self, ruta):
    # Per-type loader: treat a .py file as a config module and harvest
    # its public members into a plain dict.
    datos = {}
    # NOTE(review): this iterates the result of load_source() with
    # six.iteritems(), which requires load_source to return a mapping
    # (e.g. vars() of the loaded module) — confirm against its
    # definition elsewhere in this project.
    for clave, valor in six.iteritems(load_source("mod", ruta)):
        # Strip special members, which will always be builtins and
        # other special things a user won't want in their config.
        if clave.startswith("__"):
            continue
        # Raise exceptions on module values; they are impossible to
        # pickle.
        # TODO: suck it up and reimplement copy() without pickling?
        # Then again, a user trying to stuff a module into their config
        # is probably better served doing it in runtime/library code
        # and not in a "config file"... right?
        if isinstance(valor, types.ModuleType):
            err = "'{}' es un módulo, que no se puede utilizar como valor de config. (¿Quizás está dando un archivo de artefactos en lugar de un archivo de configuración por error??)"  # noqa
            raise MiembroDeConfigNoSeleccionable(err.format(clave))
        datos[clave] = valor
    return datos
def combinar(self):
    """
    Merge all config sources, in order, into a fresh ``_config``.

    Merge order (lowest precedence first): defaults, collection-driven
    data, system file, user file, project file, shell environment,
    runtime file, overrides, user modifications; finally user deletions
    are applied on top.

    .. versionadded:: 1.0
    """
    debug("Fusionando fuentes de config en orden en un nuevo config_vacio...")
    self._set(_config={})
    debug("Defaults: {!r}".format(self._defaults))
    fusionar_dics(self._config, self._defaults)
    debug("Coleccion-conducida: {!r}".format(self._coleccion))
    fusionar_dics(self._config, self._coleccion)
    self._fusionar_archivo("sistema", "Sistema-completo")
    self._fusionar_archivo("ususario", "Por-usuario")
    self._fusionar_archivo("proyecto", "Por-proyecto")
    # NOTE(review): _varent is assigned by cargar_entorno_shell();
    # verify it is also initialized at construction time, otherwise
    # this attribute access fails before the shell env is first loaded.
    debug("Configuración de variable de entorno: {!r}".format(self._varent))
    fusionar_dics(self._config, self._varent)
    self._fusionar_archivo("acte", "Tiempoej")
    debug("Anulaciones: {!r}".format(self._anula))
    fusionar_dics(self._config, self._anula)
    debug("Modificaciones: {!r}".format(self._modificaciones))
    fusionar_dics(self._config, self._modificaciones)
    debug("Eliminaciones: {!r}".format(self._eliminaciones))
    aniquilar(self._config, self._eliminaciones)
def _fusionar_archivo(self, nombre, desc):
    """
    Merge one already-loaded config-file level into the central config.

    :param str nombre:
        Level name used to derive the ``_<nombre>_seteado``,
        ``_<nombre>_ruta`` and ``_<nombre>`` attribute names.
    :param str desc: human-readable description for debug logging.
    """
    # Setup.
    desc += " archivo de configuración"  # yup
    # BUG FIX: the load flags live in _<nombre>_seteado (see __init__ /
    # the setea_* methods), not _<nombre>_ubicado.
    _ubicado = getattr(self, "_{}_seteado".format(nombre))
    ruta = getattr(self, "_{}_ruta".format(nombre))
    datos = getattr(self, "_{}".format(nombre))
    # None -> no load attempted yet.
    if _ubicado is None:
        debug("{} aún no se ha cargado, omitiendo".format(desc))
    # True -> hooray.
    # BUG FIX: this branch previously referenced an undefined name
    # ``ubicado`` (NameError at runtime); the local is ``_ubicado``.
    elif _ubicado:
        debug("{} ({}): {!r}".format(desc, ruta, datos))
        fusionar_dics(self._config, datos)
    # False -> tried, did not succeed.
    else:
        # TODO: how to preserve what was attempted for each case, but
        # only for the negative one? Just branch on 'nombre' here?
        debug("{} no encontrado, saltando".format(desc))
def clonar(self, dentro=None):
    """
    Return a copy of this configuration object.

    The new object will be identical in terms of configured sources and
    any loaded (or user-manipulated) data, but will be a distinct object
    with as little shared mutable state as possible.

    Specifically, all `dict` values within the config are recursively
    recreated, with non-dict leaf values run through `copy.copy` (note:
    *not* `copy.deepcopy`, as that can blow up on objects such as
    compiled regexen or thread locks, which are often buried inside rich
    aggregates like API or DB clients).

    The only config values that may still end up shared between a config
    and its clone are therefore those 'rich' objects that do not
    `copy.copy` cleanly, or non-dict compound objects (such as lists or
    tuples).

    :param dentro:
        A `.Config` subclass that the new clone should be "upgraded" to.
        Used by client libraries which have their own `.Config`
        subclasses that e.g. define extra defaults; cloning "into" such a
        subclass ensures new keys/subtrees are added gracefully, without
        overwriting anything that may have been pre-defined.
        Default: ``None`` (just clone into another regular `.Config`).
    :returns:
        A `.Config`, or an instance of the class given to ``dentro``.
    :raises:
        ``TypeError``, if a value is given for ``dentro`` and it is not a
        `.Config` subclass.

    .. versionadded:: 1.0
    """
    # Sanity check for 'dentro'
    if dentro is not None and not issubclass(dentro, self.__class__):
        err = "'dentro' debe ser una subclase de {}!"
        raise TypeError(err.format(self.__class__.__name__))
    # Build the new object
    klase = self.__class__ if dentro is None else dentro
    # Also allows arbitrary constructor kwargs, for subclasses that want
    # (some) data passed at init time (vs post-init copying).
    # TODO: probably want to pivot the whole class that way eventually...
    # NOTE: this includes lento=True — see the end of the method.
    nuevo = klase(**self._clona_kwargs_inic(dentro=dentro))
    # Copy/merge all 'private' data sources and attributes.
    # NOTE(review): several names below look inconsistent with attributes
    # used elsewhere in this class ("ususario_*" vs "usuario"; "anulaciones"
    # vs the "_anula" attribute read by combinar(); "entorno" vs "_varent").
    # Each entry must match an actual "_<name>" attribute or getattr() will
    # raise — confirm against __init__ before changing anything here.
    for nombre in """
        coleccion
        sistema_prefijo
        sistema_ruta
        sistema_ubicado
        sistema
        ususario_prefijo
        ususario_ruta
        ususario_ubicado
        usuario
        proyecto_prefijo
        proyecto_ruta
        proyecto_ubicado
        proyecto
        entorno_prefijo
        entorno
        acte_ruta
        acte_ubicado
        acte
        anulaciones
        modificaciones
    """.split():
        nombre = "_{}".format(nombre)
        mis_datos = getattr(self, nombre)
        # Non-dict data is transferred directly (via a copy()).
        # NOTE: presumably someone could really screw up and change the
        # types of these values, but at that point it's on them...
        if not isinstance(mis_datos, dict):
            nuevo._set(nombre, copy.copy(mis_datos))
        # Dict data gets merged (which also implies a copy.copy eventually)
        else:
            fusionar_dics(getattr(nuevo, nombre), mis_datos)
    # Do what __init__ would have done if not lazy, i.e. load system/user
    # conf files.
    nuevo._cargar_archivo_de_conf_base()
    # Finally, combinar() for real (_cargar_archivo_de_conf_base doesn't do
    # it internally, so the data wouldn't show up otherwise).
    nuevo.combinar()
    return nuevo
def _clona_kwargs_inic(self, dentro=None):
    """
    Supply kwargs suitable for initializing a new clone of this object.

    Most of the `.clonar` process copies data between two instances
    rather than passing init kwargs, but occasionally real init kwargs
    are needed — hence this method.

    :param dentro: The value of ``dentro`` as passed to the `.clonar` call.
    :returns: a `dict`.
    """
    # Hand over fresh defaults, otherwise global_defaults() would be used
    # instead. When 'dentro' is in play, we want the union of the two.
    defaults = copiar_dic(self._defaults)
    if dentro is not None:
        fusionar_dics(defaults, dentro.global_defaults())
    # 'lento' keeps the clone's __init__ from loading external sources;
    # clonar() drives loading/merging itself afterwards.
    # TODO: consider hardcoding this on the calling end (inside clonar())
    # so subclasses can't accidentally clobber it.
    return {"defaults": defaults, "lento": True}
def _modificar(self, rutaclave, clave, valor):
    """
    Update our user-modifications config level with new data.

    :param tuple rutaclave:
        Key path identifying the sub-dict being updated. May be an empty
        tuple if the update happens at the top level.
    :param str clave:
        The actual key receiving the update.
    :param valor:
        The value being written.
    """
    # First make sure the key path is cleared from _eliminaciones, in
    # case it was previously deleted.
    extirpar(self._eliminaciones, rutaclave + (clave,))
    # Walk down the modifications structure, creating intermediate dicts
    # as needed, then write the value at the leaf.
    nodo = self._modificaciones
    for segmento in rutaclave:
        nodo = nodo.setdefault(segmento, {})
    nodo[clave] = valor
    self.combinar()
def _eliminar(self, rutaclave, clave):
    """
    Like `._modificar`, but for deletion.

    Records the deletion inside ``self._eliminaciones`` as a nested dict
    whose leaf value ``None`` marks a deleted key, then re-merges.
    """
    # NOTE: because deletions are processed last in combinar(), we don't
    # need to remove things from _modificaciones when deleting; but we *do*
    # perform the inverse — removing from _eliminaciones when modifying.
    # TODO: would it be sensible to push this step onto callers?
    datos = self._eliminaciones
    rutaclave = list(rutaclave)
    while rutaclave:
        subclave = rutaclave.pop(0)
        if subclave in datos:
            datos = datos[subclave]
            # Finding None means something higher up than our requested
            # rutaclave is already marked as deleted; nothing more to do.
            if datos is None:
                return
            # Otherwise it's presumably another dict, so keep looping...
        else:
            # Key not found -> nobody has marked anything along this part
            # of the path for deletion yet, so start building it out.
            datos[subclave] = {}
            # Then prepare for the next iteration
            datos = datos[subclave]
    # Loop exited -> datos must be the leaf-most dict, so we can now set
    # our deleted key to None
    datos[clave] = None
    self.combinar()
class ErrorDeFusionAmbiguo(ValueError):
    """
    Raised by `fusionar_dics` when a dict value and a non-dict value
    collide for the same key and cannot be cleanly merged.
    """
    pass
def fusionar_dics(base, updates):
    """
    Recursively merge dict ``updates`` into dict ``base`` (mutating ``base``).

    * Values which are themselves dicts are recursed into.
    * Values which are a dict in one input and *not* a dict in the other
      (e.g. inputs ``{'foo': 5}`` and ``{'foo': {'bar': 5}}``) are
      irreconcilable and raise an exception.
    * Non-dict leaf values are run through `copy.copy` to avoid state bleed.

    .. note::
        This is effectively a lightweight `copy.deepcopy` which offers
        protection from mismatched types (dict vs non-dict) and avoids some
        core deepcopy problems (such as how it explodes on certain object
        types).

    :returns:
        The value of ``base``, which is mostly useful for wrapper functions
        like `copiar_dic`.

    .. versionadded:: 1.0
    """
    # TODO: for chrissakes just make it return instead of mutating?
    for clave, valor in (updates or {}).items():
        if clave not in base:
            # Brand-new key: reconstruct dicts so we never hold references
            # into 'updates' (nasty state-bleed bugs otherwise); pass
            # fileno-bearing objects (real files) by reference; shallow-copy
            # everything else.
            if isinstance(valor, dict):
                base[clave] = copiar_dic(valor)
            elif hasattr(valor, "fileno"):
                base[clave] = valor
            else:
                base[clave] = copy.copy(valor)
            continue
        # Key exists on both sides: decide by dict-ness of each value.
        viejo_es_dict = isinstance(base[clave], dict)
        nuevo_es_dict = isinstance(valor, dict)
        if viejo_es_dict and nuevo_es_dict:
            # Both dicts -> recurse.
            fusionar_dics(base[clave], valor)
        elif viejo_es_dict != nuevo_es_dict:
            # dict vs non-dict -> irreconcilable.
            raise _merge_error(base[clave], valor)
        elif hasattr(valor, "fileno"):
            # Real files don't copy well; pass by reference. Meh.
            base[clave] = valor
        else:
            base[clave] = copy.copy(valor)
    return base
def _merge_error(orig, new_):
    """Build the exception describing an irreconcilable merge of two values."""
    mensaje = "Can't cleanly merge {} with {}".format(
        _format_mismatch(orig), _format_mismatch(new_)
    )
    return ErrorDeFusionAmbiguo(mensaje)
def _format_mismatch(x):
return "{} ({!r})".format(type(x), x)
def copiar_dic(origen):
    """
    Return a fresh copy of ``origen`` with as little shared state as
    possible.

    Uses `fusionar_dics` under the hood, with an empty ``base`` dict; see
    its docstring for behavior details.

    .. versionadded:: 1.0
    """
    destino = {}
    fusionar_dics(destino, origen)
    return destino
def extirpar(dic_, rutaclave):
    """
    Strip the key pointed to by ``rutaclave`` out of nested dict ``dic_``,
    if it exists.

    .. versionadded:: 1.0
    """
    ruta = list(rutaclave)
    hoja = ruta.pop()
    nodo = dic_
    for paso in ruta:
        if paso not in nodo:
            # Path doesn't exist; nothing to strip out.
            return
        nodo = nodo[paso]
    # Drop the leaf key if present (no-op otherwise).
    nodo.pop(hoja, None)
def aniquilar(base, eliminaciones):
    """
    Remove all (nested) keys mentioned in ``eliminaciones`` from ``base``.

    Leaf values in ``eliminaciones`` are ``None``, marking the matching
    key in ``base`` for deletion; dict values recurse into sub-dicts.

    .. versionadded:: 1.0
    """
    # Use native .items() for consistency with fusionar_dics above (the
    # original used six.iteritems, needlessly requiring the py2-compat
    # shim), and reuse the already-bound 'valor' instead of re-indexing
    # eliminaciones[clave].
    for clave, valor in eliminaciones.items():
        if isinstance(valor, dict):
            # NOTE: no check that base[clave] exists; anything listed in a
            # deletions structure must exist in some source somewhere, and
            # therefore also in the merged cache being pruned.
            aniquilar(base[clave], valor)
        else:  # implicitly None
            del base[clave]
| [] | [] | [
"COMSPEC"
] | [] | ["COMSPEC"] | python | 1 | 0 |