hunk (dict) | file (string, 0–11.8M chars) | file_path (string, 2–234 chars) | label (int64, 0–1) | commit_url (string, 74–103 chars) | dependency_score (sequence of 5 floats)
---|---|---|---|---|---
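Each row below pairs one `hunk` record with the full file contents and metadata columns from the header above. As a rough sketch of that layout (field names taken from the header and the visible records; the Go types are assumptions, not an official schema), a row could be modeled as:

```go
// Package datasetschema sketches the shape of one dataset row, inferred
// from the schema header and the records that follow. Types are assumed.
package datasetschema

// Hunk mirrors the JSON object in the "hunk" column.
type Hunk struct {
	ID               int      `json:"id"`
	CodeWindow       []string `json:"code_window"`         // lines shown around the edit
	Labels           []string `json:"labels"`              // "keep" or "replace", one per code_window line
	AfterEdit        []string `json:"after_edit"`          // replacement lines for the "replace" entries
	FilePath         string   `json:"file_path"`
	Type             string   `json:"type"`                // e.g. "replace"
	EditStartLineIdx int      `json:"edit_start_line_idx"` // 0-based line index in the target file
}

// Row mirrors one full record: the hunk plus the remaining columns.
type Row struct {
	Hunk            Hunk      // "hunk" column
	File            string    // full file contents (0–11.8M chars)
	FilePath        string    // 2–234 chars
	Label           int       // 0 or 1
	CommitURL       string    // 74–103 chars
	DependencyScore []float64 // always 5 scores
}
```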
{
"id": 0,
"code_window": [
" ParentID 0, ParentSchemaID 0: database \"defaultdb\" (50): processed\n",
" ParentID 0, ParentSchemaID 0: database \"postgres\" (51): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"users\" (53): referenced database ID 52: referenced descriptor not found\n",
" ParentID 52, ParentSchemaID 29: relation \"users\" (53): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n",
" ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): processed\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
" ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): referenced database ID 52: referenced descriptor not found\n"
],
"file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose",
"type": "replace",
"edit_start_line_idx": 41
} | debug doctor zipdir --verbose
----
debug doctor zipdir testdata/doctor/debugzip 21.11-52 --verbose
reading testdata/doctor/debugzip/system.descriptor.txt
reading testdata/doctor/debugzip/system.namespace.txt
WARNING: errors occurred during the production of system.jobs.txt, contents may be missing or incomplete.
reading testdata/doctor/debugzip/system.jobs.txt
Examining 37 descriptors and 42 namespace entries...
ParentID 0, ParentSchemaID 0: database "system" (1): processed
ParentID 1, ParentSchemaID 29: relation "descriptor" (3): processed
ParentID 1, ParentSchemaID 29: relation "users" (4): processed
ParentID 1, ParentSchemaID 29: relation "zones" (5): processed
ParentID 1, ParentSchemaID 29: relation "settings" (6): processed
ParentID 1, ParentSchemaID 29: relation "tenants" (8): processed
ParentID 1, ParentSchemaID 29: relation "lease" (11): processed
ParentID 1, ParentSchemaID 29: relation "eventlog" (12): processed
ParentID 1, ParentSchemaID 29: relation "rangelog" (13): processed
ParentID 1, ParentSchemaID 29: relation "ui" (14): processed
ParentID 1, ParentSchemaID 29: relation "jobs" (15): processed
ParentID 1, ParentSchemaID 29: relation "web_sessions" (19): processed
ParentID 1, ParentSchemaID 29: relation "table_statistics" (20): processed
ParentID 1, ParentSchemaID 29: relation "locations" (21): processed
ParentID 1, ParentSchemaID 29: relation "role_members" (23): processed
ParentID 1, ParentSchemaID 29: relation "comments" (24): processed
ParentID 1, ParentSchemaID 29: relation "replication_constraint_stats" (25): processed
ParentID 1, ParentSchemaID 29: relation "replication_critical_localities" (26): processed
ParentID 1, ParentSchemaID 29: relation "replication_stats" (27): processed
ParentID 1, ParentSchemaID 29: relation "reports_meta" (28): processed
ParentID 1, ParentSchemaID 29: relation "namespace" (30): processed
ParentID 1, ParentSchemaID 29: relation "protected_ts_meta" (31): processed
ParentID 1, ParentSchemaID 29: relation "protected_ts_records" (32): processed
ParentID 1, ParentSchemaID 29: relation "role_options" (33): processed
ParentID 1, ParentSchemaID 29: relation "statement_bundle_chunks" (34): processed
ParentID 1, ParentSchemaID 29: relation "statement_diagnostics_requests" (35): processed
ParentID 1, ParentSchemaID 29: relation "statement_diagnostics" (36): processed
ParentID 1, ParentSchemaID 29: relation "scheduled_jobs" (37): processed
ParentID 1, ParentSchemaID 29: relation "sqlliveness" (39): processed
ParentID 0, ParentSchemaID 0: database "defaultdb" (50): processed
ParentID 0, ParentSchemaID 0: database "postgres" (51): processed
ParentID 52, ParentSchemaID 29: relation "users" (53): referenced database ID 52: referenced descriptor not found
ParentID 52, ParentSchemaID 29: relation "users" (53): processed
ParentID 52, ParentSchemaID 29: relation "vehicles" (54): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users"
ParentID 52, ParentSchemaID 29: relation "vehicles" (54): processed
ParentID 52, ParentSchemaID 29: relation "rides" (55): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users"
ParentID 52, ParentSchemaID 29: relation "rides" (55): constraint id was missing for constraint: FOREIGN KEY with name "fk_vehicle_city_ref_vehicles"
ParentID 52, ParentSchemaID 29: relation "rides" (55): processed
ParentID 52, ParentSchemaID 29: relation "vehicle_location_histories" (56): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_rides"
ParentID 52, ParentSchemaID 29: relation "vehicle_location_histories" (56): processed
ParentID 52, ParentSchemaID 29: relation "promo_codes" (57): referenced database ID 52: referenced descriptor not found
ParentID 52, ParentSchemaID 29: relation "promo_codes" (57): processed
ParentID 52, ParentSchemaID 29: relation "user_promo_codes" (58): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users"
ParentID 52, ParentSchemaID 29: relation "user_promo_codes" (58): processed
ParentID 0, ParentSchemaID 0: namespace entry "defaultdb" (50): processed
ParentID 0, ParentSchemaID 0: namespace entry "movr" (52): descriptor not found
ParentID 0, ParentSchemaID 0: namespace entry "postgres" (51): processed
ParentID 0, ParentSchemaID 0: namespace entry "system" (1): processed
ParentID 1, ParentSchemaID 0: namespace entry "public" (29): processed
ParentID 1, ParentSchemaID 29: namespace entry "comments" (24): processed
ParentID 1, ParentSchemaID 29: namespace entry "descriptor" (3): processed
ParentID 1, ParentSchemaID 29: namespace entry "eventlog" (12): processed
ParentID 1, ParentSchemaID 29: namespace entry "jobs" (15): processed
ParentID 1, ParentSchemaID 29: namespace entry "lease" (11): processed
ParentID 1, ParentSchemaID 29: namespace entry "locations" (21): processed
ParentID 1, ParentSchemaID 29: namespace entry "namespace" (30): processed
ParentID 1, ParentSchemaID 29: namespace entry "protected_ts_meta" (31): processed
ParentID 1, ParentSchemaID 29: namespace entry "protected_ts_records" (32): processed
ParentID 1, ParentSchemaID 29: namespace entry "rangelog" (13): processed
ParentID 1, ParentSchemaID 29: namespace entry "replication_constraint_stats" (25): processed
ParentID 1, ParentSchemaID 29: namespace entry "replication_critical_localities" (26): processed
ParentID 1, ParentSchemaID 29: namespace entry "replication_stats" (27): processed
ParentID 1, ParentSchemaID 29: namespace entry "reports_meta" (28): processed
ParentID 1, ParentSchemaID 29: namespace entry "role_members" (23): processed
ParentID 1, ParentSchemaID 29: namespace entry "role_options" (33): processed
ParentID 1, ParentSchemaID 29: namespace entry "scheduled_jobs" (37): processed
ParentID 1, ParentSchemaID 29: namespace entry "settings" (6): processed
ParentID 1, ParentSchemaID 29: namespace entry "sqlliveness" (39): processed
ParentID 1, ParentSchemaID 29: namespace entry "statement_bundle_chunks" (34): processed
ParentID 1, ParentSchemaID 29: namespace entry "statement_diagnostics" (36): processed
ParentID 1, ParentSchemaID 29: namespace entry "statement_diagnostics_requests" (35): processed
ParentID 1, ParentSchemaID 29: namespace entry "table_statistics" (20): processed
ParentID 1, ParentSchemaID 29: namespace entry "tenants" (8): processed
ParentID 1, ParentSchemaID 29: namespace entry "ui" (14): processed
ParentID 1, ParentSchemaID 29: namespace entry "users" (4): processed
ParentID 1, ParentSchemaID 29: namespace entry "web_sessions" (19): processed
ParentID 1, ParentSchemaID 29: namespace entry "zones" (5): processed
ParentID 50, ParentSchemaID 0: namespace entry "public" (29): processed
ParentID 51, ParentSchemaID 0: namespace entry "public" (29): processed
ParentID 52, ParentSchemaID 0: namespace entry "public" (29): processed
ParentID 52, ParentSchemaID 29: namespace entry "promo_codes" (57): processed
ParentID 52, ParentSchemaID 29: namespace entry "rides" (55): processed
ParentID 52, ParentSchemaID 29: namespace entry "user_promo_codes" (58): processed
ParentID 52, ParentSchemaID 29: namespace entry "users" (53): processed
ParentID 52, ParentSchemaID 29: namespace entry "vehicle_location_histories" (56): processed
ParentID 52, ParentSchemaID 29: namespace entry "vehicles" (54): processed
Examining 2 jobs...
Processing job 587337426939772929
Processing job 587337426984566785
job 587337426984566785: running schema change GC refers to missing table descriptor(s) [59]; existing descriptors that still need to be dropped []; job safe to delete: true.
ERROR: validation failed
| pkg/cli/testdata/doctor/test_examine_zipdir_verbose | 1 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.9443483948707581,
0.11422600597143173,
0.0001909757120301947,
0.0006782904965803027,
0.2802562117576599
] |
{
"id": 0,
"code_window": [
" ParentID 0, ParentSchemaID 0: database \"defaultdb\" (50): processed\n",
" ParentID 0, ParentSchemaID 0: database \"postgres\" (51): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"users\" (53): referenced database ID 52: referenced descriptor not found\n",
" ParentID 52, ParentSchemaID 29: relation \"users\" (53): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n",
" ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): processed\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
" ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): referenced database ID 52: referenced descriptor not found\n"
],
"file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose",
"type": "replace",
"edit_start_line_idx": 41
} | // Copyright 2020 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package sqlproxyccl
import (
"crypto/tls"
"net"
"github.com/jackc/pgproto3/v2"
)
// FrontendAdmit is the default implementation of a frontend admitter. It can
// upgrade to an optional SSL connection, and will handle and verify the startup
// message received from the PG SQL client. The connection returned should never
// be nil in case of error. Depending on whether the error happened before the
// connection was upgraded to TLS or not it will either be the original or the
// TLS connection.
var FrontendAdmit = func(
conn net.Conn, incomingTLSConfig *tls.Config,
) (net.Conn, *pgproto3.StartupMessage, error) {
// `conn` could be replaced by `conn` embedded in a `tls.Conn` connection,
// hence it's important to close `conn` rather than `proxyConn` since closing
// the latter will not call `Close` method of `tls.Conn`.
var sniServerName string
// Read first message from client.
m, err := pgproto3.NewBackend(pgproto3.NewChunkReader(conn), conn).ReceiveStartupMessage()
if err != nil {
return conn, nil, newErrorf(codeClientReadFailed, "while receiving startup message")
}
// CancelRequest is unencrypted and unauthenticated, regardless of whether
// the server requires TLS connections. For now, ignore the request to cancel,
// and send back a nil StartupMessage, which will cause the proxy to just
// close the connection in response.
if _, ok := m.(*pgproto3.CancelRequest); ok {
return conn, nil, nil
}
// If we have an incoming TLS Config, require that the client initiates with
// an SSLRequest message.
if incomingTLSConfig != nil {
if _, ok := m.(*pgproto3.SSLRequest); !ok {
code := codeUnexpectedInsecureStartupMessage
return conn, nil, newErrorf(code, "unsupported startup message: %T", m)
}
_, err = conn.Write([]byte{pgAcceptSSLRequest})
if err != nil {
return conn, nil, newErrorf(codeClientWriteFailed, "acking SSLRequest: %v", err)
}
cfg := incomingTLSConfig.Clone()
cfg.GetConfigForClient = func(h *tls.ClientHelloInfo) (*tls.Config, error) {
sniServerName = h.ServerName
return nil, nil
}
conn = tls.Server(conn, cfg)
// Now that SSL is established, read the encrypted startup message.
m, err = pgproto3.NewBackend(pgproto3.NewChunkReader(conn), conn).ReceiveStartupMessage()
if err != nil {
return conn, nil, newErrorf(codeClientReadFailed, "receiving post-TLS startup message: %v", err)
}
}
if startup, ok := m.(*pgproto3.StartupMessage); ok {
// Add the sniServerName (if used) as parameter
if sniServerName != "" {
startup.Parameters["sni-server"] = sniServerName
}
return conn, startup, nil
}
code := codeUnexpectedStartupMessage
return conn, nil, newErrorf(code, "unsupported post-TLS startup message: %T", m)
}
| pkg/ccl/sqlproxyccl/frontend_admitter.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.00017748989921528846,
0.00016985720139928162,
0.0001599833049112931,
0.000170803556102328,
0.00000491416358272545
] |
{
"id": 0,
"code_window": [
" ParentID 0, ParentSchemaID 0: database \"defaultdb\" (50): processed\n",
" ParentID 0, ParentSchemaID 0: database \"postgres\" (51): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"users\" (53): referenced database ID 52: referenced descriptor not found\n",
" ParentID 52, ParentSchemaID 29: relation \"users\" (53): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n",
" ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): processed\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
" ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): referenced database ID 52: referenced descriptor not found\n"
],
"file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose",
"type": "replace",
"edit_start_line_idx": 41
} | post
----
----
searchIssue repo:"cockroach" user:"cockroachdb" is:issue is:open in:title label:"C-test-failure" sort:created-desc "cmd/roachtest: some-roachtest failed" label:branch-release-0.1: [github.Issue{Number:30, Title:"boom", Labels:[github.Label{URL:"fake", Name:"C-test-failure"} github.Label{URL:"fake", Name:"O-robot"} github.Label{URL:"fake", Name:"release-0.1"}]}]
searchIssue repo:"cockroach" user:"cockroachdb" is:issue is:open in:title label:"C-test-failure" sort:created-desc "cmd/roachtest: some-roachtest failed" -label:branch-release-0.1: [github.Issue{Number:31, Title:"boom related", Labels:[github.Label{URL:"fake", Name:"C-test-failure"} github.Label{URL:"fake", Name:"O-robot"} github.Label{URL:"fake", Name:"release-0.2"}]}]
createComment owner=cockroachdb repo=cockroach issue=30:
cmd/roachtest.some-roachtest [failed](https://teamcity.example.com/viewLog.html?buildId=8008135&tab=buildLog) on release-0.1 @ [abcd123](https://github.com/cockroachdb/cockroach/commits/abcd123):
```
boom
```
<details><summary>Help</summary>
<p>
See: [FooBar README](https://github.com/cockroachdb/cockroach)
Parameters in this failure:
- TAGS=deadlock
- GOFLAGS=race
</p>
</details>
<details><summary>Same failure on other branches</summary>
<p>
- #31 boom related [C-test-failure O-robot release-0.2]
</p>
</details>
<sub>
[This test on roachdash](https://roachdash.crdb.dev/?filter=status:open%20t:.*some-roachtest.*&sort=title+created&display=lastcommented+project) | [Improve this report!](https://github.com/cockroachdb/cockroach/tree/master/pkg/cmd/internal/issues)
</sub>
Rendered: https://github.com/cockroachdb/cockroach/issues/new?body=cmd%2Froachtest.some-roachtest+%5Bfailed%5D%28https%3A%2F%2Fteamcity.example.com%2FviewLog.html%3FbuildId%3D8008135%26tab%3DbuildLog%29+on+release-0.1+%40+%5Babcd123%5D%28https%3A%2F%2Fgithub.com%2Fcockroachdb%2Fcockroach%2Fcommits%2Fabcd123%29%3A%0A%0A%0A%60%60%60%0Aboom%0A%60%60%60%0A%3Cdetails%3E%3Csummary%3EHelp%3C%2Fsummary%3E%0A%3Cp%3E%0A%0A%0ASee%3A+%5BFooBar+README%5D%28https%3A%2F%2Fgithub.com%2Fcockroachdb%2Fcockroach%29%0A%0AParameters+in+this+failure%3A%0A%0A-+TAGS%3Ddeadlock%0A%0A-+GOFLAGS%3Drace%0A%3C%2Fp%3E%0A%3C%2Fdetails%3E%0A%3Cdetails%3E%3Csummary%3ESame+failure+on+other+branches%3C%2Fsummary%3E%0A%3Cp%3E%0A%0A-+%2331+boom+related+%5BC-test-failure+O-robot+release-0.2%5D%0A%3C%2Fp%3E%0A%3C%2Fdetails%3E%0A%3Csub%3E%0A%0A%5BThis+test+on+roachdash%5D%28https%3A%2F%2Froachdash.crdb.dev%2F%3Ffilter%3Dstatus%3Aopen%2520t%3A.%2Asome-roachtest.%2A%26sort%3Dtitle%2Bcreated%26display%3Dlastcommented%2Bproject%29+%7C+%5BImprove+this+report%21%5D%28https%3A%2F%2Fgithub.com%2Fcockroachdb%2Fcockroach%2Ftree%2Fmaster%2Fpkg%2Fcmd%2Finternal%2Fissues%29%0A%3C%2Fsub%3E%0A&title=%3Ccomment%3E
----
----
| pkg/cmd/internal/issues/testdata/post/failure-with-url-matching-and-related-issue.txt | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.0001754599215928465,
0.00017166875477414578,
0.00016664880968164653,
0.00017128614126704633,
0.00000312298902827024
] |
{
"id": 0,
"code_window": [
" ParentID 0, ParentSchemaID 0: database \"defaultdb\" (50): processed\n",
" ParentID 0, ParentSchemaID 0: database \"postgres\" (51): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"users\" (53): referenced database ID 52: referenced descriptor not found\n",
" ParentID 52, ParentSchemaID 29: relation \"users\" (53): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n",
" ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): processed\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
" ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): referenced database ID 52: referenced descriptor not found\n"
],
"file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose",
"type": "replace",
"edit_start_line_idx": 41
} | statement ok
SET experimental_enable_unique_without_index_constraints = true
statement ok
CREATE TABLE test_kv(k INT PRIMARY KEY, v INT, w DECIMAL);
CREATE UNIQUE INDEX test_v_idx ON test_kv(v);
CREATE INDEX test_v_idx2 ON test_kv(v DESC) STORING(w);
CREATE INDEX test_v_idx3 ON test_kv(w) STORING(v);
CREATE TABLE test_kvr1(k INT PRIMARY KEY REFERENCES test_kv(k));
CREATE TABLE test_kvr2(k INT, v INT UNIQUE REFERENCES test_kv(k));
CREATE TABLE test_kvr3(k INT, v INT UNIQUE REFERENCES test_kv(v));
CREATE TABLE test_kvi1(k INT PRIMARY KEY);
CREATE TABLE test_kvi2(k INT PRIMARY KEY, v INT);
CREATE UNIQUE INDEX test_kvi2_idx ON test_kvi2(v);
CREATE VIEW test_v1 AS SELECT v FROM test_kv;
CREATE VIEW test_v2 AS SELECT v FROM test_v1;
CREATE TABLE test_uwi_parent(a INT UNIQUE WITHOUT INDEX);
CREATE TABLE test_uwi_child(a INT REFERENCES test_uwi_parent(a));
query ITITTBTB colnames
SELECT * FROM crdb_internal.table_columns WHERE descriptor_name LIKE 'test_%' ORDER BY descriptor_id, column_id
----
descriptor_id descriptor_name column_id column_name column_type nullable default_expr hidden
106 test_kv 1 k family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false false NULL false
106 test_kv 2 v family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false true NULL false
106 test_kv 3 w family:DecimalFamily width:0 precision:0 locale:"" visible_type:0 oid:1700 time_precision_is_set:false true NULL false
107 test_kvr1 1 k family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false false NULL false
108 test_kvr2 1 k family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false true NULL false
108 test_kvr2 2 v family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false true NULL false
108 test_kvr2 3 rowid family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false false unique_rowid() true
109 test_kvr3 1 k family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false true NULL false
109 test_kvr3 2 v family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false true NULL false
109 test_kvr3 3 rowid family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false false unique_rowid() true
110 test_kvi1 1 k family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false false NULL false
111 test_kvi2 1 k family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false false NULL false
111 test_kvi2 2 v family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false true NULL false
112 test_v1 1 v family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false true NULL false
113 test_v2 1 v family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false true NULL false
114 test_uwi_parent 1 a family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false true NULL false
114 test_uwi_parent 2 rowid family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false false unique_rowid() true
115 test_uwi_child 1 a family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false true NULL false
115 test_uwi_child 2 rowid family:IntFamily width:64 precision:0 locale:"" visible_type:0 oid:20 time_precision_is_set:false false unique_rowid() true
query ITITTBBB colnames
SELECT descriptor_id, descriptor_name, index_id, index_name, index_type, is_unique, is_inverted, is_sharded
FROM crdb_internal.table_indexes
WHERE descriptor_name LIKE 'test_%'
ORDER BY descriptor_id, index_id
----
descriptor_id descriptor_name index_id index_name index_type is_unique is_inverted is_sharded
106 test_kv 1 test_kv_pkey primary true false false
106 test_kv 2 test_v_idx secondary true false false
106 test_kv 3 test_v_idx2 secondary false false false
106 test_kv 4 test_v_idx3 secondary false false false
107 test_kvr1 1 test_kvr1_pkey primary true false false
108 test_kvr2 1 test_kvr2_pkey primary true false false
108 test_kvr2 2 test_kvr2_v_key secondary true false false
109 test_kvr3 1 test_kvr3_pkey primary true false false
109 test_kvr3 2 test_kvr3_v_key secondary true false false
110 test_kvi1 1 test_kvi1_pkey primary true false false
111 test_kvi2 1 test_kvi2_pkey primary true false false
111 test_kvi2 2 test_kvi2_idx secondary true false false
112 test_v1 0 · primary false false false
113 test_v2 0 · primary false false false
114 test_uwi_parent 1 test_uwi_parent_pkey primary true false false
115 test_uwi_child 1 test_uwi_child_pkey primary true false false
query ITITTITTB colnames
SELECT * FROM crdb_internal.index_columns WHERE descriptor_name LIKE 'test_%' ORDER BY descriptor_id, index_id, column_type, column_id
----
descriptor_id descriptor_name index_id index_name column_type column_id column_name column_direction implicit
106 test_kv 1 test_kv_pkey key 1 k ASC false
106 test_kv 2 test_v_idx extra 1 NULL NULL false
106 test_kv 2 test_v_idx key 2 v ASC false
106 test_kv 3 test_v_idx2 extra 1 NULL NULL false
106 test_kv 3 test_v_idx2 key 2 v DESC false
106 test_kv 3 test_v_idx2 storing 3 NULL NULL false
106 test_kv 4 test_v_idx3 composite 3 NULL NULL false
106 test_kv 4 test_v_idx3 extra 1 NULL NULL false
106 test_kv 4 test_v_idx3 key 3 w ASC false
106 test_kv 4 test_v_idx3 storing 2 NULL NULL false
107 test_kvr1 1 test_kvr1_pkey key 1 k ASC false
108 test_kvr2 1 test_kvr2_pkey key 3 rowid ASC false
108 test_kvr2 2 test_kvr2_v_key extra 3 NULL NULL false
108 test_kvr2 2 test_kvr2_v_key key 2 v ASC false
109 test_kvr3 1 test_kvr3_pkey key 3 rowid ASC false
109 test_kvr3 2 test_kvr3_v_key extra 3 NULL NULL false
109 test_kvr3 2 test_kvr3_v_key key 2 v ASC false
110 test_kvi1 1 test_kvi1_pkey key 1 k ASC false
111 test_kvi2 1 test_kvi2_pkey key 1 k ASC false
111 test_kvi2 2 test_kvi2_idx extra 1 NULL NULL false
111 test_kvi2 2 test_kvi2_idx key 2 v ASC false
114 test_uwi_parent 1 test_uwi_parent_pkey key 2 rowid ASC false
115 test_uwi_child 1 test_uwi_child_pkey key 2 rowid ASC false
query ITIIITITT colnames
SELECT * FROM crdb_internal.backward_dependencies WHERE descriptor_name LIKE 'test_%' ORDER BY descriptor_id, index_id, dependson_type, dependson_id, dependson_index_id
----
descriptor_id descriptor_name index_id column_id dependson_id dependson_type dependson_index_id dependson_name dependson_details
107 test_kvr1 NULL NULL 106 fk 1 test_kvr1_k_fkey NULL
108 test_kvr2 NULL NULL 106 fk 1 test_kvr2_v_fkey NULL
109 test_kvr3 NULL NULL 106 fk 2 test_kvr3_v_fkey NULL
112 test_v1 NULL NULL 106 view NULL NULL NULL
113 test_v2 NULL NULL 112 view NULL NULL NULL
115 test_uwi_child NULL NULL 114 fk 0 test_uwi_child_a_fkey NULL
query ITIITITT colnames
SELECT * FROM crdb_internal.forward_dependencies WHERE descriptor_name LIKE 'test_%' ORDER BY descriptor_id, index_id, dependedonby_type, dependedonby_id, dependedonby_index_id
----
descriptor_id descriptor_name index_id dependedonby_id dependedonby_type dependedonby_index_id dependedonby_name dependedonby_details
106 test_kv NULL 107 fk NULL NULL NULL
106 test_kv NULL 108 fk NULL NULL NULL
106 test_kv NULL 109 fk NULL NULL NULL
106 test_kv NULL 112 view 0 NULL Columns: [2]
112 test_v1 NULL 113 view 0 NULL Columns: [1]
114 test_uwi_parent NULL 115 fk NULL NULL NULL
# Checks view dependencies (#17306)
statement ok
CREATE TABLE moretest_t(k INT, v INT);
CREATE VIEW moretest_v AS SELECT v FROM moretest_t WHERE FALSE
query ITIIITITT colnames
SELECT * FROM crdb_internal.backward_dependencies WHERE descriptor_name LIKE 'moretest_%' ORDER BY descriptor_id, index_id, dependson_type, dependson_id, dependson_index_id
----
descriptor_id descriptor_name index_id column_id dependson_id dependson_type dependson_index_id dependson_name dependson_details
117 moretest_v NULL NULL 116 view NULL NULL NULL
query ITIITITT colnames
SELECT * FROM crdb_internal.forward_dependencies WHERE descriptor_name LIKE 'moretest_%' ORDER BY descriptor_id, index_id, dependedonby_type, dependedonby_id, dependedonby_index_id
----
descriptor_id descriptor_name index_id dependedonby_id dependedonby_type dependedonby_index_id dependedonby_name dependedonby_details
116 moretest_t NULL 117 view 0 NULL Columns: [2]
# Check sequence dependencies.
statement ok
CREATE SEQUENCE blog_posts_id_seq
statement ok
CREATE TABLE blog_posts (id INT PRIMARY KEY DEFAULT nextval('blog_posts_id_seq'), title text)
query ITIIITITT colnames
SELECT * FROM crdb_internal.backward_dependencies WHERE descriptor_name LIKE 'blog_posts'
----
descriptor_id descriptor_name index_id column_id dependson_id dependson_type dependson_index_id dependson_name dependson_details
119 blog_posts NULL 1 118 sequence NULL NULL NULL
query ITIITITT colnames
SELECT * FROM crdb_internal.forward_dependencies WHERE descriptor_name LIKE 'blog_posts%'
----
descriptor_id descriptor_name index_id dependedonby_id dependedonby_type dependedonby_index_id dependedonby_name dependedonby_details
118 blog_posts_id_seq NULL 119 sequence 0 NULL Columns: [1]
| pkg/sql/logictest/testdata/logic_test/dependencies | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.0001751040981616825,
0.00016951175348367542,
0.00016317021800205112,
0.0001704189198790118,
0.0000033402616281819064
] |
{
"id": 1,
"code_window": [
" ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"rides\" (55): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n",
" ParentID 52, ParentSchemaID 29: relation \"rides\" (55): constraint id was missing for constraint: FOREIGN KEY with name \"fk_vehicle_city_ref_vehicles\"\n",
" ParentID 52, ParentSchemaID 29: relation \"rides\" (55): processed\n"
],
"labels": [
"keep",
"replace",
"replace",
"keep"
],
"after_edit": [
" ParentID 52, ParentSchemaID 29: relation \"rides\" (55): referenced database ID 52: referenced descriptor not found\n"
],
"file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose",
"type": "replace",
"edit_start_line_idx": 43
} | debug doctor zipdir --verbose
----
debug doctor zipdir testdata/doctor/debugzip 21.11-52 --verbose
reading testdata/doctor/debugzip/system.descriptor.txt
reading testdata/doctor/debugzip/system.namespace.txt
WARNING: errors occurred during the production of system.jobs.txt, contents may be missing or incomplete.
reading testdata/doctor/debugzip/system.jobs.txt
Examining 37 descriptors and 42 namespace entries...
ParentID 0, ParentSchemaID 0: database "system" (1): processed
ParentID 1, ParentSchemaID 29: relation "descriptor" (3): processed
ParentID 1, ParentSchemaID 29: relation "users" (4): processed
ParentID 1, ParentSchemaID 29: relation "zones" (5): processed
ParentID 1, ParentSchemaID 29: relation "settings" (6): processed
ParentID 1, ParentSchemaID 29: relation "tenants" (8): processed
ParentID 1, ParentSchemaID 29: relation "lease" (11): processed
ParentID 1, ParentSchemaID 29: relation "eventlog" (12): processed
ParentID 1, ParentSchemaID 29: relation "rangelog" (13): processed
ParentID 1, ParentSchemaID 29: relation "ui" (14): processed
ParentID 1, ParentSchemaID 29: relation "jobs" (15): processed
ParentID 1, ParentSchemaID 29: relation "web_sessions" (19): processed
ParentID 1, ParentSchemaID 29: relation "table_statistics" (20): processed
ParentID 1, ParentSchemaID 29: relation "locations" (21): processed
ParentID 1, ParentSchemaID 29: relation "role_members" (23): processed
ParentID 1, ParentSchemaID 29: relation "comments" (24): processed
ParentID 1, ParentSchemaID 29: relation "replication_constraint_stats" (25): processed
ParentID 1, ParentSchemaID 29: relation "replication_critical_localities" (26): processed
ParentID 1, ParentSchemaID 29: relation "replication_stats" (27): processed
ParentID 1, ParentSchemaID 29: relation "reports_meta" (28): processed
ParentID 1, ParentSchemaID 29: relation "namespace" (30): processed
ParentID 1, ParentSchemaID 29: relation "protected_ts_meta" (31): processed
ParentID 1, ParentSchemaID 29: relation "protected_ts_records" (32): processed
ParentID 1, ParentSchemaID 29: relation "role_options" (33): processed
ParentID 1, ParentSchemaID 29: relation "statement_bundle_chunks" (34): processed
ParentID 1, ParentSchemaID 29: relation "statement_diagnostics_requests" (35): processed
ParentID 1, ParentSchemaID 29: relation "statement_diagnostics" (36): processed
ParentID 1, ParentSchemaID 29: relation "scheduled_jobs" (37): processed
ParentID 1, ParentSchemaID 29: relation "sqlliveness" (39): processed
ParentID 0, ParentSchemaID 0: database "defaultdb" (50): processed
ParentID 0, ParentSchemaID 0: database "postgres" (51): processed
ParentID 52, ParentSchemaID 29: relation "users" (53): referenced database ID 52: referenced descriptor not found
ParentID 52, ParentSchemaID 29: relation "users" (53): processed
ParentID 52, ParentSchemaID 29: relation "vehicles" (54): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users"
ParentID 52, ParentSchemaID 29: relation "vehicles" (54): processed
ParentID 52, ParentSchemaID 29: relation "rides" (55): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users"
ParentID 52, ParentSchemaID 29: relation "rides" (55): constraint id was missing for constraint: FOREIGN KEY with name "fk_vehicle_city_ref_vehicles"
ParentID 52, ParentSchemaID 29: relation "rides" (55): processed
ParentID 52, ParentSchemaID 29: relation "vehicle_location_histories" (56): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_rides"
ParentID 52, ParentSchemaID 29: relation "vehicle_location_histories" (56): processed
ParentID 52, ParentSchemaID 29: relation "promo_codes" (57): referenced database ID 52: referenced descriptor not found
ParentID 52, ParentSchemaID 29: relation "promo_codes" (57): processed
ParentID 52, ParentSchemaID 29: relation "user_promo_codes" (58): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users"
ParentID 52, ParentSchemaID 29: relation "user_promo_codes" (58): processed
ParentID 0, ParentSchemaID 0: namespace entry "defaultdb" (50): processed
ParentID 0, ParentSchemaID 0: namespace entry "movr" (52): descriptor not found
ParentID 0, ParentSchemaID 0: namespace entry "postgres" (51): processed
ParentID 0, ParentSchemaID 0: namespace entry "system" (1): processed
ParentID 1, ParentSchemaID 0: namespace entry "public" (29): processed
ParentID 1, ParentSchemaID 29: namespace entry "comments" (24): processed
ParentID 1, ParentSchemaID 29: namespace entry "descriptor" (3): processed
ParentID 1, ParentSchemaID 29: namespace entry "eventlog" (12): processed
ParentID 1, ParentSchemaID 29: namespace entry "jobs" (15): processed
ParentID 1, ParentSchemaID 29: namespace entry "lease" (11): processed
ParentID 1, ParentSchemaID 29: namespace entry "locations" (21): processed
ParentID 1, ParentSchemaID 29: namespace entry "namespace" (30): processed
ParentID 1, ParentSchemaID 29: namespace entry "protected_ts_meta" (31): processed
ParentID 1, ParentSchemaID 29: namespace entry "protected_ts_records" (32): processed
ParentID 1, ParentSchemaID 29: namespace entry "rangelog" (13): processed
ParentID 1, ParentSchemaID 29: namespace entry "replication_constraint_stats" (25): processed
ParentID 1, ParentSchemaID 29: namespace entry "replication_critical_localities" (26): processed
ParentID 1, ParentSchemaID 29: namespace entry "replication_stats" (27): processed
ParentID 1, ParentSchemaID 29: namespace entry "reports_meta" (28): processed
ParentID 1, ParentSchemaID 29: namespace entry "role_members" (23): processed
ParentID 1, ParentSchemaID 29: namespace entry "role_options" (33): processed
ParentID 1, ParentSchemaID 29: namespace entry "scheduled_jobs" (37): processed
ParentID 1, ParentSchemaID 29: namespace entry "settings" (6): processed
ParentID 1, ParentSchemaID 29: namespace entry "sqlliveness" (39): processed
ParentID 1, ParentSchemaID 29: namespace entry "statement_bundle_chunks" (34): processed
ParentID 1, ParentSchemaID 29: namespace entry "statement_diagnostics" (36): processed
ParentID 1, ParentSchemaID 29: namespace entry "statement_diagnostics_requests" (35): processed
ParentID 1, ParentSchemaID 29: namespace entry "table_statistics" (20): processed
ParentID 1, ParentSchemaID 29: namespace entry "tenants" (8): processed
ParentID 1, ParentSchemaID 29: namespace entry "ui" (14): processed
ParentID 1, ParentSchemaID 29: namespace entry "users" (4): processed
ParentID 1, ParentSchemaID 29: namespace entry "web_sessions" (19): processed
ParentID 1, ParentSchemaID 29: namespace entry "zones" (5): processed
ParentID 50, ParentSchemaID 0: namespace entry "public" (29): processed
ParentID 51, ParentSchemaID 0: namespace entry "public" (29): processed
ParentID 52, ParentSchemaID 0: namespace entry "public" (29): processed
ParentID 52, ParentSchemaID 29: namespace entry "promo_codes" (57): processed
ParentID 52, ParentSchemaID 29: namespace entry "rides" (55): processed
ParentID 52, ParentSchemaID 29: namespace entry "user_promo_codes" (58): processed
ParentID 52, ParentSchemaID 29: namespace entry "users" (53): processed
ParentID 52, ParentSchemaID 29: namespace entry "vehicle_location_histories" (56): processed
ParentID 52, ParentSchemaID 29: namespace entry "vehicles" (54): processed
Examining 2 jobs...
Processing job 587337426939772929
Processing job 587337426984566785
job 587337426984566785: running schema change GC refers to missing table descriptor(s) [59]; existing descriptors that still need to be dropped []; job safe to delete: true.
ERROR: validation failed
| pkg/cli/testdata/doctor/test_examine_zipdir_verbose | 1 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.9944576025009155,
0.10318175703287125,
0.00020600168500095606,
0.0011755257146432996,
0.2971707880496979
] |
{
"id": 1,
"code_window": [
" ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"rides\" (55): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n",
" ParentID 52, ParentSchemaID 29: relation \"rides\" (55): constraint id was missing for constraint: FOREIGN KEY with name \"fk_vehicle_city_ref_vehicles\"\n",
" ParentID 52, ParentSchemaID 29: relation \"rides\" (55): processed\n"
],
"labels": [
"keep",
"replace",
"replace",
"keep"
],
"after_edit": [
" ParentID 52, ParentSchemaID 29: relation \"rides\" (55): referenced database ID 52: referenced descriptor not found\n"
],
"file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose",
"type": "replace",
"edit_start_line_idx": 43
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import { analytics } from "src/redux/analytics";
export function trackTerminateSession() {
const boundTrack = analytics.track.bind(analytics);
(() => {
boundTrack({
event: "Terminate Session",
});
})();
}
export function trackTerminateQuery() {
const boundTrack = analytics.track.bind(analytics);
(() => {
boundTrack({
event: "Terminate Query",
});
})();
}
| pkg/ui/workspaces/db-console/src/util/analytics/trackTerminate.tsx | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.00017951583140529692,
0.00017653295071795583,
0.00017389531421940774,
0.00017618767742533237,
0.000002307517888766597
] |
{
"id": 1,
"code_window": [
" ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"rides\" (55): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n",
" ParentID 52, ParentSchemaID 29: relation \"rides\" (55): constraint id was missing for constraint: FOREIGN KEY with name \"fk_vehicle_city_ref_vehicles\"\n",
" ParentID 52, ParentSchemaID 29: relation \"rides\" (55): processed\n"
],
"labels": [
"keep",
"replace",
"replace",
"keep"
],
"after_edit": [
" ParentID 52, ParentSchemaID 29: relation \"rides\" (55): referenced database ID 52: referenced descriptor not found\n"
],
"file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose",
"type": "replace",
"edit_start_line_idx": 43
} | pretty
#
# Generate code for interesting rule.
#
[Relational]
define Select {
# Input comment.
Input RelExpr
Filters FiltersExpr
}
[Relational, Join, JoinNonApply]
define InnerJoin {
Left RelExpr
Right RelExpr
On FiltersExpr
}
[Scalar, Bool, List]
define Filters {
}
[Scalar, Bool, ListItem, ScalarProps]
define FiltersItem {
Condition ScalarExpr
}
# ---
# Unattached comment.
# ---
[PushSelectIntoJoinLeft, Normalize]
(Select | Other
$input:(InnerJoin | InnerJoinApply | SomethingElse | Many | Things | LooooooooongLine | Much | More | Than | EightyChars
$left:3
$right:*
$on:*
)
$filters:[
...
$item:* & (IsBoundBy $item $leftCols:(OutputCols $left))
...
]
)
=>
(Select
((OpName $input)
(Select
$left
(ExtractBoundConditions $filters $leftCols)
)
$right
$on
)
(ExtractUnboundConditions $filters $leftCols)
)
[TestSingleLine]
(Select $input:(InnerJoin | InnerJoinApply $left:* $right:* $on:*))
=>
(False)
[TestNestedAnd]
(Select
$right:* &
(HasOuterCols $right) &
^(GroupBy | DistinctOn $input:* $aggregations:* $groupingPrivate:*) &
(IsUnorderedGrouping $groupingPrivate)
$left: * & (Blah)
)
=>
(False)
[TestLet]
(Select
$input:*
$filters:* &
(Let ($a):(Foo $input) $a) &
(Let ($a $b $c $ok):(SplitFilters $input $filters) $ok) &
(Let ($filtersA $filtersB $filtersC $filtersD $filtersE $ok):(SplitFilters $input $filters) $ok) &
(Let ($a $b $c $ok):(SplitFilters $input $filters $long $arg $list) $ok) &
(Let ($filtersA $filtersB $filtersC $ok):(SplitFilters $input $filters $long $arg $list $longer) $ok) &
(IsValid (Let ($filtersX $filtersY):(SplitFilters $input $filters) $filtersX)) &
(OuterFunc (InnerFunc (Let ($foo $bar):(SplitFilters $input $filters) $foo))) &
(Let ($foo $bar $baz):(Split (Let ($foo $bar $baz):(SplitAgain $a $b $c) $a)) $foo)
)
=>
(False)
----
----
#
# Generate code for interesting rule.
#
[Relational]
define Select {
# Input comment.
Input RelExpr
Filters FiltersExpr
}
[Relational, Join, JoinNonApply]
define InnerJoin {
Left RelExpr
Right RelExpr
On FiltersExpr
}
[Scalar, Bool, List]
define Filters {
}
[Scalar, Bool, ListItem, ScalarProps]
define FiltersItem {
Condition ScalarExpr
}
# ---
# Unattached comment.
# ---
[PushSelectIntoJoinLeft, Normalize]
(Select | Other
$input:(InnerJoin | InnerJoinApply | SomethingElse | Many
| Things | LooooooooongLine | Much | More | Than
| EightyChars
$left:3
$right:*
$on:*
)
$filters:[
...
$item:* & (IsBoundBy $item $leftCols:(OutputCols $left))
...
]
)
=>
(Select
((OpName $input)
(Select
$left
(ExtractBoundConditions $filters $leftCols)
)
$right
$on
)
(ExtractUnboundConditions $filters $leftCols)
)
[TestSingleLine]
(Select
$input:(InnerJoin | InnerJoinApply $left:* $right:* $on:*)
)
=>
(False)
[TestNestedAnd]
(Select
$right:* &
(HasOuterCols $right) &
^(GroupBy | DistinctOn
$input:*
$aggregations:*
$groupingPrivate:*
) &
(IsUnorderedGrouping $groupingPrivate)
$left:* & (Blah)
)
=>
(False)
[TestLet]
(Select
$input:*
$filters:* &
(Let ($a):(Foo $input) $a) &
(Let ($a $b $c $ok):(SplitFilters $input $filters) $ok) &
(Let
(
$filtersA
$filtersB
$filtersC
$filtersD
$filtersE
$ok
):(SplitFilters $input $filters)
$ok
) &
(Let
($a $b $c $ok):(SplitFilters
$input
$filters
$long
$arg
$list
)
$ok
) &
(Let
($filtersA $filtersB $filtersC $ok):(SplitFilters
$input
$filters
$long
$arg
$list
$longer
)
$ok
) &
(IsValid
(Let
($filtersX $filtersY):(SplitFilters
$input
$filters
)
$filtersX
)
) &
(OuterFunc
(InnerFunc
(Let
($foo $bar):(SplitFilters $input $filters)
$foo
)
)
) &
(Let
($foo $bar $baz):(Split
(Let ($foo $bar $baz):(SplitAgain $a $b $c) $a)
)
$foo
)
)
=>
(False)
----
----
pretty
[Short]
(R) => (O)
----
[Short]
(R)
=>
(O)
# The closing ")" should not be printed on it's own line if the result, "$ok",
# is not printed on it's own line.
pretty
[FoldBinary, Normalize]
(Binary
$left:* & (IsConstValueOrGroupOfConstValues $left)
$right:* &
(IsConstValueOrGroupOfConstValues $right) &
(Let ($result $ok):(FoldBinary (OpName) $left $right) $ok
)
)
=>
$result
----
[FoldBinary, Normalize]
(Binary
$left:* & (IsConstValueOrGroupOfConstValues $left)
$right:* &
(IsConstValueOrGroupOfConstValues $right) &
(Let
($result $ok):(FoldBinary (OpName) $left $right) $ok
)
)
=>
$result
| pkg/sql/opt/optgen/cmd/optfmt/testdata/test | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.00017786974785849452,
0.00017261842731386423,
0.00016348386998288333,
0.00017237701104022563,
0.000002977836857098737
] |
{
"id": 1,
"code_window": [
" ParentID 52, ParentSchemaID 29: relation \"vehicles\" (54): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"rides\" (55): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n",
" ParentID 52, ParentSchemaID 29: relation \"rides\" (55): constraint id was missing for constraint: FOREIGN KEY with name \"fk_vehicle_city_ref_vehicles\"\n",
" ParentID 52, ParentSchemaID 29: relation \"rides\" (55): processed\n"
],
"labels": [
"keep",
"replace",
"replace",
"keep"
],
"after_edit": [
" ParentID 52, ParentSchemaID 29: relation \"rides\" (55): referenced database ID 52: referenced descriptor not found\n"
],
"file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose",
"type": "replace",
"edit_start_line_idx": 43
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
/*
Package fsm provides an interface for defining and working with finite-state
machines.
The package is split into two main types: Transitions and Machine. Transitions
is an immutable State graph with Events acting as the directed edges between
different States. The graph is built by calling Compile on a Pattern, which is
meant to be done at init time. This pattern is a mapping from current States to
Events that may be applied on those states to resulting Transitions. The pattern
supports pattern matching on States and Events using wildcards and variable
bindings. To add new transitions to the graph, simply adjust the Pattern
provided to Compile. Transitions are not used directly after creation, instead,
they're used by Machine instances.
Machine is an instantiation of a finite-state machine. It is given a Transitions
graph when it is created to specify its State graph. Since the Transition graph
is itself state-less, multiple Machines can be powered by the same graph
simultaneously. The Machine has an Apply(Event) method, which applies the
provided event to its current state. This does two things:
1. It may move the current State to a new State, according to the Transitions
graph.
2. It may apply an Action function on the Machine's ExtendedState, which is
extra state in a Machine that does not contribute to state transition
decisions, but that can be affected by a state transition.
See example_test.go for a full working example of a state machine with an
associated set of states and events.
This package encourages the Pattern to be declared as a map literal. When
declaring this literal, be careful to not declare two equal keys: they'll result
in the second overwriting the first with no warning because of how Go deals with
map literals. Note that keys that are not technically equal, but where one is a
superset of the other, will work as intended. E.g. the following is permitted:
Compile(Pattern{
stateOpen{retryIntent: Any} {
eventTxnFinish{}: {...}
}
stateOpen{retryIntent: True} {
eventRetriableErr{}: {...}
}
Members of this package are accessed frequently when implementing a state
machine. For that reason, it is encouraged to dot-import this package in the
file with the transitions Pattern. The respective file should be kept small and
named <name>_fsm.go; our linter doesn't complain about dot-imports in such
files.
*/
package fsm
| pkg/util/fsm/doc.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.0001794389681890607,
0.00017095690418500453,
0.00016543194942642003,
0.00017029163427650928,
0.000004505441665969556
] |
{
"id": 2,
"code_window": [
" ParentID 52, ParentSchemaID 29: relation \"rides\" (55): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_rides\"\n",
" ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): referenced database ID 52: referenced descriptor not found\n",
" ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): processed\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
" ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): referenced database ID 52: referenced descriptor not found\n"
],
"file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose",
"type": "replace",
"edit_start_line_idx": 46
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package migrations_test
import (
"context"
"fmt"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/stretchr/testify/require"
)
// TestEnsureConstraintIDs tests that constraint IDs are added as expected.
func TestEnsureConstraintIDs(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
// Start off with the version that did not support
// constraint IDs.
clusterArgs := base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
DisableAutomaticVersionUpgrade: 1,
BinaryVersionOverride: clusterversion.ByKey(
tabledesc.ConstraintIDsAddedToTableDescsVersion - 1),
},
},
},
}
c := keys.SystemSQLCodec
ctx := context.Background()
tc := testcluster.StartTestCluster(t, 1, clusterArgs)
s := tc.Server(0)
defer tc.Stopper().Stop(ctx)
sqlDB := tc.ServerConn(0)
tdb := sqlutils.MakeSQLRunner(sqlDB)
// Create table with a primary key constraint.
tdb.Exec(t, "CREATE TABLE t(name int primary key)")
// Validate the comments on constraints are blocked.
tdb.ExpectErr(t,
"pq: cannot comment on constraint",
"COMMENT ON CONSTRAINT \"t_pkey\" ON t IS 'primary_comment'")
// Validate that we have a constraint ID due to post deserialization logic
desc := desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, "defaultdb", "t")
desc.PrimaryIndex.ConstraintID = 0
require.NoError(t, s.DB().Put(
context.Background(),
catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),
desc.DescriptorProto(),
))
// Validate that the post serialization will recompute the constraint IDs
// if they are missing.
desc = desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, "defaultdb", "t")
require.Equal(t, desc.PrimaryIndex.ConstraintID, descpb.ConstraintID(2))
// If we set both the constraint ID / next value to 0, then we will have
// it assigned from scratch.
desc.PrimaryIndex.ConstraintID = 0
desc.NextConstraintID = 0
require.NoError(t, s.DB().Put(
context.Background(),
catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),
desc.DescriptorProto(),
))
// Validate that the descriptor is invalid, since the constraint IDs
// are missing.
tdb.CheckQueryResults(t,
`SELECT strpos(desc_json, 'constraintId') > 0,
strpos(desc_json, 'nextConstraintId') > 0
FROM (
SELECT jsonb_pretty(
crdb_internal.pb_to_json(
'cockroach.sql.sqlbase.Descriptor',
descriptor,
false
)
) AS desc_json
FROM system.descriptor
WHERE id = `+
fmt.Sprintf("%d", desc.GetID())+
`);`,
[][]string{{"false", "false"}},
)
// Migrate to the new cluster version.
tdb.Exec(t, `SET CLUSTER SETTING version = $1`,
clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String())
tdb.CheckQueryResultsRetry(t, "SHOW CLUSTER SETTING version",
[][]string{{clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String()}})
// Validate the constraint IDs are populated.
// Validate that the descriptor is invalid, since the constraint IDs
// are missing.
tdb.CheckQueryResults(t,
`SELECT strpos(desc_json, 'constraintId') > 0,
strpos(desc_json, 'nextConstraintId') > 0
FROM (
SELECT jsonb_pretty(
crdb_internal.pb_to_json(
'cockroach.sql.sqlbase.Descriptor',
descriptor,
false
)
) AS desc_json
FROM system.descriptor
WHERE id = `+
fmt.Sprintf("%d", desc.GetID())+
`);`,
[][]string{{"true", "true"}},
)
// Validate we can comment constraints.
tdb.Exec(t,
"COMMENT ON CONSTRAINT \"t_pkey\" ON t IS 'primary_comment'")
}
| pkg/migration/migrations/ensure_constraint_id_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.007629964500665665,
0.0007180050015449524,
0.00016343571769539267,
0.00017223868053406477,
0.00191744533367455
] |
{
"id": 2,
"code_window": [
" ParentID 52, ParentSchemaID 29: relation \"rides\" (55): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_rides\"\n",
" ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): referenced database ID 52: referenced descriptor not found\n",
" ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): processed\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
" ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): referenced database ID 52: referenced descriptor not found\n"
],
"file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose",
"type": "replace",
"edit_start_line_idx": 46
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package log_test
import (
"context"
"testing"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/logtags"
"github.com/stretchr/testify/require"
)
func TestTrace(t *testing.T) {
for _, tc := range []struct {
name string
init func(context.Context) (context.Context, *tracing.Span)
check func(*testing.T, context.Context, tracing.Recording, *tracing.Tracer)
}{
{
name: "verbose",
init: func(ctx context.Context) (context.Context, *tracing.Span) {
tracer := tracing.NewTracer()
sp := tracer.StartSpan("s", tracing.WithRecording(tracing.RecordingVerbose))
ctxWithSpan := tracing.ContextWithSpan(ctx, sp)
return ctxWithSpan, sp
},
check: func(t *testing.T, _ context.Context, rec tracing.Recording, _ *tracing.Tracer) {
if err := tracing.CheckRecordedSpans(rec, `
span: s
tags: _verbose=1
event: test1
event: test2
event: testerr
event: log
`); err != nil {
t.Fatal(err)
}
},
},
{
name: "zipkin",
init: func(ctx context.Context) (context.Context, *tracing.Span) {
tr := tracing.NewTracer()
st := cluster.MakeTestingClusterSettings()
tracing.ZipkinCollector.Override(ctx, &st.SV, "127.0.0.1:9000000")
tr.Configure(ctx, &st.SV)
return tr.StartSpanCtx(context.Background(), "foo")
},
check: func(t *testing.T, ctx context.Context, _ tracing.Recording, tr *tracing.Tracer) {
// This isn't quite a real end-to-end-check, but it is good enough
// to give us confidence that we're really passing log events to
// the span, and the tracing package in turn has tests that verify
// that a span so configured will actually log them to the external
// trace.
require.True(t, tr.HasExternalSink())
require.True(t, log.HasSpanOrEvent(ctx))
require.True(t, log.ExpensiveLogEnabled(ctx, 0 /* level */))
},
},
} {
t.Run(tc.name, func(t *testing.T) {
ctx := context.Background()
// Events to context without a trace should be no-ops.
log.Event(ctx, "should-not-show-up")
ctxWithSpan, sp := tc.init(ctx)
log.Event(ctxWithSpan, "test1")
log.VEvent(ctxWithSpan, log.NoLogV(), "test2")
log.VErrEvent(ctxWithSpan, log.NoLogV(), "testerr")
log.Info(ctxWithSpan, "log")
// Events to parent context should still be no-ops.
log.Event(ctx, "should-not-show-up")
tr := sp.Tracer()
tc.check(t, ctxWithSpan, sp.FinishAndGetRecording(tracing.RecordingVerbose), tr)
})
}
}
func TestTraceWithTags(t *testing.T) {
ctx := context.Background()
ctx = logtags.AddTag(ctx, "tag", 1)
tracer := tracing.NewTracer()
sp := tracer.StartSpan("s", tracing.WithRecording(tracing.RecordingVerbose))
ctxWithSpan := tracing.ContextWithSpan(ctx, sp)
log.Event(ctxWithSpan, "test1")
log.VEvent(ctxWithSpan, log.NoLogV(), "test2")
log.VErrEvent(ctxWithSpan, log.NoLogV(), "testerr")
log.Info(ctxWithSpan, "log")
if err := tracing.CheckRecordedSpans(sp.FinishAndGetRecording(tracing.RecordingVerbose), `
span: s
tags: _verbose=1
event: [tag=1] test1
event: [tag=1] test2
event: [tag=1] testerr
event: [tag=1] log
`); err != nil {
t.Fatal(err)
}
}
| pkg/util/log/trace_client_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.00017906841821968555,
0.00017241528257727623,
0.00016840978059917688,
0.00017212201782967895,
0.00000267729046754539
] |
{
"id": 2,
"code_window": [
" ParentID 52, ParentSchemaID 29: relation \"rides\" (55): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_rides\"\n",
" ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): referenced database ID 52: referenced descriptor not found\n",
" ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): processed\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
" ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): referenced database ID 52: referenced descriptor not found\n"
],
"file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose",
"type": "replace",
"edit_start_line_idx": 46
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package batcheval
import (
"context"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval/result"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
)
// refreshRangeTBIEnabled controls whether we use a TBI during ranged refreshes.
var refreshRangeTBIEnabled = settings.RegisterBoolSetting(
settings.SystemOnly,
"kv.refresh_range.time_bound_iterators.enabled",
"use time-bound iterators when performing ranged transaction refreshes",
util.ConstantWithMetamorphicTestBool("kv.refresh_range.time_bound_iterators.enabled", true),
)
func init() {
RegisterReadOnlyCommand(roachpb.RefreshRange, DefaultDeclareKeys, RefreshRange)
}
// RefreshRange checks whether the key range specified has any values written in
// the interval (args.RefreshFrom, header.Timestamp].
func RefreshRange(
ctx context.Context, reader storage.Reader, cArgs CommandArgs, resp roachpb.Response,
) (result.Result, error) {
args := cArgs.Args.(*roachpb.RefreshRangeRequest)
h := cArgs.Header
if h.Txn == nil {
return result.Result{}, errors.AssertionFailedf("no transaction specified to %s", args.Method())
}
// We're going to refresh up to the transaction's read timestamp.
if h.Timestamp != h.Txn.WriteTimestamp {
// We're expecting the read and write timestamp to have converged before the
// Refresh request was sent.
log.Fatalf(ctx, "expected provisional commit ts %s == read ts %s. txn: %s", h.Timestamp,
h.Txn.WriteTimestamp, h.Txn)
}
refreshTo := h.Timestamp
refreshFrom := args.RefreshFrom
if refreshFrom.IsEmpty() {
return result.Result{}, errors.AssertionFailedf("empty RefreshFrom: %s", args)
}
log.VEventf(ctx, 2, "refresh %s @[%s-%s]", args.Span(), refreshFrom, refreshTo)
tbi := refreshRangeTBIEnabled.Get(&cArgs.EvalCtx.ClusterSettings().SV)
return result.Result{}, refreshRange(reader, tbi, args.Span(), refreshFrom, refreshTo, h.Txn.ID)
}
// refreshRange iterates over the specified key span until it discovers a value
// written after the refreshFrom timestamp but before or at the refreshTo
// timestamp. The iteration observes MVCC tombstones, which must be considered
// as conflicts during a refresh. The iteration also observes intents, and any
// intent that is not owned by the specified txn ID is considered a conflict.
//
// If such a conflict is found, the function returns an error. Otherwise, no
// error is returned.
func refreshRange(
reader storage.Reader,
timeBoundIterator bool,
span roachpb.Span,
refreshFrom, refreshTo hlc.Timestamp,
txnID uuid.UUID,
) error {
// Construct an incremental iterator with the desired time bounds. Incremental
// iterators will emit MVCC tombstones by default and will emit intents when
// configured to do so (see IntentPolicy).
iter := storage.NewMVCCIncrementalIterator(reader, storage.MVCCIncrementalIterOptions{
EnableTimeBoundIteratorOptimization: timeBoundIterator,
EndKey: span.EndKey,
StartTime: refreshFrom, // exclusive
EndTime: refreshTo, // inclusive
IntentPolicy: storage.MVCCIncrementalIterIntentPolicyEmit,
})
defer iter.Close()
var meta enginepb.MVCCMetadata
iter.SeekGE(storage.MakeMVCCMetadataKey(span.Key))
for {
if ok, err := iter.Valid(); err != nil {
return err
} else if !ok {
break
}
key := iter.Key()
if !key.IsValue() {
// Found an intent. Check whether it is owned by this transaction.
// If so, proceed with iteration. Otherwise, return an error.
if err := protoutil.Unmarshal(iter.UnsafeValue(), &meta); err != nil {
return errors.Wrapf(err, "unmarshaling mvcc meta: %v", key)
}
if meta.IsInline() {
// Ignore inline MVCC metadata. We don't expect to see this in practice
// when performing a refresh of an MVCC keyspace.
iter.Next()
continue
}
if meta.Txn.ID == txnID {
// Ignore the transaction's own intent and skip past the corresponding
// provisional key-value. To do this, iterate to the provisional
// key-value, validate its timestamp, then iterate again.
iter.Next()
if ok, err := iter.Valid(); err != nil {
return errors.Wrap(err, "iterating to provisional value for intent")
} else if !ok {
return errors.Errorf("expected provisional value for intent")
}
if !meta.Timestamp.ToTimestamp().EqOrdering(iter.UnsafeKey().Timestamp) {
return errors.Errorf("expected provisional value for intent with ts %s, found %s",
meta.Timestamp, iter.UnsafeKey().Timestamp)
}
iter.Next()
continue
}
return roachpb.NewRefreshFailedError(roachpb.RefreshFailedError_REASON_INTENT,
key.Key, meta.Txn.WriteTimestamp)
}
// If a committed value is found, return an error.
return roachpb.NewRefreshFailedError(roachpb.RefreshFailedError_REASON_COMMITTED_VALUE,
key.Key, key.Timestamp)
}
return nil
}
| pkg/kv/kvserver/batcheval/cmd_refresh_range.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.00025433406699448824,
0.00017721173935569823,
0.00016423885244876146,
0.00017195970576722175,
0.000020856499759247527
] |
{
"id": 2,
"code_window": [
" ParentID 52, ParentSchemaID 29: relation \"rides\" (55): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_rides\"\n",
" ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): referenced database ID 52: referenced descriptor not found\n",
" ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): processed\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
" ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): referenced database ID 52: referenced descriptor not found\n"
],
"file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose",
"type": "replace",
"edit_start_line_idx": 46
} | // Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package cli
import (
"fmt"
"os"
"strings"
"github.com/cockroachdb/cockroach/pkg/workload"
// Register the relevant examples
_ "github.com/cockroachdb/cockroach/pkg/workload/examples"
"github.com/cockroachdb/cockroach/pkg/workload/workloadsql"
"github.com/spf13/cobra"
)
var genExamplesCmd = &cobra.Command{
Use: "example-data",
Short: "generate example SQL code suitable for use with CockroachDB",
Long: `This command generates example SQL code that shows various CockroachDB features and
is suitable to populate an example database for demonstration and education purposes.
`,
}
func init() {
for _, meta := range workload.Registered() {
gen := meta.New()
genExampleCmd := &cobra.Command{
Use: meta.Name,
Short: meta.Description,
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
runGenExamplesCmd(gen)
return nil
},
}
if f, ok := gen.(workload.Flagser); ok {
genExampleCmd.Flags().AddFlagSet(f.Flags().FlagSet)
}
genExamplesCmd.AddCommand(genExampleCmd)
}
}
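// runGenExamplesCmd writes the example database, schema, and initial rows for
// the given workload generator to stdout as SQL statements.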
func runGenExamplesCmd(gen workload.Generator) {
w := os.Stdout
meta := gen.Meta()
fmt.Fprintf(w, "CREATE DATABASE IF NOT EXISTS %s;\n", meta.Name)
fmt.Fprintf(w, "SET DATABASE=%s;\n", meta.Name)
for _, table := range gen.Tables() {
fmt.Fprintf(w, "DROP TABLE IF EXISTS \"%s\";\n", table.Name)
fmt.Fprintf(w, "CREATE TABLE \"%s\" %s;\n", table.Name, table.Schema)
for rowIdx := 0; rowIdx < table.InitialRows.NumBatches; rowIdx++ {
for _, row := range table.InitialRows.BatchRows(rowIdx) {
rowTuple := strings.Join(workloadsql.StringTuple(row), `,`)
fmt.Fprintf(w, "INSERT INTO \"%s\" VALUES (%s);\n", table.Name, rowTuple)
}
}
}
fmt.Fprint(w, footerComment)
}
const footerComment = `--
--
-- If you can see this message, you probably want to redirect the output of
-- 'cockroach gen example-data' to a file, or pipe it as input to 'cockroach sql'.
`
| pkg/cli/examples.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.00017884709814097732,
0.000173564360011369,
0.00017025135457515717,
0.00017363969527650625,
0.0000024544390271330485
] |
{
"id": 3,
"code_window": [
" ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): referenced database ID 52: referenced descriptor not found\n",
" ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n",
" ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): processed\n",
" ParentID 0, ParentSchemaID 0: namespace entry \"defaultdb\" (50): processed\n",
" ParentID 0, ParentSchemaID 0: namespace entry \"movr\" (52): descriptor not found\n",
" ParentID 0, ParentSchemaID 0: namespace entry \"postgres\" (51): processed\n",
" ParentID 0, ParentSchemaID 0: namespace entry \"system\" (1): processed\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): referenced database ID 52: referenced descriptor not found\n"
],
"file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose",
"type": "replace",
"edit_start_line_idx": 50
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package migrations_test
import (
"context"
"fmt"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/stretchr/testify/require"
)
// TestEnsureConstraintIDs tests that constraint IDs are added as expected.
func TestEnsureConstraintIDs(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
// Start off with the version that did not support
// constraint IDs.
clusterArgs := base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
DisableAutomaticVersionUpgrade: 1,
BinaryVersionOverride: clusterversion.ByKey(
tabledesc.ConstraintIDsAddedToTableDescsVersion - 1),
},
},
},
}
c := keys.SystemSQLCodec
ctx := context.Background()
tc := testcluster.StartTestCluster(t, 1, clusterArgs)
s := tc.Server(0)
defer tc.Stopper().Stop(ctx)
sqlDB := tc.ServerConn(0)
tdb := sqlutils.MakeSQLRunner(sqlDB)
// Create table with a primary key constraint.
tdb.Exec(t, "CREATE TABLE t(name int primary key)")
// Validate the comments on constraints are blocked.
tdb.ExpectErr(t,
"pq: cannot comment on constraint",
"COMMENT ON CONSTRAINT \"t_pkey\" ON t IS 'primary_comment'")
// Validate that we have a constraint ID due to the post-deserialization logic.
desc := desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, "defaultdb", "t")
desc.PrimaryIndex.ConstraintID = 0
require.NoError(t, s.DB().Put(
context.Background(),
catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),
desc.DescriptorProto(),
))
// Validate that the post-deserialization logic will recompute the
// constraint IDs if they are missing.
desc = desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, "defaultdb", "t")
require.Equal(t, desc.PrimaryIndex.ConstraintID, descpb.ConstraintID(2))
// If we set both the constraint ID and the next constraint ID to 0, they
// will be assigned from scratch.
desc.PrimaryIndex.ConstraintID = 0
desc.NextConstraintID = 0
require.NoError(t, s.DB().Put(
context.Background(),
catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),
desc.DescriptorProto(),
))
// Validate that the descriptor is invalid, since the constraint IDs
// are missing.
tdb.CheckQueryResults(t,
`SELECT strpos(desc_json, 'constraintId') > 0,
strpos(desc_json, 'nextConstraintId') > 0
FROM (
SELECT jsonb_pretty(
crdb_internal.pb_to_json(
'cockroach.sql.sqlbase.Descriptor',
descriptor,
false
)
) AS desc_json
FROM system.descriptor
WHERE id = `+
fmt.Sprintf("%d", desc.GetID())+
`);`,
[][]string{{"false", "false"}},
)
// Migrate to the new cluster version.
tdb.Exec(t, `SET CLUSTER SETTING version = $1`,
clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String())
tdb.CheckQueryResultsRetry(t, "SHOW CLUSTER SETTING version",
[][]string{{clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String()}})
// Validate the constraint IDs are populated.
// Validate that the descriptor is invalid, since the constraint IDs
// are missing.
tdb.CheckQueryResults(t,
`SELECT strpos(desc_json, 'constraintId') > 0,
strpos(desc_json, 'nextConstraintId') > 0
FROM (
SELECT jsonb_pretty(
crdb_internal.pb_to_json(
'cockroach.sql.sqlbase.Descriptor',
descriptor,
false
)
) AS desc_json
FROM system.descriptor
WHERE id = `+
fmt.Sprintf("%d", desc.GetID())+
`);`,
[][]string{{"true", "true"}},
)
// Validate that we can comment on constraints.
tdb.Exec(t,
"COMMENT ON CONSTRAINT \"t_pkey\" ON t IS 'primary_comment'")
}
| pkg/migration/migrations/ensure_constraint_id_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.0038828605320304632,
0.00044836054439656436,
0.00016177038196474314,
0.00016700627747923136,
0.000953742244746536
] |
{
"id": 3,
"code_window": [
" ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): referenced database ID 52: referenced descriptor not found\n",
" ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n",
" ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): processed\n",
" ParentID 0, ParentSchemaID 0: namespace entry \"defaultdb\" (50): processed\n",
" ParentID 0, ParentSchemaID 0: namespace entry \"movr\" (52): descriptor not found\n",
" ParentID 0, ParentSchemaID 0: namespace entry \"postgres\" (51): processed\n",
" ParentID 0, ParentSchemaID 0: namespace entry \"system\" (1): processed\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): referenced database ID 52: referenced descriptor not found\n"
],
"file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose",
"type": "replace",
"edit_start_line_idx": 50
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package storage
import (
"bytes"
"context"
"fmt"
"math"
"math/rand"
"path/filepath"
"testing"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/pebble"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
const all, latest = true, false
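// makeKVT builds an MVCCKeyValue for the given key, value, and timestamp.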
func makeKVT(key roachpb.Key, value []byte, ts hlc.Timestamp) MVCCKeyValue {
return MVCCKeyValue{Key: MVCCKey{Key: key, Timestamp: ts}, Value: value}
}
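// makeKVTxn returns a transaction writing val to key at ts, along with the
// corresponding value and intent.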
func makeKVTxn(
key roachpb.Key, val []byte, ts hlc.Timestamp,
) (roachpb.Transaction, roachpb.Value, roachpb.Intent) {
txnID := uuid.MakeV4()
txnMeta := enginepb.TxnMeta{
Key: key,
ID: txnID,
Epoch: 1,
WriteTimestamp: ts,
}
return roachpb.Transaction{
TxnMeta: txnMeta,
ReadTimestamp: ts,
}, roachpb.Value{
RawBytes: val,
}, roachpb.MakeIntent(&txnMeta, key)
}
func intents(intents ...roachpb.Intent) []roachpb.Intent {
return intents
}
func kvs(kvs ...MVCCKeyValue) []MVCCKeyValue {
return kvs
}
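// iterateExpectErr returns a subtest that verifies both incremental iteration
// and SST export over the given span and time range fail on the provided
// intents.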
func iterateExpectErr(
e Engine,
startKey, endKey roachpb.Key,
startTime, endTime hlc.Timestamp,
revisions bool,
intents []roachpb.Intent,
) func(*testing.T) {
return func(t *testing.T) {
t.Helper()
t.Run("aggregate-intents", func(t *testing.T) {
assertExpectErrs(t, e, startKey, endKey, startTime, endTime, revisions, intents)
})
t.Run("first-intent", func(t *testing.T) {
assertExpectErr(t, e, startKey, endKey, startTime, endTime, revisions, intents[0])
})
t.Run("export-intents", func(t *testing.T) {
assertExportedErrs(t, e, startKey, endKey, startTime, endTime, revisions, intents, false)
})
t.Run("export-intents-tbi", func(t *testing.T) {
assertExportedErrs(t, e, startKey, endKey, startTime, endTime, revisions, intents, true)
})
}
}
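// assertExpectErr iterates the span with an incremental iterator and fails the
// test unless iteration ends in a WriteIntentError for the expected intent.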
func assertExpectErr(
t *testing.T,
e Engine,
startKey, endKey roachpb.Key,
startTime, endTime hlc.Timestamp,
revisions bool,
expectedIntent roachpb.Intent,
) {
iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{
EndKey: endKey,
StartTime: startTime,
EndTime: endTime,
})
defer iter.Close()
var iterFn func()
if revisions {
iterFn = iter.Next
} else {
iterFn = iter.NextKey
}
for iter.SeekGE(MakeMVCCMetadataKey(startKey)); ; iterFn() {
if ok, _ := iter.Valid(); !ok || iter.UnsafeKey().Key.Compare(endKey) >= 0 {
break
}
// pass
}
_, err := iter.Valid()
if intentErr := (*roachpb.WriteIntentError)(nil); errors.As(err, &intentErr) {
if !expectedIntent.Key.Equal(intentErr.Intents[0].Key) {
t.Fatalf("Expected intent key %v, but got %v", expectedIntent.Key, intentErr.Intents[0].Key)
}
} else {
t.Fatalf("expected error with intent %v but got %v", expectedIntent, err)
}
}
func assertExpectErrs(
t *testing.T,
e Engine,
startKey, endKey roachpb.Key,
startTime, endTime hlc.Timestamp,
revisions bool,
expectedIntents []roachpb.Intent,
) {
iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{
EndKey: endKey,
StartTime: startTime,
EndTime: endTime,
IntentPolicy: MVCCIncrementalIterIntentPolicyAggregate,
})
defer iter.Close()
var iterFn func()
if revisions {
iterFn = iter.Next
} else {
iterFn = iter.NextKey
}
for iter.SeekGE(MakeMVCCMetadataKey(startKey)); ; iterFn() {
if ok, _ := iter.Valid(); !ok || iter.UnsafeKey().Key.Compare(endKey) >= 0 {
break
}
// pass
}
if iter.NumCollectedIntents() != len(expectedIntents) {
t.Fatalf("Expected %d intents but found %d", len(expectedIntents), iter.NumCollectedIntents())
}
err := iter.TryGetIntentError()
if intentErr := (*roachpb.WriteIntentError)(nil); errors.As(err, &intentErr) {
for i := range expectedIntents {
if !expectedIntents[i].Key.Equal(intentErr.Intents[i].Key) {
t.Fatalf("%d intent key: got %v, expected %v", i, intentErr.Intents[i].Key, expectedIntents[i].Key)
}
if !expectedIntents[i].Txn.ID.Equal(intentErr.Intents[i].Txn.ID) {
t.Fatalf("%d intent key: got %v, expected %v", i, intentErr.Intents[i].Txn.ID, expectedIntents[i].Txn.ID)
}
}
} else {
t.Fatalf("Expected roachpb.WriteIntentError, found %T", err)
}
}
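// assertExportedErrs exports the span to an SST and fails the test unless the
// export ends in a WriteIntentError listing the expected intents.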
func assertExportedErrs(
t *testing.T,
e Engine,
startKey, endKey roachpb.Key,
startTime, endTime hlc.Timestamp,
revisions bool,
expectedIntents []roachpb.Intent,
useTBI bool,
) {
const big = 1 << 30
sstFile := &MemFile{}
_, _, _, err := e.ExportMVCCToSst(context.Background(), ExportOptions{
StartKey: MVCCKey{Key: startKey},
EndKey: endKey,
StartTS: startTime,
EndTS: endTime,
ExportAllRevisions: revisions,
TargetSize: big,
MaxSize: big,
MaxIntents: uint64(MaxIntentsPerWriteIntentError.Default()),
StopMidKey: false,
UseTBI: useTBI,
}, sstFile)
require.Error(t, err)
if intentErr := (*roachpb.WriteIntentError)(nil); errors.As(err, &intentErr) {
for i := range expectedIntents {
if !expectedIntents[i].Key.Equal(intentErr.Intents[i].Key) {
t.Fatalf("%d intent key: got %v, expected %v", i, intentErr.Intents[i].Key, expectedIntents[i].Key)
}
if !expectedIntents[i].Txn.ID.Equal(intentErr.Intents[i].Txn.ID) {
t.Fatalf("%d intent key: got %v, expected %v", i, intentErr.Intents[i].Txn.ID, expectedIntents[i].Txn.ID)
}
}
} else {
t.Fatalf("Expected roachpb.WriteIntentError, found %T", err)
}
}
func assertExportedKVs(
t *testing.T,
e Engine,
startKey, endKey roachpb.Key,
startTime, endTime hlc.Timestamp,
revisions bool,
expected []MVCCKeyValue,
useTBI bool,
) {
const big = 1 << 30
sstFile := &MemFile{}
_, _, _, err := e.ExportMVCCToSst(context.Background(), ExportOptions{
StartKey: MVCCKey{Key: startKey},
EndKey: endKey,
StartTS: startTime,
EndTS: endTime,
ExportAllRevisions: revisions,
TargetSize: big,
MaxSize: big,
StopMidKey: false,
UseTBI: useTBI,
}, sstFile)
require.NoError(t, err)
data := sstFile.Data()
if data == nil {
require.Nil(t, expected)
return
}
sst, err := NewMemSSTIterator(data, false)
require.NoError(t, err)
defer sst.Close()
sst.SeekGE(MVCCKey{})
for i := range expected {
ok, err := sst.Valid()
require.NoError(t, err)
require.Truef(t, ok, "iteration produced %d keys, expected %d", i, len(expected))
assert.Equalf(t, expected[i].Key, sst.UnsafeKey(), "key %d", i)
if expected[i].Value == nil {
assert.Equalf(t, []byte{}, sst.UnsafeValue(), "key %d %q", i, sst.UnsafeKey())
} else {
assert.Equalf(t, expected[i].Value, sst.UnsafeValue(), "key %d %q", i, sst.UnsafeKey())
}
sst.Next()
}
ok, err := sst.Valid()
require.NoError(t, err)
require.False(t, ok)
}
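// nextIgnoreTimeExpectErr iterates the span using NextIgnoringTime and fails
// the test unless iteration ends in the given error, with and without the TBI
// optimization.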
func nextIgnoreTimeExpectErr(
t *testing.T,
e Engine,
startKey, endKey roachpb.Key,
startTime, endTime hlc.Timestamp,
errString string,
) {
// The semantics of the methods NextIgnoringTime() should not change whether
// or not we enable the TBI optimization.
for _, useTBI := range []bool{true, false} {
t.Run(fmt.Sprintf("useTBI-%t", useTBI), func(t *testing.T) {
iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{
EndKey: endKey,
EnableTimeBoundIteratorOptimization: useTBI,
StartTime: startTime,
EndTime: endTime,
})
defer iter.Close()
for iter.SeekGE(MakeMVCCMetadataKey(startKey)); ; iter.NextIgnoringTime() {
if ok, _ := iter.Valid(); !ok || iter.UnsafeKey().Key.Compare(endKey) >= 0 {
break
}
// pass
}
if _, err := iter.Valid(); !testutils.IsError(err, errString) {
t.Fatalf("expected error %q but got %v", errString, err)
}
})
}
}
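// assertNextIgnoreTimeIteratedKVs checks that iterating the span using
// NextIgnoringTime yields exactly the expected KVs, with and without the TBI
// optimization.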
func assertNextIgnoreTimeIteratedKVs(
t *testing.T,
e Engine,
startKey, endKey roachpb.Key,
startTime, endTime hlc.Timestamp,
expected []MVCCKeyValue,
) {
// The semantics of the methods NextIgnoringTime() should not change whether
// or not we enable the TBI optimization.
for _, useTBI := range []bool{true, false} {
t.Run(fmt.Sprintf("useTBI-%t", useTBI), func(t *testing.T) {
iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{
EndKey: endKey,
EnableTimeBoundIteratorOptimization: useTBI,
StartTime: startTime,
EndTime: endTime,
})
defer iter.Close()
var kvs []MVCCKeyValue
for iter.SeekGE(MakeMVCCMetadataKey(startKey)); ; iter.NextIgnoringTime() {
if ok, err := iter.Valid(); err != nil {
t.Fatalf("unexpected error: %+v", err)
} else if !ok || iter.UnsafeKey().Key.Compare(endKey) >= 0 {
break
}
kvs = append(kvs, MVCCKeyValue{Key: iter.Key(), Value: iter.Value()})
}
if len(kvs) != len(expected) {
t.Fatalf("got %d kvs but expected %d: %v", len(kvs), len(expected), kvs)
}
for i := range kvs {
if !kvs[i].Key.Equal(expected[i].Key) {
t.Fatalf("%d key: got %v but expected %v", i, kvs[i].Key, expected[i].Key)
}
if !bytes.Equal(kvs[i].Value, expected[i].Value) {
t.Fatalf("%d value: got %x but expected %x", i, kvs[i].Value, expected[i].Value)
}
}
})
}
}
func assertIteratedKVs(
t *testing.T,
e Engine,
startKey, endKey roachpb.Key,
startTime, endTime hlc.Timestamp,
revisions bool,
expected []MVCCKeyValue,
useTBI bool,
) {
iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{
EndKey: endKey,
EnableTimeBoundIteratorOptimization: useTBI,
StartTime: startTime,
EndTime: endTime,
IntentPolicy: MVCCIncrementalIterIntentPolicyAggregate,
})
defer iter.Close()
var iterFn func()
if revisions {
iterFn = iter.Next
} else {
iterFn = iter.NextKey
}
var kvs []MVCCKeyValue
for iter.SeekGE(MakeMVCCMetadataKey(startKey)); ; iterFn() {
if ok, err := iter.Valid(); err != nil {
t.Fatalf("unexpected error: %+v", err)
} else if !ok || iter.UnsafeKey().Key.Compare(endKey) >= 0 {
break
}
if iter.NumCollectedIntents() > 0 {
t.Fatal("got unexpected intent error")
}
kvs = append(kvs, MVCCKeyValue{Key: iter.Key(), Value: iter.Value()})
}
if len(kvs) != len(expected) {
t.Fatalf("got %d kvs but expected %d: %v", len(kvs), len(expected), kvs)
}
for i := range kvs {
if !kvs[i].Key.Equal(expected[i].Key) {
t.Fatalf("%d key: got %v but expected %v", i, kvs[i].Key, expected[i].Key)
}
if !bytes.Equal(kvs[i].Value, expected[i].Value) {
t.Fatalf("%d value: got %x but expected %x", i, kvs[i].Value, expected[i].Value)
}
}
}
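// assertEqualKVs returns a subtest that checks both incremental iteration and
// SST export (with and without time-bound iterators) produce the expected KVs.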
func assertEqualKVs(
e Engine,
startKey, endKey roachpb.Key,
startTime, endTime hlc.Timestamp,
revisions bool,
expected []MVCCKeyValue,
) func(*testing.T) {
return func(t *testing.T) {
t.Helper()
t.Run("iterate", func(t *testing.T) {
assertIteratedKVs(t, e, startKey, endKey, startTime, endTime, revisions, expected,
false /* useTBI */)
})
t.Run("iterate-tbi", func(t *testing.T) {
assertIteratedKVs(t, e, startKey, endKey, startTime, endTime, revisions, expected,
true /* useTBI */)
})
t.Run("export", func(t *testing.T) {
assertExportedKVs(t, e, startKey, endKey, startTime, endTime, revisions, expected,
false /* useTBI */)
})
t.Run("export-tbi", func(t *testing.T) {
assertExportedKVs(t, e, startKey, endKey, startTime, endTime, revisions, expected,
true /* useTBI */)
})
}
}
// TestMVCCIncrementalIteratorNextIgnoringTime tests the iteration semantics of
// the method NextIgnoreTime(). This method is supposed to return all the KVs
// (versions and new keys) that would be encountered in a non-incremental
// iteration.
func TestMVCCIncrementalIteratorNextIgnoringTime(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
var (
keyMax = roachpb.KeyMax
testKey1 = roachpb.Key("/db1")
testKey2 = roachpb.Key("/db2")
testValue1 = []byte("val1")
testValue2 = []byte("val2")
testValue3 = []byte("val3")
testValue4 = []byte("val4")
// Use a non-zero min, since we use IsEmpty to decide if a ts should be used
// as upper/lower-bound during iterator initialization.
tsMin = hlc.Timestamp{WallTime: 0, Logical: 1}
ts1 = hlc.Timestamp{WallTime: 1, Logical: 0}
ts2 = hlc.Timestamp{WallTime: 2, Logical: 0}
ts3 = hlc.Timestamp{WallTime: 3, Logical: 0}
ts4 = hlc.Timestamp{WallTime: 4, Logical: 0}
tsMax = hlc.Timestamp{WallTime: math.MaxInt64, Logical: 0}
)
kv1_1_1 := makeKVT(testKey1, testValue1, ts1)
kv1_2_2 := makeKVT(testKey1, testValue2, ts2)
kv2_2_2 := makeKVT(testKey2, testValue3, ts2)
kv2_4_4 := makeKVT(testKey2, testValue4, ts4)
kv1_3Deleted := makeKVT(testKey1, nil, ts3)
for _, engineImpl := range mvccEngineImpls {
t.Run(engineImpl.name, func(t *testing.T) {
e := engineImpl.create()
defer e.Close()
t.Run("empty", func(t *testing.T) {
assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, tsMin, ts3, nil)
})
for _, kv := range kvs(kv1_1_1, kv1_2_2, kv2_2_2) {
v := roachpb.Value{RawBytes: kv.Value}
if err := MVCCPut(ctx, e, nil, kv.Key.Key, kv.Key.Timestamp, v, nil); err != nil {
t.Fatal(err)
}
}
// Exercise time ranges.
t.Run("ts (0-0]", func(t *testing.T) {
assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, tsMin, tsMin, nil)
})
// Returns the kv_2_2_2 even though it is outside (startTime, endTime].
t.Run("ts (0-1]", func(t *testing.T) {
assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, tsMin, ts1, kvs(kv1_1_1, kv2_2_2))
})
t.Run("ts (0-∞]", func(t *testing.T) {
assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, tsMin, tsMax, kvs(kv1_2_2, kv1_1_1,
kv2_2_2))
})
t.Run("ts (1-1]", func(t *testing.T) {
assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, ts1, ts1, nil)
})
// Returns the kv_1_1_1 even though it is outside (startTime, endTime].
t.Run("ts (1-2]", func(t *testing.T) {
assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, ts1, ts2, kvs(kv1_2_2, kv1_1_1,
kv2_2_2))
})
t.Run("ts (2-2]", func(t *testing.T) {
assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, ts2, ts2, nil)
})
// Exercise key ranges.
t.Run("kv [1-1)", func(t *testing.T) {
assertNextIgnoreTimeIteratedKVs(t, e, testKey1, testKey1, tsMin, tsMax, nil)
})
t.Run("kv [1-2)", func(t *testing.T) {
assertNextIgnoreTimeIteratedKVs(t, e, testKey1, testKey2, tsMin, tsMax, kvs(kv1_2_2, kv1_1_1))
})
// Exercise deletion.
if err := MVCCDelete(ctx, e, nil, testKey1, ts3, nil); err != nil {
t.Fatal(err)
}
// Returns the kv_1_1_1 even though it is outside (startTime, endTime].
t.Run("del", func(t *testing.T) {
assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, ts1, tsMax, kvs(kv1_3Deleted,
kv1_2_2, kv1_1_1, kv2_2_2))
})
// Insert an intent of testKey2.
txn1ID := uuid.MakeV4()
txn1 := roachpb.Transaction{
TxnMeta: enginepb.TxnMeta{
Key: testKey2,
ID: txn1ID,
Epoch: 1,
WriteTimestamp: ts4,
},
ReadTimestamp: ts4,
}
txn1Val := roachpb.Value{RawBytes: testValue4}
if err := MVCCPut(ctx, e, nil, txn1.TxnMeta.Key, txn1.ReadTimestamp, txn1Val, &txn1); err != nil {
t.Fatal(err)
}
// We have to be careful that we are testing the intent handling logic of
// NextIgnoringTime() rather than the first SeekGE(). We do this by
// ensuring that the SeekGE() doesn't encounter an intent.
t.Run("intents", func(t *testing.T) {
nextIgnoreTimeExpectErr(t, e, testKey1, testKey2.PrefixEnd(), tsMin, tsMax, "conflicting intents")
})
t.Run("intents", func(t *testing.T) {
nextIgnoreTimeExpectErr(t, e, localMax, keyMax, tsMin, ts4, "conflicting intents")
})
// Intents above the upper time bound or beneath the lower time bound must
// be ignored. Note that the lower time bound is exclusive while the upper
// time bound is inclusive.
//
// The intent at ts=4 for kv2 lies outside the timespan
// (startTime, endTime] so we do not raise an error and just move on to
// its versioned KV.
t.Run("intents", func(t *testing.T) {
assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, tsMin, ts3, kvs(kv1_3Deleted,
kv1_2_2, kv1_1_1, kv2_4_4, kv2_2_2))
})
t.Run("intents", func(t *testing.T) {
assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, ts4, tsMax, kvs())
})
t.Run("intents", func(t *testing.T) {
assertNextIgnoreTimeIteratedKVs(t, e, localMax, keyMax, ts4.Next(), tsMax, kvs())
})
})
}
}
func TestMVCCIncrementalIteratorInlinePolicy(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
var (
keyMax = roachpb.KeyMax
testKey1 = roachpb.Key("/db1")
testKey2 = roachpb.Key("/db2")
testValue1 = []byte("val1")
testValue2 = []byte("val2")
// Use a non-zero min, since we use IsEmpty to decide if a ts should be used
// as upper/lower-bound during iterator initialization.
tsMin = hlc.Timestamp{WallTime: 0, Logical: 1}
ts1 = hlc.Timestamp{WallTime: 1, Logical: 0}
ts2 = hlc.Timestamp{WallTime: 2, Logical: 0}
tsMax = hlc.Timestamp{WallTime: math.MaxInt64, Logical: 0}
)
inline1_1_1 := makeKVT(testKey1, testValue1, hlc.Timestamp{})
kv2_1_1 := makeKVT(testKey2, testValue1, ts1)
kv2_2_2 := makeKVT(testKey2, testValue2, ts2)
for _, engineImpl := range mvccEngineImpls {
e := engineImpl.create()
defer e.Close()
for _, kv := range []MVCCKeyValue{inline1_1_1, kv2_1_1, kv2_2_2} {
v := roachpb.Value{RawBytes: kv.Value}
if err := MVCCPut(ctx, e, nil, kv.Key.Key, kv.Key.Timestamp, v, nil); err != nil {
t.Fatal(err)
}
}
t.Run(engineImpl.name, func(t *testing.T) {
t.Run("PolicyError returns error if inline value is found", func(t *testing.T) {
iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{
EndKey: keyMax,
StartTime: tsMin,
EndTime: tsMax,
InlinePolicy: MVCCIncrementalIterInlinePolicyError,
})
defer iter.Close()
iter.SeekGE(MakeMVCCMetadataKey(testKey1))
_, err := iter.Valid()
assert.EqualError(t, err, "unexpected inline value found: \"/db1\"")
})
t.Run("PolicyEmit returns inline values to caller", func(t *testing.T) {
iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{
EndKey: keyMax,
StartTime: tsMin,
EndTime: tsMax,
InlinePolicy: MVCCIncrementalIterInlinePolicyEmit,
})
defer iter.Close()
iter.SeekGE(MakeMVCCMetadataKey(testKey1))
expectInlineKeyValue(t, iter, inline1_1_1)
iter.Next()
expectKeyValue(t, iter, kv2_2_2)
iter.Next()
expectKeyValue(t, iter, kv2_1_1)
})
})
}
}
func TestMVCCIncrementalIteratorIntentPolicy(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
var (
keyMax = roachpb.KeyMax
testKey1 = roachpb.Key("/db1")
testKey2 = roachpb.Key("/db2")
testValue1 = []byte("val1")
testValue2 = []byte("val2")
testValue3 = []byte("val3")
// Use a non-zero min, since we use IsEmpty to decide if a ts should be used
// as upper/lower-bound during iterator initialization.
tsMin = hlc.Timestamp{WallTime: 0, Logical: 1}
ts1 = hlc.Timestamp{WallTime: 1, Logical: 0}
ts2 = hlc.Timestamp{WallTime: 2, Logical: 0}
ts3 = hlc.Timestamp{WallTime: 3, Logical: 0}
tsMax = hlc.Timestamp{WallTime: math.MaxInt64, Logical: 0}
)
makeTxn := func(key roachpb.Key, val []byte, ts hlc.Timestamp) (roachpb.Transaction, roachpb.Value, roachpb.Intent) {
txnID := uuid.MakeV4()
txnMeta := enginepb.TxnMeta{
Key: key,
ID: txnID,
Epoch: 1,
WriteTimestamp: ts,
}
return roachpb.Transaction{
TxnMeta: txnMeta,
ReadTimestamp: ts,
}, roachpb.Value{
RawBytes: val,
}, roachpb.MakeIntent(&txnMeta, key)
}
kv1_1_1 := makeKVT(testKey1, testValue1, ts1)
kv1_2_2 := makeKVT(testKey1, testValue2, ts2)
kv1_3_3 := makeKVT(testKey1, testValue3, ts3)
kv2_1_1 := makeKVT(testKey2, testValue1, ts1)
kv2_2_2 := makeKVT(testKey2, testValue2, ts2)
txn, val, intent2_2_2 := makeTxn(testKey2, testValue2, ts2)
intentErr := &roachpb.WriteIntentError{Intents: []roachpb.Intent{intent2_2_2}}
for _, engineImpl := range mvccEngineImpls {
e := engineImpl.create()
defer e.Close()
for _, kv := range []MVCCKeyValue{kv1_1_1, kv1_2_2, kv1_3_3, kv2_1_1} {
v := roachpb.Value{RawBytes: kv.Value}
if err := MVCCPut(ctx, e, nil, kv.Key.Key, kv.Key.Timestamp, v, nil); err != nil {
t.Fatal(err)
}
}
if err := MVCCPut(ctx, e, nil, txn.TxnMeta.Key, txn.ReadTimestamp, val, &txn); err != nil {
t.Fatal(err)
}
t.Run(engineImpl.name, func(t *testing.T) {
t.Run("PolicyError returns error if an intent is in the time range", func(t *testing.T) {
iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{
EndKey: keyMax,
StartTime: tsMin,
EndTime: tsMax,
IntentPolicy: MVCCIncrementalIterIntentPolicyError,
})
defer iter.Close()
iter.SeekGE(MakeMVCCMetadataKey(testKey1))
for ; ; iter.Next() {
if ok, _ := iter.Valid(); !ok || iter.UnsafeKey().Key.Compare(keyMax) >= 0 {
break
}
}
_, err := iter.Valid()
assert.EqualError(t, err, intentErr.Error())
})
t.Run("PolicyError ignores intents outside of time range", func(t *testing.T) {
iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{
EndKey: keyMax,
StartTime: ts2,
EndTime: tsMax,
IntentPolicy: MVCCIncrementalIterIntentPolicyError,
})
defer iter.Close()
iter.SeekGE(MakeMVCCMetadataKey(testKey1))
expectKeyValue(t, iter, kv1_3_3)
iter.Next()
valid, err := iter.Valid()
assert.NoError(t, err)
assert.False(t, valid)
})
t.Run("PolicyEmit returns inline values to caller", func(t *testing.T) {
iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{
EndKey: keyMax,
StartTime: tsMin,
EndTime: tsMax,
IntentPolicy: MVCCIncrementalIterIntentPolicyEmit,
})
defer iter.Close()
iter.SeekGE(MakeMVCCMetadataKey(testKey1))
for _, kv := range []MVCCKeyValue{kv1_3_3, kv1_2_2, kv1_1_1} {
expectKeyValue(t, iter, kv)
iter.Next()
}
expectIntent(t, iter, intent2_2_2)
iter.Next()
expectKeyValue(t, iter, kv2_2_2)
iter.Next()
expectKeyValue(t, iter, kv2_1_1)
})
t.Run("PolicyEmit ignores intents outside of time range", func(t *testing.T) {
iter := NewMVCCIncrementalIterator(e, MVCCIncrementalIterOptions{
EndKey: keyMax,
StartTime: ts2,
EndTime: tsMax,
IntentPolicy: MVCCIncrementalIterIntentPolicyEmit,
})
defer iter.Close()
iter.SeekGE(MakeMVCCMetadataKey(testKey1))
expectKeyValue(t, iter, kv1_3_3)
iter.Next()
valid, err := iter.Valid()
assert.NoError(t, err)
assert.False(t, valid)
})
})
}
}
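// expectKeyValue asserts that the iterator is valid and positioned on the
// given versioned key-value pair.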
func expectKeyValue(t *testing.T, iter SimpleMVCCIterator, kv MVCCKeyValue) {
valid, err := iter.Valid()
assert.True(t, valid, "expected valid iterator")
assert.NoError(t, err)
unsafeKey := iter.UnsafeKey()
unsafeVal := iter.UnsafeValue()
assert.True(t, unsafeKey.Key.Equal(kv.Key.Key), "keys not equal")
assert.Equal(t, kv.Key.Timestamp, unsafeKey.Timestamp)
assert.Equal(t, kv.Value, unsafeVal)
}
func expectInlineKeyValue(t *testing.T, iter SimpleMVCCIterator, kv MVCCKeyValue) {
valid, err := iter.Valid()
assert.True(t, valid)
assert.NoError(t, err)
unsafeKey := iter.UnsafeKey()
unsafeVal := iter.UnsafeValue()
var meta enginepb.MVCCMetadata
err = protoutil.Unmarshal(unsafeVal, &meta)
require.NoError(t, err)
assert.True(t, meta.IsInline())
assert.False(t, unsafeKey.IsValue())
assert.True(t, unsafeKey.Key.Equal(kv.Key.Key))
assert.Equal(t, kv.Value, meta.RawBytes)
}
func expectIntent(t *testing.T, iter SimpleMVCCIterator, intent roachpb.Intent) {
valid, err := iter.Valid()
assert.True(t, valid)
assert.NoError(t, err)
unsafeKey := iter.UnsafeKey()
unsafeVal := iter.UnsafeValue()
var meta enginepb.MVCCMetadata
err = protoutil.Unmarshal(unsafeVal, &meta)
require.NoError(t, err)
assert.NotNil(t, meta.Txn)
assert.False(t, unsafeKey.IsValue())
assert.True(t, unsafeKey.Key.Equal(intent.Key))
assert.Equal(t, meta.Timestamp, intent.Txn.WriteTimestamp.ToLegacyTimestamp())
}
func TestMVCCIncrementalIterator(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
var (
keyMax = roachpb.KeyMax
testKey1 = roachpb.Key("/db1")
testKey2 = roachpb.Key("/db2")
testValue1 = []byte("val1")
testValue2 = []byte("val2")
testValue3 = []byte("val3")
testValue4 = []byte("val4")
// Use a non-zero min, since we use IsEmpty to decide if a ts should be used
// as upper/lower-bound during iterator initialization.
tsMin = hlc.Timestamp{WallTime: 0, Logical: 1}
ts1 = hlc.Timestamp{WallTime: 1, Logical: 0}
ts2 = hlc.Timestamp{WallTime: 2, Logical: 0}
ts3 = hlc.Timestamp{WallTime: 3, Logical: 0}
ts4 = hlc.Timestamp{WallTime: 4, Logical: 0}
tsMax = hlc.Timestamp{WallTime: math.MaxInt64, Logical: 0}
)
// Keys are named as kv<key>_<value>_<ts>.
kv1_1_1 := makeKVT(testKey1, testValue1, ts1)
kv1_4_4 := makeKVT(testKey1, testValue4, ts4)
kv1_2_2 := makeKVT(testKey1, testValue2, ts2)
kv2_2_2 := makeKVT(testKey2, testValue3, ts2)
kv1Deleted3 := makeKVT(testKey1, nil, ts3)
for _, engineImpl := range mvccEngineImpls {
t.Run(engineImpl.name+"-latest", func(t *testing.T) {
e := engineImpl.create()
defer e.Close()
t.Run("empty", assertEqualKVs(e, localMax, keyMax, tsMin, ts3, latest, nil))
for _, kv := range kvs(kv1_1_1, kv1_2_2, kv2_2_2) {
v := roachpb.Value{RawBytes: kv.Value}
if err := MVCCPut(ctx, e, nil, kv.Key.Key, kv.Key.Timestamp, v, nil); err != nil {
t.Fatal(err)
}
}
// Exercise time ranges.
t.Run("ts (0-0]", assertEqualKVs(e, localMax, keyMax, tsMin, tsMin, latest, nil))
t.Run("ts (0-1]", assertEqualKVs(e, localMax, keyMax, tsMin, ts1, latest, kvs(kv1_1_1)))
t.Run("ts (0-∞]", assertEqualKVs(e, localMax, keyMax, tsMin, tsMax, latest, kvs(kv1_2_2, kv2_2_2)))
t.Run("ts (1-1]", assertEqualKVs(e, localMax, keyMax, ts1, ts1, latest, nil))
t.Run("ts (1-2]", assertEqualKVs(e, localMax, keyMax, ts1, ts2, latest, kvs(kv1_2_2, kv2_2_2)))
t.Run("ts (2-2]", assertEqualKVs(e, localMax, keyMax, ts2, ts2, latest, nil))
// Exercise key ranges.
t.Run("kv [1-1)", assertEqualKVs(e, testKey1, testKey1, tsMin, tsMax, latest, nil))
t.Run("kv [1-2)", assertEqualKVs(e, testKey1, testKey2, tsMin, tsMax, latest, kvs(kv1_2_2)))
// Exercise deletion.
if err := MVCCDelete(ctx, e, nil, testKey1, ts3, nil); err != nil {
t.Fatal(err)
}
t.Run("del", assertEqualKVs(e, localMax, keyMax, ts1, tsMax, latest, kvs(kv1Deleted3, kv2_2_2)))
// Exercise intent handling.
txn1, txn1Val, intentErr1 := makeKVTxn(testKey1, testValue4, ts4)
if err := MVCCPut(ctx, e, nil, txn1.TxnMeta.Key, txn1.ReadTimestamp, txn1Val, &txn1); err != nil {
t.Fatal(err)
}
txn2, txn2Val, intentErr2 := makeKVTxn(testKey2, testValue4, ts4)
if err := MVCCPut(ctx, e, nil, txn2.TxnMeta.Key, txn2.ReadTimestamp, txn2Val, &txn2); err != nil {
t.Fatal(err)
}
t.Run("intents-1",
iterateExpectErr(e, testKey1, testKey1.PrefixEnd(), tsMin, tsMax, latest, intents(intentErr1)))
t.Run("intents-2",
iterateExpectErr(e, testKey2, testKey2.PrefixEnd(), tsMin, tsMax, latest, intents(intentErr2)))
t.Run("intents-multi",
iterateExpectErr(e, localMax, keyMax, tsMin, ts4, latest, intents(intentErr1, intentErr2)))
// Intents above the upper time bound or beneath the lower time bound must
// be ignored (#28358). Note that the lower time bound is exclusive while
// the upper time bound is inclusive.
t.Run("intents-filtered-1", assertEqualKVs(e, localMax, keyMax, tsMin, ts3, latest, kvs(kv1Deleted3, kv2_2_2)))
t.Run("intents-filtered-2", assertEqualKVs(e, localMax, keyMax, ts4, tsMax, latest, kvs()))
t.Run("intents-filtered-3", assertEqualKVs(e, localMax, keyMax, ts4.Next(), tsMax, latest, kvs()))
intent1 := roachpb.MakeLockUpdate(&txn1, roachpb.Span{Key: testKey1})
intent1.Status = roachpb.COMMITTED
if _, err := MVCCResolveWriteIntent(ctx, e, nil, intent1); err != nil {
t.Fatal(err)
}
intent2 := roachpb.MakeLockUpdate(&txn2, roachpb.Span{Key: testKey2})
intent2.Status = roachpb.ABORTED
if _, err := MVCCResolveWriteIntent(ctx, e, nil, intent2); err != nil {
t.Fatal(err)
}
t.Run("intents-resolved", assertEqualKVs(e, localMax, keyMax, tsMin, tsMax, latest, kvs(kv1_4_4, kv2_2_2)))
})
}
for _, engineImpl := range mvccEngineImpls {
t.Run(engineImpl.name+"-all", func(t *testing.T) {
e := engineImpl.create()
defer e.Close()
t.Run("empty", assertEqualKVs(e, localMax, keyMax, tsMin, ts3, all, nil))
for _, kv := range kvs(kv1_1_1, kv1_2_2, kv2_2_2) {
v := roachpb.Value{RawBytes: kv.Value}
if err := MVCCPut(ctx, e, nil, kv.Key.Key, kv.Key.Timestamp, v, nil); err != nil {
t.Fatal(err)
}
}
// Exercise time ranges.
t.Run("ts (0-0]", assertEqualKVs(e, localMax, keyMax, tsMin, tsMin, all, nil))
t.Run("ts (0-1]", assertEqualKVs(e, localMax, keyMax, tsMin, ts1, all, kvs(kv1_1_1)))
t.Run("ts (0-∞]", assertEqualKVs(e, localMax, keyMax, tsMin, tsMax, all, kvs(kv1_2_2, kv1_1_1, kv2_2_2)))
t.Run("ts (1-1]", assertEqualKVs(e, localMax, keyMax, ts1, ts1, all, nil))
t.Run("ts (1-2]", assertEqualKVs(e, localMax, keyMax, ts1, ts2, all, kvs(kv1_2_2, kv2_2_2)))
t.Run("ts (2-2]", assertEqualKVs(e, localMax, keyMax, ts2, ts2, all, nil))
// Exercise key ranges.
t.Run("kv [1-1)", assertEqualKVs(e, testKey1, testKey1, tsMin, tsMax, all, nil))
t.Run("kv [1-2)", assertEqualKVs(e, testKey1, testKey2, tsMin, tsMax, all, kvs(kv1_2_2, kv1_1_1)))
// Exercise deletion.
if err := MVCCDelete(ctx, e, nil, testKey1, ts3, nil); err != nil {
t.Fatal(err)
}
t.Run("del", assertEqualKVs(e, localMax, keyMax, ts1, tsMax, all, kvs(kv1Deleted3, kv1_2_2, kv2_2_2)))
// Exercise intent handling.
txn1, txn1Val, intentErr1 := makeKVTxn(testKey1, testValue4, ts4)
if err := MVCCPut(ctx, e, nil, txn1.TxnMeta.Key, txn1.ReadTimestamp, txn1Val, &txn1); err != nil {
t.Fatal(err)
}
txn2, txn2Val, intentErr2 := makeKVTxn(testKey2, testValue4, ts4)
if err := MVCCPut(ctx, e, nil, txn2.TxnMeta.Key, txn2.ReadTimestamp, txn2Val, &txn2); err != nil {
t.Fatal(err)
}
// Single intent tests are verifying behavior when intent collection is not enabled.
t.Run("intents-1",
iterateExpectErr(e, testKey1, testKey1.PrefixEnd(), tsMin, tsMax, all, intents(intentErr1)))
t.Run("intents-2",
iterateExpectErr(e, testKey2, testKey2.PrefixEnd(), tsMin, tsMax, all, intents(intentErr2)))
t.Run("intents-multi",
iterateExpectErr(e, localMax, keyMax, tsMin, ts4, all, intents(intentErr1, intentErr2)))
// Intents above the upper time bound or beneath the lower time bound must
// be ignored (#28358). Note that the lower time bound is exclusive while
// the upper time bound is inclusive.
t.Run("intents-filtered-1", assertEqualKVs(e, localMax, keyMax, tsMin, ts3, all, kvs(kv1Deleted3, kv1_2_2, kv1_1_1, kv2_2_2)))
t.Run("intents-filtered-2", assertEqualKVs(e, localMax, keyMax, ts4, tsMax, all, kvs()))
t.Run("intents-filtered-3", assertEqualKVs(e, localMax, keyMax, ts4.Next(), tsMax, all, kvs()))
intent1 := roachpb.MakeLockUpdate(&txn1, roachpb.Span{Key: testKey1})
intent1.Status = roachpb.COMMITTED
if _, err := MVCCResolveWriteIntent(ctx, e, nil, intent1); err != nil {
t.Fatal(err)
}
intent2 := roachpb.MakeLockUpdate(&txn2, roachpb.Span{Key: testKey2})
intent2.Status = roachpb.ABORTED
if _, err := MVCCResolveWriteIntent(ctx, e, nil, intent2); err != nil {
t.Fatal(err)
}
t.Run("intents-resolved", assertEqualKVs(e, localMax, keyMax, tsMin, tsMax, all, kvs(kv1_4_4, kv1Deleted3, kv1_2_2, kv1_1_1, kv2_2_2)))
})
}
}
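// slurpKVsInTimeRange returns all KVs under prefix that an incremental
// iterator emits for the time range (startTime, endTime].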
func slurpKVsInTimeRange(
reader Reader, prefix roachpb.Key, startTime, endTime hlc.Timestamp,
) ([]MVCCKeyValue, error) {
endKey := prefix.PrefixEnd()
iter := NewMVCCIncrementalIterator(reader, MVCCIncrementalIterOptions{
EndKey: endKey,
StartTime: startTime,
EndTime: endTime,
})
defer iter.Close()
var kvs []MVCCKeyValue
for iter.SeekGE(MakeMVCCMetadataKey(prefix)); ; iter.Next() {
if ok, err := iter.Valid(); err != nil {
return nil, err
} else if !ok || iter.UnsafeKey().Key.Compare(endKey) >= 0 {
break
}
kvs = append(kvs, MVCCKeyValue{Key: iter.Key(), Value: iter.Value()})
}
return kvs, nil
}
// TestMVCCIncrementalIteratorIntentRewrittenConcurrently verifies that the
// workaround in MVCCIncrementalIterator to double-check for deleted intents
// properly handles cases where an intent originally in a time-bound iterator's
// time range is rewritten at a timestamp outside of its time range.
func TestMVCCIncrementalIteratorIntentRewrittenConcurrently(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
for _, engineImpl := range mvccEngineImpls {
t.Run(engineImpl.name, func(t *testing.T) {
e := engineImpl.create()
defer e.Close()
// Create a DB containing a single intent.
ctx := context.Background()
kA := roachpb.Key("kA")
vA1 := roachpb.MakeValueFromString("vA1")
vA2 := roachpb.MakeValueFromString("vA2")
ts0 := hlc.Timestamp{WallTime: 0}
ts1 := hlc.Timestamp{WallTime: 1}
ts2 := hlc.Timestamp{WallTime: 2}
ts3 := hlc.Timestamp{WallTime: 3}
txn := &roachpb.Transaction{
TxnMeta: enginepb.TxnMeta{
Key: roachpb.Key("b"),
ID: uuid.MakeV4(),
Epoch: 1,
WriteTimestamp: ts1,
Sequence: 1,
},
ReadTimestamp: ts1,
}
if err := MVCCPut(ctx, e, nil, kA, ts1, vA1, txn); err != nil {
t.Fatal(err)
}
// Concurrently iterate over the intent using a time-bound iterator and move
// the intent out of the time-bound iterator's time range by writing to it
// again at a higher timestamp.
g, _ := errgroup.WithContext(ctx)
g.Go(func() error {
// Re-write the intent with a higher timestamp.
txn.WriteTimestamp = ts3
txn.Sequence = 2
// Use a batch since MVCCPut is not atomic when using an Engine and we
// are not using latches to prevent a concurrent read in the other
// goroutine. A non-atomic Put can cause the strict invariant checking
// in intentInterleavingIter to be violated.
b := e.NewBatch()
defer b.Close()
if err := MVCCPut(ctx, b, nil, kA, ts1, vA2, txn); err != nil {
return err
}
return b.Commit(false)
})
g.Go(func() error {
// Iterate with a time range that includes the initial intent but does
// not include the new intent.
kvs, err := slurpKVsInTimeRange(e, kA, ts0, ts2)
// There are two permissible outcomes from the scan. If the iteration
// wins the race with the put that moves the intent then it should
// observe the intent and return a write intent error. If the iteration
// loses the race with the put that moves the intent then it should
// observe and return nothing because there will be no committed or
// provisional keys in its time range.
if err != nil {
if !testutils.IsError(err, `conflicting intents on "kA"`) {
return err
}
} else {
if len(kvs) != 0 {
return errors.Errorf(`unexpected kvs: %v`, kvs)
}
}
return nil
})
if err := g.Wait(); err != nil {
t.Fatal(err)
}
})
}
}
// TestMVCCIncrementalIteratorIntentDeletion checks a workaround in
// MVCCIncrementalIterator for a bug in time-bound iterators, where an intent
// has been deleted, but the time-bound iterator doesn't see the deletion.
func TestMVCCIncrementalIteratorIntentDeletion(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
txn := func(key roachpb.Key, ts hlc.Timestamp) *roachpb.Transaction {
return &roachpb.Transaction{
TxnMeta: enginepb.TxnMeta{
Key: key,
ID: uuid.MakeV4(),
Epoch: 1,
WriteTimestamp: ts,
},
ReadTimestamp: ts,
}
}
intent := func(txn *roachpb.Transaction) roachpb.LockUpdate {
intent := roachpb.MakeLockUpdate(txn, roachpb.Span{Key: txn.Key})
intent.Status = roachpb.COMMITTED
return intent
}
ctx := context.Background()
kA := roachpb.Key("kA")
vA1 := roachpb.MakeValueFromString("vA1")
vA2 := roachpb.MakeValueFromString("vA2")
vA3 := roachpb.MakeValueFromString("vA3")
kB := roachpb.Key("kB")
vB1 := roachpb.MakeValueFromString("vB1")
kC := roachpb.Key("kC")
vC1 := roachpb.MakeValueFromString("vC1")
ts0 := hlc.Timestamp{WallTime: 0}
ts1 := hlc.Timestamp{WallTime: 1}
ts2 := hlc.Timestamp{WallTime: 2}
ts3 := hlc.Timestamp{WallTime: 3}
txnA1 := txn(kA, ts1)
txnA3 := txn(kA, ts3)
txnB1 := txn(kB, ts1)
txnC1 := txn(kC, ts1)
db := createTestPebbleEngine()
defer db.Close()
// Set up two sstables very specifically:
//
// sst1 (time-bound metadata ts1->ts1)
// kA -> (intent)
// kA:1 -> vA1
// kB -> (intent)
// kB:1 -> vB1
// kC -> (intent)
// kC:1 -> vC1
//
// sst2 (time-bound metadata ts2->ts3) the intent deletions are for the
// intents at ts1, but there's no way to know that when constructing the
// metadata (hence the time-bound iterator bug)
// kA -> (intent) [NB this overwrites the intent deletion]
// kA:3 -> vA3
// kA:2 -> vA2
// kB -> (intent deletion)
require.NoError(t, MVCCPut(ctx, db, nil, kA, txnA1.ReadTimestamp, vA1, txnA1))
require.NoError(t, MVCCPut(ctx, db, nil, kB, txnB1.ReadTimestamp, vB1, txnB1))
require.NoError(t, MVCCPut(ctx, db, nil, kC, txnC1.ReadTimestamp, vC1, txnC1))
require.NoError(t, db.Flush())
require.NoError(t, db.Compact())
_, err := MVCCResolveWriteIntent(ctx, db, nil, intent(txnA1))
require.NoError(t, err)
_, err = MVCCResolveWriteIntent(ctx, db, nil, intent(txnB1))
require.NoError(t, err)
require.NoError(t, MVCCPut(ctx, db, nil, kA, ts2, vA2, nil))
require.NoError(t, MVCCPut(ctx, db, nil, kA, txnA3.WriteTimestamp, vA3, txnA3))
require.NoError(t, db.Flush())
// The kA ts1 intent has been resolved. There's now a new intent on kA, but
// the timestamp (ts3) is too new so it should be ignored.
kvs, err := slurpKVsInTimeRange(db, kA, ts0, ts1)
require.NoError(t, err)
require.Equal(t, []MVCCKeyValue{
{Key: MVCCKey{Key: kA, Timestamp: ts1}, Value: vA1.RawBytes},
}, kvs)
// kA has a value at ts2. Again the intent is too new (ts3), so ignore.
kvs, err = slurpKVsInTimeRange(db, kA, ts0, ts2)
require.NoError(t, err)
require.Equal(t, []MVCCKeyValue{
{Key: MVCCKey{Key: kA, Timestamp: ts2}, Value: vA2.RawBytes},
{Key: MVCCKey{Key: kA, Timestamp: ts1}, Value: vA1.RawBytes},
}, kvs)
// At ts3, we should see the new intent
_, err = slurpKVsInTimeRange(db, kA, ts0, ts3)
require.EqualError(t, err, `conflicting intents on "kA"`)
// Similar to the kA ts1 check, but there is no newer intent. We expect to
// pick up the intent deletion and it should cancel out the intent, leaving
// only the value at ts1.
kvs, err = slurpKVsInTimeRange(db, kB, ts0, ts1)
require.NoError(t, err)
require.Equal(t, []MVCCKeyValue{
{Key: MVCCKey{Key: kB, Timestamp: ts1}, Value: vB1.RawBytes},
}, kvs)
// Sanity check that we see the still unresolved intent for kC ts1.
_, err = slurpKVsInTimeRange(db, kC, ts0, ts1)
require.EqualError(t, err, `conflicting intents on "kC"`)
}
func TestMVCCIncrementalIteratorIntentStraddlesSStables(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
// Create a DB containing 2 keys, a and b, where b has an intent. We use the
// regular MVCCPut operation to generate these keys, which we'll later be
// copying into manually created sstables.
ctx := context.Background()
db1, err := Open(ctx, InMemory(), ForTesting)
require.NoError(t, err)
defer db1.Close()
put := func(key, value string, ts int64, txn *roachpb.Transaction) {
v := roachpb.MakeValueFromString(value)
if err := MVCCPut(
ctx, db1, nil, roachpb.Key(key), hlc.Timestamp{WallTime: ts}, v, txn,
); err != nil {
t.Fatal(err)
}
}
put("a", "a value", 1, nil)
put("b", "b value", 2, &roachpb.Transaction{
TxnMeta: enginepb.TxnMeta{
Key: roachpb.Key("b"),
ID: uuid.MakeV4(),
Epoch: 1,
WriteTimestamp: hlc.Timestamp{WallTime: 2},
},
ReadTimestamp: hlc.Timestamp{WallTime: 2},
})
// Create a second DB in which we'll create a specific SSTable structure: the
// first SSTable contains 2 KVs where the first is a regular versioned key
// and the second is the MVCC metadata entry (i.e. an intent). The next
// SSTable contains the provisional value for the intent. The effect is that
// the metadata entry is separated from the entry it is metadata for.
//
// SSTable 1:
// a@1
// b@<meta>
//
// SSTable 2:
// b@2
db2, err := Open(ctx, InMemory(), ForTesting)
require.NoError(t, err)
defer db2.Close()
// NB: If the original intent was separated, iterating using an interleaving
// iterator, as done below, and writing to an sst, transforms the separated
// intent to an interleaved intent. This is ok for now since both kinds of
// intents are supported.
// TODO(sumeer): change this test before interleaved intents are disallowed.
ingest := func(it MVCCIterator, count int) {
memFile := &MemFile{}
sst := MakeIngestionSSTWriter(memFile)
defer sst.Close()
for i := 0; i < count; i++ {
ok, err := it.Valid()
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("expected key")
}
if err := sst.Put(it.Key(), it.Value()); err != nil {
t.Fatal(err)
}
it.Next()
}
if err := sst.Finish(); err != nil {
t.Fatal(err)
}
if err := db2.WriteFile(`ingest`, memFile.Data()); err != nil {
t.Fatal(err)
}
if err := db2.IngestExternalFiles(ctx, []string{`ingest`}); err != nil {
t.Fatal(err)
}
}
{
// Iterate over the entries in the first DB, ingesting them into SSTables
// in the second DB.
it := db1.NewMVCCIterator(MVCCKeyAndIntentsIterKind, IterOptions{
UpperBound: keys.MaxKey,
})
defer it.Close()
it.SeekGE(MVCCKey{Key: keys.LocalMax})
ingest(it, 2)
ingest(it, 1)
}
{
// Use an incremental iterator to simulate an incremental backup from (1,
// 2]. Note that incremental iterators are exclusive on the start time and
// inclusive on the end time. The expectation is that we'll see a write
// intent error.
it := NewMVCCIncrementalIterator(db2, MVCCIncrementalIterOptions{
EndKey: keys.MaxKey,
StartTime: hlc.Timestamp{WallTime: 1},
EndTime: hlc.Timestamp{WallTime: 2},
})
defer it.Close()
for it.SeekGE(MVCCKey{Key: keys.LocalMax}); ; it.Next() {
ok, err := it.Valid()
if err != nil {
if errors.HasType(err, (*roachpb.WriteIntentError)(nil)) {
// This is the write intent error we were expecting.
return
}
t.Fatalf("%T: %s", err, err)
}
if !ok {
break
}
}
t.Fatalf("expected write intent error, but found success")
}
}
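// TestMVCCIterateTimeBound compares incremental iteration over a variety of
// time bounds, including bounds that line up with sstable boundaries, against
// a source of truth computed with a plain MVCC iterator.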
func TestMVCCIterateTimeBound(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
dir, cleanupFn := testutils.TempDir(t)
defer cleanupFn()
const numKeys = 1000
const numBatches = 10
const batchTimeSpan = 10
const valueSize = 8
eng, err := loadTestData(filepath.Join(dir, "mvcc_data"),
numKeys, numBatches, batchTimeSpan, valueSize)
if err != nil {
t.Fatal(err)
}
defer eng.Close()
for _, testCase := range []struct {
start hlc.Timestamp
end hlc.Timestamp
}{
// entire time range
{hlc.Timestamp{WallTime: 0, Logical: 0}, hlc.Timestamp{WallTime: 110, Logical: 0}},
// one SST
{hlc.Timestamp{WallTime: 10, Logical: 0}, hlc.Timestamp{WallTime: 19, Logical: 0}},
// one SST, plus the min of the following SST
{hlc.Timestamp{WallTime: 10, Logical: 0}, hlc.Timestamp{WallTime: 20, Logical: 0}},
// one SST, plus the max of the preceding SST
{hlc.Timestamp{WallTime: 9, Logical: 0}, hlc.Timestamp{WallTime: 19, Logical: 0}},
// one SST, plus the min of the following and the max of the preceding SST
{hlc.Timestamp{WallTime: 9, Logical: 0}, hlc.Timestamp{WallTime: 21, Logical: 0}},
// one SST, not min or max
{hlc.Timestamp{WallTime: 17, Logical: 0}, hlc.Timestamp{WallTime: 18, Logical: 0}},
// one SST's max
{hlc.Timestamp{WallTime: 18, Logical: 0}, hlc.Timestamp{WallTime: 19, Logical: 0}},
// one SST's min
{hlc.Timestamp{WallTime: 19, Logical: 0}, hlc.Timestamp{WallTime: 20, Logical: 0}},
// random endpoints
{hlc.Timestamp{WallTime: 32, Logical: 0}, hlc.Timestamp{WallTime: 78, Logical: 0}},
} {
t.Run(fmt.Sprintf("%s-%s", testCase.start, testCase.end), func(t *testing.T) {
defer leaktest.AfterTest(t)()
expectedKVs := collectMatchingWithMVCCIterator(t, eng, testCase.start, testCase.end)
assertEqualKVs(eng, keys.LocalMax, keys.MaxKey, testCase.start, testCase.end, latest, expectedKVs)(t)
})
}
}
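// collectMatchingWithMVCCIterator scans the engine with a plain MVCC iterator
// and returns all KVs whose timestamp falls in (start, end], failing the test
// if nothing matches.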
func collectMatchingWithMVCCIterator(
t *testing.T, eng Engine, start, end hlc.Timestamp,
) []MVCCKeyValue {
var expectedKVs []MVCCKeyValue
iter := eng.NewMVCCIterator(MVCCKeyAndIntentsIterKind, IterOptions{UpperBound: roachpb.KeyMax})
defer iter.Close()
iter.SeekGE(MVCCKey{Key: localMax})
for {
ok, err := iter.Valid()
if err != nil {
t.Fatal(err)
} else if !ok {
break
}
ts := iter.Key().Timestamp
if (ts.Less(end) || end == ts) && start.Less(ts) {
expectedKVs = append(expectedKVs, MVCCKeyValue{Key: iter.Key(), Value: iter.Value()})
}
iter.Next()
}
if len(expectedKVs) < 1 {
t.Fatalf("source of truth had no expected KVs; likely a bug in the test itself")
}
return expectedKVs
}
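// runIncrementalBenchmark times a full incremental scan of the benchmark data
// set from ts up to the maximum timestamp, with or without the time-bound
// iterator optimization.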
func runIncrementalBenchmark(
b *testing.B, emk engineMaker, useTBI bool, ts hlc.Timestamp, opts benchDataOptions,
) {
eng, _ := setupMVCCData(context.Background(), b, emk, opts)
{
// Pull all of the sstables into the cache. This
// probably defeats a lot of the benefits of the
// time-based optimization.
iter := eng.NewMVCCIterator(MVCCKeyAndIntentsIterKind, IterOptions{UpperBound: roachpb.KeyMax})
_, _ = iter.ComputeStats(keys.LocalMax, roachpb.KeyMax, 0)
iter.Close()
}
defer eng.Close()
startKey := roachpb.Key(encoding.EncodeUvarintAscending([]byte("key-"), uint64(0)))
endKey := roachpb.Key(encoding.EncodeUvarintAscending([]byte("key-"), uint64(opts.numKeys)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
it := NewMVCCIncrementalIterator(eng, MVCCIncrementalIterOptions{
EnableTimeBoundIteratorOptimization: useTBI,
EndKey: endKey,
StartTime: ts,
EndTime: hlc.MaxTimestamp,
})
defer it.Close()
it.SeekGE(MVCCKey{Key: startKey})
for {
if ok, err := it.Valid(); err != nil {
b.Fatalf("failed incremental iteration: %+v", err)
} else if !ok {
break
}
it.Next()
}
}
}
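// BenchmarkMVCCIncrementalIterator benchmarks incremental iteration with and
// without the time-bound iterator optimization, varying how much of the data
// is excluded by the start time.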
func BenchmarkMVCCIncrementalIterator(b *testing.B) {
defer log.Scope(b).Close(b)
numVersions := 100
numKeys := 1000
// Mean of 50 versions * 1000 bytes results in more than one block per
// versioned key, so there is some chance of
// EnableTimeBoundIteratorOptimization=true being useful.
valueBytes := 1000
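	// setupMVCCPebbleWithBlockProperties opens a Pebble engine at a format
	// version that writes block property collectors, which the time-bound
	// optimization can take advantage of.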
setupMVCCPebbleWithBlockProperties := func(b testing.TB, dir string) Engine {
peb, err := Open(
context.Background(),
Filesystem(dir),
CacheSize(testCacheSize),
func(cfg *engineConfig) error {
cfg.Opts.FormatMajorVersion = pebble.FormatBlockPropertyCollector
return nil
})
if err != nil {
b.Fatalf("could not create new pebble instance at %s: %+v", dir, err)
}
return peb
}
for _, useTBI := range []bool{true, false} {
b.Run(fmt.Sprintf("useTBI=%v", useTBI), func(b *testing.B) {
for _, tsExcludePercent := range []float64{0, 0.95} {
wallTime := int64((5 * (float64(numVersions)*tsExcludePercent + 1)))
ts := hlc.Timestamp{WallTime: wallTime}
b.Run(fmt.Sprintf("ts=%d", ts.WallTime), func(b *testing.B) {
runIncrementalBenchmark(b, setupMVCCPebbleWithBlockProperties, useTBI, ts, benchDataOptions{
numVersions: numVersions,
numKeys: numKeys,
valueBytes: valueBytes,
})
})
}
})
}
}
// BenchmarkMVCCIncrementalIteratorForOldData is a benchmark for the case of
// finding old data when most data is in L6. This uses the MVCC timestamp to
// define age, for convenience, though it could be a different field in the
// key if one wrote a BlockPropertyCollector that could parse the key to find
// the field (for instance the crdb_internal_ttl_expiration used in
// https://github.com/cockroachdb/cockroach/pull/70241).
func BenchmarkMVCCIncrementalIteratorForOldData(b *testing.B) {
defer log.Scope(b).Close(b)
numKeys := 10000
// 1 in 400 keys is being looked for. Roughly corresponds to a TTL of
// slightly longer than 1 year, where each day, we run a pass to expire 1
// day of keys. The old keys are uniformly distributed in the key space,
// which is the worst case for block property filters.
keyAgeInterval := 400
setupMVCCPebbleWithBlockProperties := func(b *testing.B) Engine {
eng, err := Open(
context.Background(),
InMemory(),
// Use a small cache size. Scanning large tables with mostly cold data
// will mostly miss the cache (especially since the block cache is meant
// to be scan resistant).
CacheSize(1<<10),
func(cfg *engineConfig) error {
cfg.Opts.FormatMajorVersion = pebble.FormatBlockPropertyCollector
return nil
})
if err != nil {
b.Fatal(err)
}
return eng
}
baseTimestamp := int64(1000)
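	// setupData writes numKeys keys with deterministic pseudo-random values;
	// only every keyAgeInterval-th key is written at the old baseTimestamp, the
	// rest at newer timestamps.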
setupData := func(b *testing.B, eng Engine, valueSize int) {
// Generate the same data every time.
rng := rand.New(rand.NewSource(1449168817))
batch := eng.NewBatch()
for i := 0; i < numKeys; i++ {
if (i+1)%100 == 0 {
if err := batch.Commit(false /* sync */); err != nil {
b.Fatal(err)
}
batch.Close()
batch = eng.NewBatch()
}
key := encoding.EncodeUvarintAscending([]byte("key-"), uint64(i))
value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueSize))
value.InitChecksum(key)
ts := hlc.Timestamp{WallTime: baseTimestamp + 100*int64(i%keyAgeInterval)}
if err := MVCCPut(
context.Background(), batch, nil /* ms */, key, ts, value, nil); err != nil {
b.Fatal(err)
}
}
if err := eng.Flush(); err != nil {
b.Fatal(err)
}
if err := eng.Compact(); err != nil {
b.Fatal(err)
}
}
for _, valueSize := range []int{100, 500, 1000, 2000} {
eng := setupMVCCPebbleWithBlockProperties(b)
setupData(b, eng, valueSize)
b.Run(fmt.Sprintf("valueSize=%d", valueSize), func(b *testing.B) {
for _, useTBI := range []bool{true, false} {
b.Run(fmt.Sprintf("useTBI=%t", useTBI), func(b *testing.B) {
startKey := roachpb.Key(encoding.EncodeUvarintAscending([]byte("key-"), uint64(0)))
endKey := roachpb.Key(encoding.EncodeUvarintAscending([]byte("key-"), uint64(numKeys)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
it := NewMVCCIncrementalIterator(eng, MVCCIncrementalIterOptions{
EnableTimeBoundIteratorOptimization: useTBI,
EndKey: endKey,
StartTime: hlc.Timestamp{},
EndTime: hlc.Timestamp{WallTime: baseTimestamp},
})
it.SeekGE(MVCCKey{Key: startKey})
for {
if ok, err := it.Valid(); err != nil {
b.Fatalf("failed incremental iteration: %+v", err)
} else if !ok {
break
}
it.Next()
}
it.Close()
}
})
}
})
eng.Close()
}
}
| pkg/storage/mvcc_incremental_iterator_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.000453155895229429,
0.00017261356697417796,
0.00016121465887408704,
0.0001703559246379882,
0.000025440107492613606
] |
{
"id": 3,
"code_window": [
" ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): referenced database ID 52: referenced descriptor not found\n",
" ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n",
" ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): processed\n",
" ParentID 0, ParentSchemaID 0: namespace entry \"defaultdb\" (50): processed\n",
" ParentID 0, ParentSchemaID 0: namespace entry \"movr\" (52): descriptor not found\n",
" ParentID 0, ParentSchemaID 0: namespace entry \"postgres\" (51): processed\n",
" ParentID 0, ParentSchemaID 0: namespace entry \"system\" (1): processed\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): referenced database ID 52: referenced descriptor not found\n"
],
"file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose",
"type": "replace",
"edit_start_line_idx": 50
} | // Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tree
import (
"bytes"
"context"
"fmt"
"math"
"strings"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
"github.com/lib/pq/oid"
)
// SpecializedVectorizedBuiltin is used to map overloads
// to the vectorized operator that is specific to
// that implementation of the builtin function.
type SpecializedVectorizedBuiltin int
// TODO (rohany): What is the best place to put this list?
// I want to put it in builtins or exec, but those create an import
// cycle with exec. tree is imported by both of them, so
// this package seems like a good place to do it.
// Keep this list alphabetized so that it is easy to manage.
const (
_ SpecializedVectorizedBuiltin = iota
SubstringStringIntInt
)
// Overload is one of the overloads of a built-in function.
// Each FunctionDefinition may contain one or more overloads.
type Overload struct {
Types TypeList
ReturnType ReturnTyper
Volatility Volatility
// PreferredOverload determines overload resolution as follows.
	// When multiple overloads are eligible based on types even after all of
	// the heuristics to pick one have been used, if one of the overloads is an
	// Overload with the `PreferredOverload` flag set to true it can be selected
// rather than returning a no-such-method error.
// This should generally be avoided -- avoiding introducing ambiguous
// overloads in the first place is a much better solution -- and only done
// after consultation with @knz @nvanbenschoten.
PreferredOverload bool
// Info is a description of the function, which is surfaced on the CockroachDB
// docs site on the "Functions and Operators" page. Descriptions typically use
// third-person with the function as an implicit subject (e.g. "Calculates
// infinity"), but should focus more on ease of understanding so other structures
// might be more appropriate.
Info string
AggregateFunc func([]*types.T, *EvalContext, Datums) AggregateFunc
WindowFunc func([]*types.T, *EvalContext) WindowFunc
// Only one of the following three attributes can be set.
// Fn is the normal builtin implementation function. It's for functions that
// take in Datums and return a Datum.
Fn func(*EvalContext, Datums) (Datum, error)
// FnWithExprs is for builtins that need access to their arguments as Exprs
// and not pre-evaluated Datums, but is otherwise identical to Fn.
FnWithExprs func(*EvalContext, Exprs) (Datum, error)
// Generator is for SRFs. SRFs take Datums and return multiple rows of Datums.
Generator GeneratorFactory
// GeneratorWithExprs is for SRFs that need access to their arguments as Exprs
// and not pre-evaluated Datums, but is otherwise identical to Generator.
GeneratorWithExprs GeneratorWithExprsFactory
// SQLFn must be set for overloads of type SQLClass. It should return a SQL
// statement which will be executed as a common table expression in the query.
SQLFn func(*EvalContext, Datums) (string, error)
// counter, if non-nil, should be incremented upon successful
// type check of expressions using this overload.
counter telemetry.Counter
// SpecializedVecBuiltin is used to let the vectorized engine
// know when an Overload has a specialized vectorized operator.
SpecializedVecBuiltin SpecializedVectorizedBuiltin
	// IgnoreVolatilityCheck ignores checking the function overload's
// volatility against Postgres's volatility at test time.
// This should be used with caution.
IgnoreVolatilityCheck bool
// Oid is the cached oidHasher.BuiltinOid result for this Overload. It's
// populated at init-time.
Oid oid.Oid
// DistsqlBlocklist is set to true when a function cannot be evaluated in
// DistSQL. One example is when the type information for function arguments
// cannot be recovered.
DistsqlBlocklist bool
}
// params implements the overloadImpl interface.
func (b Overload) params() TypeList { return b.Types }
// returnType implements the overloadImpl interface.
func (b Overload) returnType() ReturnTyper { return b.ReturnType }
// preferred implements the overloadImpl interface.
func (b Overload) preferred() bool { return b.PreferredOverload }
// FixedReturnType returns a fixed type that the function returns, returning Any
// if the return type is based on the function's arguments.
func (b Overload) FixedReturnType() *types.T {
if b.ReturnType == nil {
return nil
}
return returnTypeToFixedType(b.ReturnType, nil)
}
// InferReturnTypeFromInputArgTypes returns the type that the function returns,
// inferring the type based on the function's inputTypes if necessary.
func (b Overload) InferReturnTypeFromInputArgTypes(inputTypes []*types.T) *types.T {
retTyp := b.FixedReturnType()
// If the output type of the function depends on its inputs, then
// the output of FixedReturnType will be ambiguous. In the ambiguous
// cases, use the information about the input types to construct the
// appropriate output type. The tree.ReturnTyper interface is
// []tree.TypedExpr -> *types.T, so construct the []tree.TypedExpr
// from the types that we know are the inputs. Note that we don't
// try to create datums of each input type, and instead use this
// "TypedDummy" construct. This is because some types don't have resident
// members (like an ENUM with no values), and we shouldn't error out
// trying to infer the return type in those cases.
if retTyp.IsAmbiguous() {
args := make([]TypedExpr, len(inputTypes))
for i, t := range inputTypes {
args[i] = &TypedDummy{Typ: t}
}
// Evaluate ReturnType with the fake input set of arguments.
retTyp = returnTypeToFixedType(b.ReturnType, args)
}
return retTyp
}
// IsGenerator returns true if the function is a set returning function (SRF).
func (b Overload) IsGenerator() bool {
return b.Generator != nil || b.GeneratorWithExprs != nil
}
// Signature returns a human-readable signature.
// If simplify is true, tuple-returning functions with just
// 1 tuple element unwrap the return type in the signature.
func (b Overload) Signature(simplify bool) string {
retType := b.FixedReturnType()
if simplify {
if retType.Family() == types.TupleFamily && len(retType.TupleContents()) == 1 {
retType = retType.TupleContents()[0]
}
}
return fmt.Sprintf("(%s) -> %s", b.Types.String(), retType)
}
// overloadImpl is an implementation of an overloaded function. It provides
// access to the parameter type list and the return type of the implementation.
//
// This is a more general type than Overload defined above, because it also
// works with the built-in binary and unary operators.
type overloadImpl interface {
params() TypeList
returnType() ReturnTyper
// allows manually resolving preference between multiple compatible overloads.
preferred() bool
}
var _ overloadImpl = &Overload{}
var _ overloadImpl = &UnaryOp{}
var _ overloadImpl = &BinOp{}
var _ overloadImpl = &CmpOp{}
// GetParamsAndReturnType gets the parameters and return type of an
// overloadImpl.
func GetParamsAndReturnType(impl overloadImpl) (TypeList, ReturnTyper) {
return impl.params(), impl.returnType()
}
// TypeList is a list of types representing a function parameter list.
type TypeList interface {
// Match checks if all types in the TypeList match the corresponding elements in types.
Match(types []*types.T) bool
// MatchAt checks if the parameter type at index i of the TypeList matches type typ.
// In all implementations, types.Null will match with each parameter type, allowing
// NULL values to be used as arguments.
MatchAt(typ *types.T, i int) bool
// MatchLen checks that the TypeList can support l parameters.
MatchLen(l int) bool
// GetAt returns the type at the given index in the TypeList, or nil if the TypeList
// cannot have a parameter at index i.
GetAt(i int) *types.T
	// Length returns the number of types in the list.
	Length() int
	// Types returns a realized copy of the list. Variadic lists return a list of size one.
	Types() []*types.T
	// String returns a human-readable signature.
String() string
}
var _ TypeList = ArgTypes{}
var _ TypeList = HomogeneousType{}
var _ TypeList = VariadicType{}
// ArgTypes is a TypeList implementation that additionally keeps a string name
// for each argument and uses those names when printing the human-readable
// signature.
type ArgTypes []struct {
Name string
Typ *types.T
}
// Match is part of the TypeList interface.
func (a ArgTypes) Match(types []*types.T) bool {
if len(types) != len(a) {
return false
}
for i := range types {
if !a.MatchAt(types[i], i) {
return false
}
}
return true
}
// MatchAt is part of the TypeList interface.
func (a ArgTypes) MatchAt(typ *types.T, i int) bool {
// The parameterized types for Tuples are checked in the type checking
// routines before getting here, so we only need to check if the argument
// type is a types.TUPLE below. This allows us to avoid defining overloads
// for types.Tuple{}, types.Tuple{types.Any}, types.Tuple{types.Any, types.Any},
// etc. for Tuple operators.
if typ.Family() == types.TupleFamily {
typ = types.AnyTuple
}
return i < len(a) && (typ.Family() == types.UnknownFamily || a[i].Typ.Equivalent(typ))
}
// MatchLen is part of the TypeList interface.
func (a ArgTypes) MatchLen(l int) bool {
return len(a) == l
}
// GetAt is part of the TypeList interface.
func (a ArgTypes) GetAt(i int) *types.T {
return a[i].Typ
}
// Length is part of the TypeList interface.
func (a ArgTypes) Length() int {
return len(a)
}
// Types is part of the TypeList interface.
func (a ArgTypes) Types() []*types.T {
n := len(a)
ret := make([]*types.T, n)
for i, s := range a {
ret[i] = s.Typ
}
return ret
}
func (a ArgTypes) String() string {
var s strings.Builder
for i, arg := range a {
if i > 0 {
s.WriteString(", ")
}
s.WriteString(arg.Name)
s.WriteString(": ")
s.WriteString(arg.Typ.String())
}
return s.String()
}
// HomogeneousType is a TypeList implementation that accepts any arguments, as
// long as all are the same type or NULL. The homogeneous constraint is enforced
// in typeCheckOverloadedExprs.
type HomogeneousType struct{}
// Match is part of the TypeList interface.
func (HomogeneousType) Match(types []*types.T) bool {
return true
}
// MatchAt is part of the TypeList interface.
func (HomogeneousType) MatchAt(typ *types.T, i int) bool {
return true
}
// MatchLen is part of the TypeList interface.
func (HomogeneousType) MatchLen(l int) bool {
return true
}
// GetAt is part of the TypeList interface.
func (HomogeneousType) GetAt(i int) *types.T {
return types.Any
}
// Length is part of the TypeList interface.
func (HomogeneousType) Length() int {
return 1
}
// Types is part of the TypeList interface.
func (HomogeneousType) Types() []*types.T {
return []*types.T{types.Any}
}
func (HomogeneousType) String() string {
return "anyelement..."
}
// VariadicType is a TypeList implementation which accepts a fixed number of
// arguments at the beginning and an arbitrary number of homogeneous arguments
// at the end.
type VariadicType struct {
FixedTypes []*types.T
VarType *types.T
}
// Match is part of the TypeList interface.
func (v VariadicType) Match(types []*types.T) bool {
for i := range types {
if !v.MatchAt(types[i], i) {
return false
}
}
return true
}
// MatchAt is part of the TypeList interface.
func (v VariadicType) MatchAt(typ *types.T, i int) bool {
if i < len(v.FixedTypes) {
return typ.Family() == types.UnknownFamily || v.FixedTypes[i].Equivalent(typ)
}
return typ.Family() == types.UnknownFamily || v.VarType.Equivalent(typ)
}
// MatchLen is part of the TypeList interface.
func (v VariadicType) MatchLen(l int) bool {
return l >= len(v.FixedTypes)
}
// GetAt is part of the TypeList interface.
func (v VariadicType) GetAt(i int) *types.T {
if i < len(v.FixedTypes) {
return v.FixedTypes[i]
}
return v.VarType
}
// Length is part of the TypeList interface.
func (v VariadicType) Length() int {
return len(v.FixedTypes) + 1
}
// Types is part of the TypeList interface.
func (v VariadicType) Types() []*types.T {
result := make([]*types.T, len(v.FixedTypes)+1)
for i := range v.FixedTypes {
result[i] = v.FixedTypes[i]
}
result[len(result)-1] = v.VarType
return result
}
func (v VariadicType) String() string {
var s bytes.Buffer
for i, t := range v.FixedTypes {
if i != 0 {
s.WriteString(", ")
}
s.WriteString(t.String())
}
if len(v.FixedTypes) > 0 {
s.WriteString(", ")
}
fmt.Fprintf(&s, "%s...", v.VarType)
return s.String()
}
// UnknownReturnType is returned from ReturnTypers when the arguments provided are
// not sufficient to determine a return type. This is necessary for cases like overload
// resolution, where the argument types are not resolved yet so the type-level function
// will be called without argument types. If a ReturnTyper returns unknownReturnType,
// then the candidate function set cannot be refined. This means that only ReturnTypers
// that never return unknownReturnType, like those created with FixedReturnType, can
// help reduce overload ambiguity.
var UnknownReturnType *types.T
// ReturnTyper defines the type-level function in which a builtin function's return type
// is determined. ReturnTypers should make sure to return unknownReturnType when necessary.
type ReturnTyper func(args []TypedExpr) *types.T
// FixedReturnType functions simply return a fixed type, independent of argument types.
func FixedReturnType(typ *types.T) ReturnTyper {
return func(args []TypedExpr) *types.T { return typ }
}
// IdentityReturnType creates a returnType that is a projection of the idx'th
// argument type.
func IdentityReturnType(idx int) ReturnTyper {
return func(args []TypedExpr) *types.T {
if len(args) == 0 {
return UnknownReturnType
}
return args[idx].ResolvedType()
}
}
// ArrayOfFirstNonNullReturnType returns an array type from the first non-null
// type in the argument list.
func ArrayOfFirstNonNullReturnType() ReturnTyper {
return func(args []TypedExpr) *types.T {
if len(args) == 0 {
return UnknownReturnType
}
for _, arg := range args {
if t := arg.ResolvedType(); t.Family() != types.UnknownFamily {
return types.MakeArray(t)
}
}
return types.Unknown
}
}
// FirstNonNullReturnType returns the type of the first non-null argument, or
// types.Unknown if all arguments are null. There must be at least one argument,
// or else FirstNonNullReturnType returns UnknownReturnType. This method is used
// with HomogeneousType functions, in which all arguments have been checked to
// have the same type (or be null).
func FirstNonNullReturnType() ReturnTyper {
return func(args []TypedExpr) *types.T {
if len(args) == 0 {
return UnknownReturnType
}
for _, arg := range args {
if t := arg.ResolvedType(); t.Family() != types.UnknownFamily {
return t
}
}
return types.Unknown
}
}
func returnTypeToFixedType(s ReturnTyper, inputTyps []TypedExpr) *types.T {
if t := s(inputTyps); t != UnknownReturnType {
return t
}
return types.Any
}
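// typeCheckOverloadState holds the working state for typeCheckOverloadedExprs:
// the overload candidates still under consideration and the argument
// expressions, bucketed into constants, placeholders, and resolvable
// expressions.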
type typeCheckOverloadState struct {
overloads []overloadImpl
overloadIdxs []uint8 // index into overloads
exprs []Expr
typedExprs []TypedExpr
resolvableIdxs []int // index into exprs/typedExprs
constIdxs []int // index into exprs/typedExprs
placeholderIdxs []int // index into exprs/typedExprs
}
// typeCheckOverloadedExprs determines the correct overload to use for the given set of
// expression parameters, along with an optional desired return type. It returns the expression
// parameters after being type checked, along with a slice of candidate overloadImpls. The
// slice may have length:
// 0: overload resolution failed because no compatible overloads were found
// 1: overload resolution succeeded
// 2+: overload resolution failed because of ambiguity
// The inBinOp parameter denotes whether this type check is occurring within a binary operator,
// in which case we may need to make a guess that the two parameters are of the same type if one
// of them is NULL.
func typeCheckOverloadedExprs(
ctx context.Context,
semaCtx *SemaContext,
desired *types.T,
overloads []overloadImpl,
inBinOp bool,
exprs ...Expr,
) ([]TypedExpr, []overloadImpl, error) {
if len(overloads) > math.MaxUint8 {
return nil, nil, errors.AssertionFailedf("too many overloads (%d > 255)", len(overloads))
}
var s typeCheckOverloadState
s.exprs = exprs
s.overloads = overloads
// Special-case the HomogeneousType overload. We determine its return type by checking that
// all parameters have the same type.
for i, overload := range overloads {
// Only one overload can be provided if it has parameters with HomogeneousType.
if _, ok := overload.params().(HomogeneousType); ok {
if len(overloads) > 1 {
return nil, nil, errors.AssertionFailedf(
"only one overload can have HomogeneousType parameters")
}
typedExprs, _, err := TypeCheckSameTypedExprs(ctx, semaCtx, desired, exprs...)
if err != nil {
return nil, nil, err
}
return typedExprs, overloads[i : i+1], nil
}
}
// Hold the resolved type expressions of the provided exprs, in order.
s.typedExprs = make([]TypedExpr, len(exprs))
s.constIdxs, s.placeholderIdxs, s.resolvableIdxs = typeCheckSplitExprs(ctx, semaCtx, exprs)
// If no overloads are provided, just type check parameters and return.
if len(overloads) == 0 {
for _, i := range s.resolvableIdxs {
typ, err := exprs[i].TypeCheck(ctx, semaCtx, types.Any)
if err != nil {
return nil, nil, pgerror.Wrapf(err, pgcode.InvalidParameterValue,
"error type checking resolved expression:")
}
s.typedExprs[i] = typ
}
if err := defaultTypeCheck(ctx, semaCtx, &s, false); err != nil {
return nil, nil, err
}
return s.typedExprs, nil, nil
}
s.overloadIdxs = make([]uint8, len(overloads))
for i := 0; i < len(overloads); i++ {
s.overloadIdxs[i] = uint8(i)
}
// Filter out incorrect parameter length overloads.
s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs,
func(o overloadImpl) bool {
return o.params().MatchLen(len(exprs))
})
// Filter out overloads which constants cannot become.
for _, i := range s.constIdxs {
constExpr := exprs[i].(Constant)
s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs,
func(o overloadImpl) bool {
return canConstantBecome(constExpr, o.params().GetAt(i))
})
}
// TODO(nvanbenschoten): We should add a filtering step here to filter
// out impossible candidates based on identical parameters. For instance,
// f(int, float) is not a possible candidate for the expression f($1, $1).
// Filter out overloads on resolved types.
for _, i := range s.resolvableIdxs {
paramDesired := types.Any
// If all remaining candidates require the same type for this parameter,
// begin desiring that type for the corresponding argument expression.
// Note that this is always the case when we have a single overload left.
var sameType *types.T
for _, ovIdx := range s.overloadIdxs {
typ := s.overloads[ovIdx].params().GetAt(i)
if sameType == nil {
sameType = typ
} else if !typ.Identical(sameType) {
sameType = nil
break
}
}
if sameType != nil {
paramDesired = sameType
}
typ, err := exprs[i].TypeCheck(ctx, semaCtx, paramDesired)
if err != nil {
return nil, nil, err
}
s.typedExprs[i] = typ
s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs,
func(o overloadImpl) bool {
return o.params().MatchAt(typ.ResolvedType(), i)
})
}
// At this point, all remaining overload candidates accept the argument list,
// so we begin checking for a single remaining candidate implementation to choose.
// In case there is more than one candidate remaining, the following code uses
// heuristics to find a most preferable candidate.
if ok, typedExprs, fns, err := checkReturn(ctx, semaCtx, &s); ok {
return typedExprs, fns, err
}
// The first heuristic is to prefer candidates that return the desired type,
// if a desired type was provided.
if desired.Family() != types.AnyFamily {
s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs,
func(o overloadImpl) bool {
// For now, we only filter on the return type for overloads with
// fixed return types. This could be improved, but is not currently
// critical because we have no cases of functions with multiple
// overloads that do not all expose FixedReturnTypes.
if t := o.returnType()(nil); t != UnknownReturnType {
return t.Equivalent(desired)
}
return true
})
if ok, typedExprs, fns, err := checkReturn(ctx, semaCtx, &s); ok {
return typedExprs, fns, err
}
}
var homogeneousTyp *types.T
if len(s.resolvableIdxs) > 0 {
homogeneousTyp = s.typedExprs[s.resolvableIdxs[0]].ResolvedType()
for _, i := range s.resolvableIdxs[1:] {
if !homogeneousTyp.Equivalent(s.typedExprs[i].ResolvedType()) {
homogeneousTyp = nil
break
}
}
}
if len(s.constIdxs) > 0 {
allConstantsAreHomogenous := false
if ok, typedExprs, fns, err := filterAttempt(ctx, semaCtx, &s, func() {
// The second heuristic is to prefer candidates where all constants can
// become a homogeneous type, if all resolvable expressions became one.
// This is only possible if resolvable expressions were resolved
// homogeneously up to this point.
if homogeneousTyp != nil {
allConstantsAreHomogenous = true
for _, i := range s.constIdxs {
if !canConstantBecome(exprs[i].(Constant), homogeneousTyp) {
allConstantsAreHomogenous = false
break
}
}
if allConstantsAreHomogenous {
for _, i := range s.constIdxs {
s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs,
func(o overloadImpl) bool {
return o.params().GetAt(i).Equivalent(homogeneousTyp)
})
}
}
}
}); ok {
return typedExprs, fns, err
}
if ok, typedExprs, fns, err := filterAttempt(ctx, semaCtx, &s, func() {
// The third heuristic is to prefer candidates where all constants can
// become their "natural" types.
for _, i := range s.constIdxs {
natural := naturalConstantType(exprs[i].(Constant))
if natural != nil {
s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs,
func(o overloadImpl) bool {
return o.params().GetAt(i).Equivalent(natural)
})
}
}
}); ok {
return typedExprs, fns, err
}
// At this point, it's worth seeing if we have constants that can't actually
// parse as the type that canConstantBecome claims they can. For example,
// every string literal will report that it can become an interval, but most
// string literals do not encode valid intervals. This may uncover some
// overloads with invalid type signatures.
//
// This parsing is sufficiently expensive (see the comment on
// StrVal.AvailableTypes) that we wait until now, when we've eliminated most
// overloads from consideration, so that we only need to check each constant
// against a limited set of types. We can't hold off on this parsing any
// longer, though: the remaining heuristics are overly aggressive and will
// falsely reject the only valid overload in some cases.
//
// This case is broken into two parts. We first attempt to use the
// information about the homogeneity of our constants collected by previous
// heuristic passes. If:
// * all our constants are homogeneous
// * we only have a single overload left
// * the constant overload parameters are homogeneous as well
// then match this overload with the homogeneous constants. Otherwise,
// continue to filter overloads by whether or not the constants can parse
// into the desired types of the overloads.
// This first case is important when resolving overloads for operations
// between user-defined types, where we need to propagate the concrete
// resolved type information over to the constants, rather than attempting
// to resolve constants as the placeholder type for the user defined type
// family (like `AnyEnum`).
if len(s.overloadIdxs) == 1 && allConstantsAreHomogenous {
overloadParamsAreHomogenous := true
p := s.overloads[s.overloadIdxs[0]].params()
for _, i := range s.constIdxs {
if !p.GetAt(i).Equivalent(homogeneousTyp) {
overloadParamsAreHomogenous = false
break
}
}
if overloadParamsAreHomogenous {
// Type check our constants using the homogeneous type rather than
// the type in overload parameter. This lets us type check user defined
// types with a concrete type instance, rather than an ambiguous type.
for _, i := range s.constIdxs {
typ, err := s.exprs[i].TypeCheck(ctx, semaCtx, homogeneousTyp)
if err != nil {
return nil, nil, err
}
s.typedExprs[i] = typ
}
_, typedExprs, fn, err := checkReturnPlaceholdersAtIdx(ctx, semaCtx, &s, int(s.overloadIdxs[0]))
return typedExprs, fn, err
}
}
for _, i := range s.constIdxs {
constExpr := exprs[i].(Constant)
s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs,
func(o overloadImpl) bool {
semaCtx := MakeSemaContext()
_, err := constExpr.ResolveAsType(ctx, &semaCtx, o.params().GetAt(i))
return err == nil
})
}
if ok, typedExprs, fn, err := checkReturn(ctx, semaCtx, &s); ok {
return typedExprs, fn, err
}
	// The fourth heuristic is to prefer candidates that accept the "best"
// mutual type in the resolvable type set of all constants.
if bestConstType, ok := commonConstantType(s.exprs, s.constIdxs); ok {
for _, i := range s.constIdxs {
s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs,
func(o overloadImpl) bool {
return o.params().GetAt(i).Equivalent(bestConstType)
})
}
if ok, typedExprs, fns, err := checkReturn(ctx, semaCtx, &s); ok {
return typedExprs, fns, err
}
if homogeneousTyp != nil {
if !homogeneousTyp.Equivalent(bestConstType) {
homogeneousTyp = nil
}
} else {
homogeneousTyp = bestConstType
}
}
}
// The fifth heuristic is to defer to preferred candidates, if one has been
// specified in the overload list.
if ok, typedExprs, fns, err := filterAttempt(ctx, semaCtx, &s, func() {
s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs, func(o overloadImpl) bool {
return o.preferred()
})
}); ok {
return typedExprs, fns, err
}
// The sixth heuristic is to prefer candidates where all placeholders can be
// given the same type as all constants and resolvable expressions. This is
// only possible if all constants and resolvable expressions were resolved
// homogeneously up to this point.
if homogeneousTyp != nil && len(s.placeholderIdxs) > 0 {
// Before we continue, try to propagate the homogeneous type to the
// placeholders. This might not have happened yet, if the overloads'
// parameter types are ambiguous (like in the case of tuple-tuple binary
// operators).
for _, i := range s.placeholderIdxs {
if _, err := exprs[i].TypeCheck(ctx, semaCtx, homogeneousTyp); err != nil {
return nil, nil, err
}
s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs,
func(o overloadImpl) bool {
return o.params().GetAt(i).Equivalent(homogeneousTyp)
})
}
if ok, typedExprs, fns, err := checkReturn(ctx, semaCtx, &s); ok {
return typedExprs, fns, err
}
}
// This is a total hack for AnyEnum whilst we don't have postgres type resolution.
// This enables AnyEnum array ops to not need a cast, e.g. array['a']::enum[] = '{a}'.
// If we have one remaining candidate containing AnyEnum, cast all remaining
// arguments to a known enum and check that the rest match. This is a poor man's
// implicit cast / postgres "same argument" resolution clone.
if len(s.overloadIdxs) == 1 {
params := s.overloads[s.overloadIdxs[0]].params()
var knownEnum *types.T
// Check we have all "AnyEnum" (or "AnyEnum" array) arguments and that
// one argument is typed with an enum.
attemptAnyEnumCast := func() bool {
for i := 0; i < params.Length(); i++ {
typ := params.GetAt(i)
// Note we are deliberately looking at whether the built-in takes in
// AnyEnum as an argument, not the exprs given to the overload itself.
if !(typ.Identical(types.AnyEnum) || typ.Identical(types.MakeArray(types.AnyEnum))) {
return false
}
if s.typedExprs[i] != nil {
// Assign the known enum if it was previously unassigned.
// Otherwise, double check it matches a previously defined enum.
posEnum := s.typedExprs[i].ResolvedType()
if !posEnum.UserDefined() {
return false
}
if posEnum.Family() == types.ArrayFamily {
posEnum = posEnum.ArrayContents()
}
if knownEnum == nil {
knownEnum = posEnum
} else if !posEnum.Identical(knownEnum) {
return false
}
}
}
return knownEnum != nil
}()
// If we have all arguments as AnyEnum, and we know at least one of the
// enum's actual type, try type cast the rest.
if attemptAnyEnumCast {
// Copy exprs to prevent any overwrites of underlying s.exprs array later.
sCopy := s
sCopy.exprs = make([]Expr, len(s.exprs))
copy(sCopy.exprs, s.exprs)
if ok, typedExprs, fns, err := filterAttempt(ctx, semaCtx, &sCopy, func() {
for _, idx := range append(s.constIdxs, s.placeholderIdxs...) {
p := params.GetAt(idx)
typCast := knownEnum
if p.Family() == types.ArrayFamily {
typCast = types.MakeArray(knownEnum)
}
sCopy.exprs[idx] = &CastExpr{Expr: sCopy.exprs[idx], Type: typCast, SyntaxMode: CastShort}
}
}); ok {
return typedExprs, fns, err
}
}
}
// In a binary expression, in the case of one of the arguments being untyped NULL,
// we prefer overloads where we infer the type of the NULL to be the same as the
// other argument. This is used to differentiate the behavior of
// STRING[] || NULL and STRING || NULL.
if inBinOp && len(s.exprs) == 2 {
if ok, typedExprs, fns, err := filterAttempt(ctx, semaCtx, &s, func() {
var err error
left := s.typedExprs[0]
if left == nil {
left, err = s.exprs[0].TypeCheck(ctx, semaCtx, types.Any)
if err != nil {
return
}
}
right := s.typedExprs[1]
if right == nil {
right, err = s.exprs[1].TypeCheck(ctx, semaCtx, types.Any)
if err != nil {
return
}
}
leftType := left.ResolvedType()
rightType := right.ResolvedType()
leftIsNull := leftType.Family() == types.UnknownFamily
rightIsNull := rightType.Family() == types.UnknownFamily
oneIsNull := (leftIsNull || rightIsNull) && !(leftIsNull && rightIsNull)
if oneIsNull {
if leftIsNull {
leftType = rightType
}
if rightIsNull {
rightType = leftType
}
s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs,
func(o overloadImpl) bool {
return o.params().GetAt(0).Equivalent(leftType) &&
o.params().GetAt(1).Equivalent(rightType)
})
}
}); ok {
return typedExprs, fns, err
}
}
// After the previous heuristic, in a binary expression, in the case of one of the arguments being untyped
// NULL, we prefer overloads where we infer the type of the NULL to be a STRING. This is used
// to choose INT || NULL::STRING over INT || NULL::INT[].
if inBinOp && len(s.exprs) == 2 {
if ok, typedExprs, fns, err := filterAttempt(ctx, semaCtx, &s, func() {
var err error
left := s.typedExprs[0]
if left == nil {
left, err = s.exprs[0].TypeCheck(ctx, semaCtx, types.Any)
if err != nil {
return
}
}
right := s.typedExprs[1]
if right == nil {
right, err = s.exprs[1].TypeCheck(ctx, semaCtx, types.Any)
if err != nil {
return
}
}
leftType := left.ResolvedType()
rightType := right.ResolvedType()
leftIsNull := leftType.Family() == types.UnknownFamily
rightIsNull := rightType.Family() == types.UnknownFamily
oneIsNull := (leftIsNull || rightIsNull) && !(leftIsNull && rightIsNull)
if oneIsNull {
if leftIsNull {
leftType = types.String
}
if rightIsNull {
rightType = types.String
}
s.overloadIdxs = filterOverloads(s.overloads, s.overloadIdxs,
func(o overloadImpl) bool {
return o.params().GetAt(0).Equivalent(leftType) &&
o.params().GetAt(1).Equivalent(rightType)
})
}
}); ok {
return typedExprs, fns, err
}
}
if err := defaultTypeCheck(ctx, semaCtx, &s, len(s.overloads) > 0); err != nil {
return nil, nil, err
}
possibleOverloads := make([]overloadImpl, len(s.overloadIdxs))
for i, o := range s.overloadIdxs {
possibleOverloads[i] = s.overloads[o]
}
return s.typedExprs, possibleOverloads, nil
}
// filterAttempt attempts to filter the overloads down to a single candidate.
// If it succeeds, it will return true, along with the overload (in a slice for
// convenience) and a possible error. If it fails, it will return false and
// undo any filtering performed during the attempt.
func filterAttempt(
ctx context.Context, semaCtx *SemaContext, s *typeCheckOverloadState, attempt func(),
) (ok bool, _ []TypedExpr, _ []overloadImpl, _ error) {
before := s.overloadIdxs
attempt()
if len(s.overloadIdxs) == 1 {
ok, typedExprs, fns, err := checkReturn(ctx, semaCtx, s)
if err != nil {
return false, nil, nil, err
}
if ok {
return true, typedExprs, fns, err
}
}
s.overloadIdxs = before
return false, nil, nil, nil
}
// filterOverloads filters overloads which do not satisfy the predicate.
func filterOverloads(
overloads []overloadImpl, overloadIdxs []uint8, fn func(overloadImpl) bool,
) []uint8 {
for i := 0; i < len(overloadIdxs); {
if fn(overloads[overloadIdxs[i]]) {
i++
} else {
overloadIdxs[i], overloadIdxs[len(overloadIdxs)-1] = overloadIdxs[len(overloadIdxs)-1], overloadIdxs[i]
overloadIdxs = overloadIdxs[:len(overloadIdxs)-1]
}
}
return overloadIdxs
}
// defaultTypeCheck type checks the constant and placeholder expressions without a preference
// and adds them to the type checked slice.
func defaultTypeCheck(
ctx context.Context, semaCtx *SemaContext, s *typeCheckOverloadState, errorOnPlaceholders bool,
) error {
for _, i := range s.constIdxs {
typ, err := s.exprs[i].TypeCheck(ctx, semaCtx, types.Any)
if err != nil {
return pgerror.Wrapf(err, pgcode.InvalidParameterValue,
"error type checking constant value")
}
s.typedExprs[i] = typ
}
for _, i := range s.placeholderIdxs {
if errorOnPlaceholders {
_, err := s.exprs[i].TypeCheck(ctx, semaCtx, types.Any)
return err
}
		// If we don't want to error on placeholders, avoid type checking them without a desired type.
s.typedExprs[i] = StripParens(s.exprs[i]).(*Placeholder)
}
return nil
}
// checkReturn checks the number of remaining overloaded function
// implementations.
// It returns ok=true if we should stop overload resolution, returning either
// 1. the chosen overload in a slice, or
// 2. nil,
// along with the typed arguments.
// This modifies values within s as scratch slices, but only in the case where
// it returns true, which signals to the calling function that it should
// immediately return, so any mutations to s are irrelevant.
func checkReturn(
ctx context.Context, semaCtx *SemaContext, s *typeCheckOverloadState,
) (ok bool, _ []TypedExpr, _ []overloadImpl, _ error) {
switch len(s.overloadIdxs) {
case 0:
if err := defaultTypeCheck(ctx, semaCtx, s, false); err != nil {
return false, nil, nil, err
}
return true, s.typedExprs, nil, nil
case 1:
idx := s.overloadIdxs[0]
o := s.overloads[idx]
p := o.params()
for _, i := range s.constIdxs {
des := p.GetAt(i)
typ, err := s.exprs[i].TypeCheck(ctx, semaCtx, des)
if err != nil {
return false, s.typedExprs, nil, pgerror.Wrapf(
err, pgcode.InvalidParameterValue,
"error type checking constant value",
)
}
if des != nil && !typ.ResolvedType().Equivalent(des) {
return false, nil, nil, errors.AssertionFailedf(
"desired constant value type %s but set type %s",
log.Safe(des), log.Safe(typ.ResolvedType()),
)
}
s.typedExprs[i] = typ
}
return checkReturnPlaceholdersAtIdx(ctx, semaCtx, s, int(idx))
default:
return false, nil, nil, nil
}
}
// checkReturnPlaceholdersAtIdx checks that the placeholders for the
// overload at the input index are valid. It has the same return values
// as checkReturn.
func checkReturnPlaceholdersAtIdx(
ctx context.Context, semaCtx *SemaContext, s *typeCheckOverloadState, idx int,
) (bool, []TypedExpr, []overloadImpl, error) {
o := s.overloads[idx]
p := o.params()
for _, i := range s.placeholderIdxs {
des := p.GetAt(i)
typ, err := s.exprs[i].TypeCheck(ctx, semaCtx, des)
if err != nil {
if des.IsAmbiguous() {
return false, nil, nil, nil
}
return false, nil, nil, err
}
s.typedExprs[i] = typ
}
return true, s.typedExprs, s.overloads[idx : idx+1], nil
}
func formatCandidates(prefix string, candidates []overloadImpl) string {
var buf bytes.Buffer
for _, candidate := range candidates {
buf.WriteString(prefix)
buf.WriteByte('(')
params := candidate.params()
tLen := params.Length()
inputTyps := make([]TypedExpr, tLen)
for i := 0; i < tLen; i++ {
t := params.GetAt(i)
inputTyps[i] = &TypedDummy{Typ: t}
if i > 0 {
buf.WriteString(", ")
}
buf.WriteString(t.String())
}
buf.WriteString(") -> ")
buf.WriteString(returnTypeToFixedType(candidate.returnType(), inputTyps).String())
if candidate.preferred() {
buf.WriteString(" [preferred]")
}
buf.WriteByte('\n')
}
return buf.String()
}
| pkg/sql/sem/tree/overload.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.0003708941803779453,
0.00017061599646694958,
0.00015604721556883305,
0.0001686929608695209,
0.00002005200985877309
] |
{
"id": 3,
"code_window": [
" ParentID 52, ParentSchemaID 29: relation \"vehicle_location_histories\" (56): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): referenced database ID 52: referenced descriptor not found\n",
" ParentID 52, ParentSchemaID 29: relation \"promo_codes\" (57): processed\n",
" ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): constraint id was missing for constraint: FOREIGN KEY with name \"fk_city_ref_users\"\n",
" ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): processed\n",
" ParentID 0, ParentSchemaID 0: namespace entry \"defaultdb\" (50): processed\n",
" ParentID 0, ParentSchemaID 0: namespace entry \"movr\" (52): descriptor not found\n",
" ParentID 0, ParentSchemaID 0: namespace entry \"postgres\" (51): processed\n",
" ParentID 0, ParentSchemaID 0: namespace entry \"system\" (1): processed\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" ParentID 52, ParentSchemaID 29: relation \"user_promo_codes\" (58): referenced database ID 52: referenced descriptor not found\n"
],
"file_path": "pkg/cli/testdata/doctor/test_examine_zipdir_verbose",
"type": "replace",
"edit_start_line_idx": 50
} | # If you change this file, regenerate the certificate with
# openssl req -new -x509 -sha256 -key testserver.key -out testserver.crt -days 3650 -config testserver_config.cnf
#
default_bits = 2048
distinguished_name = dn
x509_extensions = san
req_extensions = san
extensions = san
prompt = no
[ dn ]
organizationName = MyCompany
[ san ]
subjectAltName = DNS:localhost
| pkg/ccl/sqlproxyccl/testdata/testserver_config.cnf | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.0001688458287389949,
0.0001660445996094495,
0.00016324337047990412,
0.0001660445996094495,
0.0000028012291295453906
] |
{
"id": 4,
"code_window": [
"\tdefer tc.Stopper().Stop(ctx)\n",
"\tsqlDB := tc.ServerConn(0)\n",
"\ttdb := sqlutils.MakeSQLRunner(sqlDB)\n",
"\t// Create table with a primary key constraint.\n",
"\ttdb.Exec(t, \"CREATE TABLE t(name int primary key)\")\n",
"\t// Validate the comments on constraints are blocked.\n",
"\ttdb.ExpectErr(t,\n",
"\t\t\"pq: cannot comment on constraint\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\ttdb.Exec(t, \"CREATE TABLE dep_t (fk INT8 PRIMARY KEY);\")\n",
"\ttdb.Exec(t,\n",
"\t\t`CREATE TABLE t (\n",
"\tname INT8 PRIMARY KEY,\n",
"\tval INT8 REFERENCES dep_t (fk),\n",
"\tval2 INT8 CHECK (val2 > 0)\n",
");`,\n",
"\t)\n",
"\ttdb.Exec(t,\n",
"\t\t`CREATE TABLE t_fk_dst (name INT8 PRIMARY KEY, val INT8 REFERENCES t (name));`)\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "replace",
"edit_start_line_idx": 58
} | debug doctor zipdir --verbose
----
debug doctor zipdir testdata/doctor/debugzip 21.11-52 --verbose
reading testdata/doctor/debugzip/system.descriptor.txt
reading testdata/doctor/debugzip/system.namespace.txt
WARNING: errors occurred during the production of system.jobs.txt, contents may be missing or incomplete.
reading testdata/doctor/debugzip/system.jobs.txt
Examining 37 descriptors and 42 namespace entries...
ParentID 0, ParentSchemaID 0: database "system" (1): processed
ParentID 1, ParentSchemaID 29: relation "descriptor" (3): processed
ParentID 1, ParentSchemaID 29: relation "users" (4): processed
ParentID 1, ParentSchemaID 29: relation "zones" (5): processed
ParentID 1, ParentSchemaID 29: relation "settings" (6): processed
ParentID 1, ParentSchemaID 29: relation "tenants" (8): processed
ParentID 1, ParentSchemaID 29: relation "lease" (11): processed
ParentID 1, ParentSchemaID 29: relation "eventlog" (12): processed
ParentID 1, ParentSchemaID 29: relation "rangelog" (13): processed
ParentID 1, ParentSchemaID 29: relation "ui" (14): processed
ParentID 1, ParentSchemaID 29: relation "jobs" (15): processed
ParentID 1, ParentSchemaID 29: relation "web_sessions" (19): processed
ParentID 1, ParentSchemaID 29: relation "table_statistics" (20): processed
ParentID 1, ParentSchemaID 29: relation "locations" (21): processed
ParentID 1, ParentSchemaID 29: relation "role_members" (23): processed
ParentID 1, ParentSchemaID 29: relation "comments" (24): processed
ParentID 1, ParentSchemaID 29: relation "replication_constraint_stats" (25): processed
ParentID 1, ParentSchemaID 29: relation "replication_critical_localities" (26): processed
ParentID 1, ParentSchemaID 29: relation "replication_stats" (27): processed
ParentID 1, ParentSchemaID 29: relation "reports_meta" (28): processed
ParentID 1, ParentSchemaID 29: relation "namespace" (30): processed
ParentID 1, ParentSchemaID 29: relation "protected_ts_meta" (31): processed
ParentID 1, ParentSchemaID 29: relation "protected_ts_records" (32): processed
ParentID 1, ParentSchemaID 29: relation "role_options" (33): processed
ParentID 1, ParentSchemaID 29: relation "statement_bundle_chunks" (34): processed
ParentID 1, ParentSchemaID 29: relation "statement_diagnostics_requests" (35): processed
ParentID 1, ParentSchemaID 29: relation "statement_diagnostics" (36): processed
ParentID 1, ParentSchemaID 29: relation "scheduled_jobs" (37): processed
ParentID 1, ParentSchemaID 29: relation "sqlliveness" (39): processed
ParentID 0, ParentSchemaID 0: database "defaultdb" (50): processed
ParentID 0, ParentSchemaID 0: database "postgres" (51): processed
ParentID 52, ParentSchemaID 29: relation "users" (53): referenced database ID 52: referenced descriptor not found
ParentID 52, ParentSchemaID 29: relation "users" (53): processed
ParentID 52, ParentSchemaID 29: relation "vehicles" (54): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users"
ParentID 52, ParentSchemaID 29: relation "vehicles" (54): processed
ParentID 52, ParentSchemaID 29: relation "rides" (55): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users"
ParentID 52, ParentSchemaID 29: relation "rides" (55): constraint id was missing for constraint: FOREIGN KEY with name "fk_vehicle_city_ref_vehicles"
ParentID 52, ParentSchemaID 29: relation "rides" (55): processed
ParentID 52, ParentSchemaID 29: relation "vehicle_location_histories" (56): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_rides"
ParentID 52, ParentSchemaID 29: relation "vehicle_location_histories" (56): processed
ParentID 52, ParentSchemaID 29: relation "promo_codes" (57): referenced database ID 52: referenced descriptor not found
ParentID 52, ParentSchemaID 29: relation "promo_codes" (57): processed
ParentID 52, ParentSchemaID 29: relation "user_promo_codes" (58): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users"
ParentID 52, ParentSchemaID 29: relation "user_promo_codes" (58): processed
ParentID 0, ParentSchemaID 0: namespace entry "defaultdb" (50): processed
ParentID 0, ParentSchemaID 0: namespace entry "movr" (52): descriptor not found
ParentID 0, ParentSchemaID 0: namespace entry "postgres" (51): processed
ParentID 0, ParentSchemaID 0: namespace entry "system" (1): processed
ParentID 1, ParentSchemaID 0: namespace entry "public" (29): processed
ParentID 1, ParentSchemaID 29: namespace entry "comments" (24): processed
ParentID 1, ParentSchemaID 29: namespace entry "descriptor" (3): processed
ParentID 1, ParentSchemaID 29: namespace entry "eventlog" (12): processed
ParentID 1, ParentSchemaID 29: namespace entry "jobs" (15): processed
ParentID 1, ParentSchemaID 29: namespace entry "lease" (11): processed
ParentID 1, ParentSchemaID 29: namespace entry "locations" (21): processed
ParentID 1, ParentSchemaID 29: namespace entry "namespace" (30): processed
ParentID 1, ParentSchemaID 29: namespace entry "protected_ts_meta" (31): processed
ParentID 1, ParentSchemaID 29: namespace entry "protected_ts_records" (32): processed
ParentID 1, ParentSchemaID 29: namespace entry "rangelog" (13): processed
ParentID 1, ParentSchemaID 29: namespace entry "replication_constraint_stats" (25): processed
ParentID 1, ParentSchemaID 29: namespace entry "replication_critical_localities" (26): processed
ParentID 1, ParentSchemaID 29: namespace entry "replication_stats" (27): processed
ParentID 1, ParentSchemaID 29: namespace entry "reports_meta" (28): processed
ParentID 1, ParentSchemaID 29: namespace entry "role_members" (23): processed
ParentID 1, ParentSchemaID 29: namespace entry "role_options" (33): processed
ParentID 1, ParentSchemaID 29: namespace entry "scheduled_jobs" (37): processed
ParentID 1, ParentSchemaID 29: namespace entry "settings" (6): processed
ParentID 1, ParentSchemaID 29: namespace entry "sqlliveness" (39): processed
ParentID 1, ParentSchemaID 29: namespace entry "statement_bundle_chunks" (34): processed
ParentID 1, ParentSchemaID 29: namespace entry "statement_diagnostics" (36): processed
ParentID 1, ParentSchemaID 29: namespace entry "statement_diagnostics_requests" (35): processed
ParentID 1, ParentSchemaID 29: namespace entry "table_statistics" (20): processed
ParentID 1, ParentSchemaID 29: namespace entry "tenants" (8): processed
ParentID 1, ParentSchemaID 29: namespace entry "ui" (14): processed
ParentID 1, ParentSchemaID 29: namespace entry "users" (4): processed
ParentID 1, ParentSchemaID 29: namespace entry "web_sessions" (19): processed
ParentID 1, ParentSchemaID 29: namespace entry "zones" (5): processed
ParentID 50, ParentSchemaID 0: namespace entry "public" (29): processed
ParentID 51, ParentSchemaID 0: namespace entry "public" (29): processed
ParentID 52, ParentSchemaID 0: namespace entry "public" (29): processed
ParentID 52, ParentSchemaID 29: namespace entry "promo_codes" (57): processed
ParentID 52, ParentSchemaID 29: namespace entry "rides" (55): processed
ParentID 52, ParentSchemaID 29: namespace entry "user_promo_codes" (58): processed
ParentID 52, ParentSchemaID 29: namespace entry "users" (53): processed
ParentID 52, ParentSchemaID 29: namespace entry "vehicle_location_histories" (56): processed
ParentID 52, ParentSchemaID 29: namespace entry "vehicles" (54): processed
Examining 2 jobs...
Processing job 587337426939772929
Processing job 587337426984566785
job 587337426984566785: running schema change GC refers to missing table descriptor(s) [59]; existing descriptors that still need to be dropped []; job safe to delete: true.
ERROR: validation failed
| pkg/cli/testdata/doctor/test_examine_zipdir_verbose | 1 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.00017701431352179497,
0.0001731855154503137,
0.00016574456822127104,
0.00017508094606455415,
0.00000387161844628281
] |
{
"id": 4,
"code_window": [
"\tdefer tc.Stopper().Stop(ctx)\n",
"\tsqlDB := tc.ServerConn(0)\n",
"\ttdb := sqlutils.MakeSQLRunner(sqlDB)\n",
"\t// Create table with a primary key constraint.\n",
"\ttdb.Exec(t, \"CREATE TABLE t(name int primary key)\")\n",
"\t// Validate the comments on constraints are blocked.\n",
"\ttdb.ExpectErr(t,\n",
"\t\t\"pq: cannot comment on constraint\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\ttdb.Exec(t, \"CREATE TABLE dep_t (fk INT8 PRIMARY KEY);\")\n",
"\ttdb.Exec(t,\n",
"\t\t`CREATE TABLE t (\n",
"\tname INT8 PRIMARY KEY,\n",
"\tval INT8 REFERENCES dep_t (fk),\n",
"\tval2 INT8 CHECK (val2 > 0)\n",
");`,\n",
"\t)\n",
"\ttdb.Exec(t,\n",
"\t\t`CREATE TABLE t_fk_dst (name INT8 PRIMARY KEY, val INT8 REFERENCES t (name));`)\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "replace",
"edit_start_line_idx": 58
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// Package oidext contains oids that are not in `github.com/lib/pq/oid`
// as they are not shipped by default with postgres.
// As CRDB does not support extensions, we'll need to automatically assign
// a few OIDs of our own.
package oidext
import "github.com/lib/pq/oid"
// CockroachPredefinedOIDMax defines the maximum OID allowed for use by
// non user defined types. OIDs for user defined types will start at
// CockroachPredefinedOIDMax and increase as new types are created.
// User defined type descriptors have a cluster-wide unique stable ID.
// CockroachPredefinedOIDMax defines the mapping from this stable ID to
// a type OID. In particular, stable ID + CockroachPredefinedOIDMax = type OID.
// types.StableTypeIDToOID and types.UserDefinedTypeOIDToID should be used when
// converting between stable IDs and type OIDs.
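// For example, a hypothetical user defined type with stable ID 54 would map to
// type OID 54 + CockroachPredefinedOIDMax = 100054.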
const CockroachPredefinedOIDMax = 100000
// OIDs in this block are extensions of postgres, thus having no official OID.
const (
T_geometry = oid.Oid(90000)
T__geometry = oid.Oid(90001)
T_geography = oid.Oid(90002)
T__geography = oid.Oid(90003)
T_box2d = oid.Oid(90004)
T__box2d = oid.Oid(90005)
)
// ExtensionTypeName returns a mapping from extension oids
// to their type name.
var ExtensionTypeName = map[oid.Oid]string{
T_geometry: "GEOMETRY",
T__geometry: "_GEOMETRY",
T_geography: "GEOGRAPHY",
T__geography: "_GEOGRAPHY",
T_box2d: "BOX2D",
T__box2d: "_BOX2D",
}
// TypeName checks the name for a given type by first looking up oid.TypeName
// before falling back to looking at the oid extension ExtensionTypeName.
func TypeName(o oid.Oid) (string, bool) {
name, ok := oid.TypeName[o]
if ok {
return name, ok
}
name, ok = ExtensionTypeName[o]
return name, ok
}
| pkg/sql/oidext/oidext.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.00017944342107512057,
0.0001709731441223994,
0.00016453258285764605,
0.0001700008288025856,
0.000005117177806823747
] |
{
"id": 4,
"code_window": [
"\tdefer tc.Stopper().Stop(ctx)\n",
"\tsqlDB := tc.ServerConn(0)\n",
"\ttdb := sqlutils.MakeSQLRunner(sqlDB)\n",
"\t// Create table with a primary key constraint.\n",
"\ttdb.Exec(t, \"CREATE TABLE t(name int primary key)\")\n",
"\t// Validate the comments on constraints are blocked.\n",
"\ttdb.ExpectErr(t,\n",
"\t\t\"pq: cannot comment on constraint\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\ttdb.Exec(t, \"CREATE TABLE dep_t (fk INT8 PRIMARY KEY);\")\n",
"\ttdb.Exec(t,\n",
"\t\t`CREATE TABLE t (\n",
"\tname INT8 PRIMARY KEY,\n",
"\tval INT8 REFERENCES dep_t (fk),\n",
"\tval2 INT8 CHECK (val2 > 0)\n",
");`,\n",
"\t)\n",
"\ttdb.Exec(t,\n",
"\t\t`CREATE TABLE t_fk_dst (name INT8 PRIMARY KEY, val INT8 REFERENCES t (name));`)\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "replace",
"edit_start_line_idx": 58
} | // Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package encoding
import (
"bytes"
"fmt"
"math"
"math/rand"
"testing"
"github.com/cockroachdb/apd/v3"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
)
func TestDecimalMandE(t *testing.T) {
testCases := []struct {
Value string
E int
M []byte
}{
{"1.0", 1, []byte{0x02}},
{"10.0", 1, []byte{0x14}},
{"99.0", 1, []byte{0xc6}},
{"99.01", 1, []byte{0xc7, 0x02}},
{"99.0001", 1, []byte{0xc7, 0x01, 0x02}},
{"100.0", 2, []byte{0x02}},
{"100.01", 2, []byte{0x03, 0x01, 0x02}},
{"100.1", 2, []byte{0x03, 0x01, 0x14}},
{"1234", 2, []byte{0x19, 0x44}},
{"9999", 2, []byte{0xc7, 0xc6}},
{"9999.000001", 2, []byte{0xc7, 0xc7, 0x01, 0x01, 0x02}},
{"9999.000009", 2, []byte{0xc7, 0xc7, 0x01, 0x01, 0x12}},
{"9999.00001", 2, []byte{0xc7, 0xc7, 0x01, 0x01, 0x14}},
{"9999.00009", 2, []byte{0xc7, 0xc7, 0x01, 0x01, 0xb4}},
{"9999.000099", 2, []byte{0xc7, 0xc7, 0x01, 0x01, 0xc6}},
{"9999.0001", 2, []byte{0xc7, 0xc7, 0x01, 0x02}},
{"9999.001", 2, []byte{0xc7, 0xc7, 0x01, 0x14}},
{"9999.01", 2, []byte{0xc7, 0xc7, 0x02}},
{"9999.1", 2, []byte{0xc7, 0xc7, 0x14}},
{"10000", 3, []byte{0x02}},
{"10001", 3, []byte{0x03, 0x01, 0x02}},
{"12345", 3, []byte{0x03, 0x2f, 0x5a}},
{"123450", 3, []byte{0x19, 0x45, 0x64}},
{"1234.5", 2, []byte{0x19, 0x45, 0x64}},
{"12.345", 1, []byte{0x19, 0x45, 0x64}},
{"0.123", 0, []byte{0x19, 0x3c}},
{"0.0123", 0, []byte{0x03, 0x2e}},
{"0.00123", -1, []byte{0x19, 0x3c}},
{"1e-307", -153, []byte{0x14}},
{"1e308", 155, []byte{0x2}},
{"9223372036854775807", 10, []byte{0x13, 0x2d, 0x43, 0x91, 0x07, 0x89, 0x6d, 0x9b, 0x75, 0x0e}},
}
for _, c := range testCases {
d := new(apd.Decimal)
if _, _, err := d.SetString(c.Value); err != nil {
t.Fatalf("could not parse decimal from string %q", c.Value)
}
if e, m := decimalEandM(d, nil); e != c.E || !bytes.Equal(m, c.M) {
t.Errorf("unexpected mismatch in E/M for %v. expected E=%v | M=[% x], got E=%v | M=[% x]",
c.Value, c.E, c.M, e, m)
}
}
}
func mustDecimal(s string) *apd.Decimal {
d, _, err := new(apd.Decimal).SetString(s)
if err != nil {
panic(fmt.Sprintf("could not set string %q on decimal", s))
}
return d
}
func randBuf(rng *rand.Rand, maxLen int) []byte {
buf := make([]byte, rng.Intn(maxLen+1))
_, _ = rng.Read(buf)
return buf
}
func encodeDecimalWithDir(dir Direction, buf []byte, d *apd.Decimal) []byte {
if dir == Ascending {
return EncodeDecimalAscending(buf, d)
}
return EncodeDecimalDescending(buf, d)
}
func decodeDecimalWithDir(
t *testing.T, dir Direction, buf []byte, tmp []byte,
) ([]byte, apd.Decimal) {
var err error
var resBuf []byte
var res apd.Decimal
if dir == Ascending {
resBuf, res, err = DecodeDecimalAscending(buf, tmp)
} else {
resBuf, res, err = DecodeDecimalDescending(buf, tmp)
}
if err != nil {
t.Fatal(err)
}
return resBuf, res
}
func mustDecimalFloat64(f float64) *apd.Decimal {
d, err := new(apd.Decimal).SetFloat64(f)
if err != nil {
panic(err)
}
return d
}
func mustDecimalString(s string) *apd.Decimal {
d, _, err := apd.NewFromString(s)
if err != nil {
panic(err)
}
return d
}
func TestEncodeDecimal(t *testing.T) {
testCases := []struct {
Value *apd.Decimal
Encoding []byte
}{
{&apd.Decimal{Form: apd.NaN}, []byte{0x18}},
{&apd.Decimal{Form: apd.Infinite, Negative: true}, []byte{0x19}},
{apd.New(-99122, 99999), []byte{0x1a, 0x86, 0x3c, 0xad, 0x38, 0xe6, 0xd7, 0x00}},
// Three duplicates to make sure -13*10^1000 <= -130*10^999 <= -13*10^1000
{apd.New(-13, 1000), []byte{0x1a, 0x86, 0xfe, 0x0a, 0xe5, 0x00}},
{apd.New(-130, 999), []byte{0x1a, 0x86, 0xfe, 0x0a, 0xe5, 0x00}},
{apd.New(-13, 1000), []byte{0x1a, 0x86, 0xfe, 0x0a, 0xe5, 0x00}},
{mustDecimalFloat64(-math.MaxFloat64), []byte{0x1a, 0x87, 0x64, 0xfc, 0x60, 0x66, 0x44, 0xe4, 0x9e, 0x82, 0xc0, 0x8d, 0x00}},
{apd.New(-130, 100), []byte{0x1a, 0x87, 0xcb, 0xfc, 0xc3, 0x00}},
{apd.New(-13, 0), []byte{0x24, 0xe5, 0x00}},
{apd.New(-11, 0), []byte{0x24, 0xe9, 0x00}},
{mustDecimal("-10.123456789"), []byte{0x24, 0xea, 0xe6, 0xba, 0x8e, 0x62, 0x4b, 0x00}},
{mustDecimal("-10"), []byte{0x24, 0xeb, 0x00}},
{mustDecimal("-9.123456789"), []byte{0x24, 0xec, 0xe6, 0xba, 0x8e, 0x62, 0x4b, 0x00}},
{mustDecimal("-9"), []byte{0x24, 0xed, 0x00}},
{mustDecimal("-1.1"), []byte{0x24, 0xfc, 0xeb, 0x00}},
{apd.New(-1, 0), []byte{0x24, 0xfd, 0x00}},
{apd.New(-8, -1), []byte{0x25, 0x5f, 0x00}},
{apd.New(-1, -1), []byte{0x25, 0xeb, 0x00}},
{mustDecimal("-.09"), []byte{0x25, 0xed, 0x00}},
{mustDecimal("-.054321"), []byte{0x25, 0xf4, 0xa8, 0xd5, 0x00}},
{mustDecimal("-.012"), []byte{0x25, 0xfc, 0xd7, 0x00}},
{apd.New(-11, -4), []byte{0x26, 0x89, 0xe9, 0x00}},
{apd.New(-11, -6), []byte{0x26, 0x8a, 0xe9, 0x00}},
{mustDecimalFloat64(-math.SmallestNonzeroFloat64), []byte{0x26, 0xf6, 0xa1, 0xf5, 0x00}},
{apd.New(-11, -66666), []byte{0x26, 0xf7, 0x82, 0x34, 0xe9, 0x00}},
{mustDecimal("-0"), []byte{0x27}},
{apd.New(0, 0), []byte{0x27}},
{mustDecimalFloat64(math.SmallestNonzeroFloat64), []byte{0x28, 0x87, 0x5e, 0x0a, 0x00}},
{apd.New(11, -6), []byte{0x28, 0x87, 0xfd, 0x16, 0x00}},
{apd.New(11, -4), []byte{0x28, 0x87, 0xfe, 0x16, 0x00}},
{apd.New(1, -1), []byte{0x29, 0x14, 0x00}},
{apd.New(8, -1), []byte{0x29, 0xa0, 0x00}},
{apd.New(1, 0), []byte{0x2a, 0x02, 0x00}},
{mustDecimal("1.1"), []byte{0x2a, 0x03, 0x14, 0x00}},
{apd.New(11, 0), []byte{0x2a, 0x16, 0x00}},
{apd.New(13, 0), []byte{0x2a, 0x1a, 0x00}},
{mustDecimalFloat64(math.MaxFloat64), []byte{0x34, 0xf6, 0x9b, 0x03, 0x9f, 0x99, 0xbb, 0x1b, 0x61, 0x7d, 0x3f, 0x72, 0x00}},
// Four duplicates to make sure 13*10^1000 <= 130*10^999 <= 1300*10^998 <= 13*10^1000
{apd.New(13, 1000), []byte{0x34, 0xf7, 0x01, 0xf5, 0x1a, 0x00}},
{apd.New(130, 999), []byte{0x34, 0xf7, 0x01, 0xf5, 0x1a, 0x00}},
{apd.New(1300, 998), []byte{0x34, 0xf7, 0x01, 0xf5, 0x1a, 0x00}},
{apd.New(13, 1000), []byte{0x34, 0xf7, 0x01, 0xf5, 0x1a, 0x00}},
{apd.New(99122, 99999), []byte{0x34, 0xf7, 0xc3, 0x52, 0xc7, 0x19, 0x28, 0x00}},
{apd.New(99122839898321208, 99999), []byte{0x34, 0xf7, 0xc3, 0x58, 0xc7, 0x19, 0x39, 0x4f, 0xb3, 0xa7, 0x2b, 0x29, 0xa0, 0x00}},
{&apd.Decimal{Form: apd.Infinite}, []byte{0x35}},
}
rng, _ := randutil.NewTestRand()
var lastEncoded []byte
for _, dir := range []Direction{Ascending, Descending} {
for _, tmp := range [][]byte{nil, make([]byte, 0, 100)} {
for i, c := range testCases {
t.Run(fmt.Sprintf("%v_%d_%d_%s", dir, cap(tmp), i, c.Value), func(t *testing.T) {
enc := encodeDecimalWithDir(dir, nil, c.Value)
_, dec := decodeDecimalWithDir(t, dir, enc, tmp)
if dir == Ascending && !bytes.Equal(enc, c.Encoding) {
t.Errorf("unexpected mismatch for %s. expected [% x], got [% x]",
c.Value, c.Encoding, enc)
}
if i > 0 {
if (bytes.Compare(lastEncoded, enc) > 0 && dir == Ascending) ||
(bytes.Compare(lastEncoded, enc) < 0 && dir == Descending) {
t.Errorf("%v: expected [% x] to be less than or equal to [% x]",
c.Value, testCases[i-1].Encoding, enc)
}
}
testPeekLength(t, enc)
if dec.Cmp(c.Value) != 0 {
t.Errorf("%d unexpected mismatch for %v. got %v", i, c.Value, dec)
}
lastEncoded = enc
// Test that appending the decimal to an existing buffer works. It
// is important to test with various values, slice lengths, and
// capacities because the various encoding paths try to use any
// spare capacity to avoid allocations.
for trials := 0; trials < 5; trials++ {
orig := randBuf(rng, 30)
origLen := len(orig)
bufCap := origLen + rng.Intn(30)
buf := make([]byte, origLen, bufCap)
copy(buf, orig)
enc := encodeDecimalWithDir(dir, buf, c.Value)
// Append some random bytes
enc = append(enc, randBuf(rng, 20)...)
_, dec := decodeDecimalWithDir(t, dir, enc[origLen:], tmp)
if dec.Cmp(c.Value) != 0 {
t.Errorf("unexpected mismatch for %v. got %v", c.Value, dec)
}
// Verify the existing values weren't modified.
for i := range orig {
if enc[i] != orig[i] {
t.Errorf("existing byte %d changed after encoding (from %d to %d)",
i, orig[i], enc[i])
}
}
}
})
}
}
}
}
func TestEncodeDecimalRand(t *testing.T) {
rng, _ := randutil.NewTestRand()
// Test both directions.
for _, dir := range []Direction{Ascending, Descending} {
var prev *apd.Decimal
var prevEnc []byte
const randomTrials = 100000
for i := 0; i < randomTrials; i++ {
cur := randDecimal(rng, -20, 20)
var tmp, appendTo []byte
// Test with and without appending.
if rng.Intn(2) == 1 {
appendTo = randBuf(rng, 30)
appendTo = appendTo[:rng.Intn(len(appendTo)+1)]
}
// Test with and without tmp buffer.
if rng.Intn(2) == 1 {
tmp = randBuf(rng, 100)
}
var enc []byte
var res apd.Decimal
var err error
if dir == Ascending {
enc = EncodeDecimalAscending(appendTo, cur)
enc = enc[len(appendTo):]
_, res, err = DecodeDecimalAscending(enc, tmp)
} else {
enc = EncodeDecimalDescending(appendTo, cur)
enc = enc[len(appendTo):]
_, res, err = DecodeDecimalDescending(enc, tmp)
}
if err != nil {
t.Fatal(err)
}
testPeekLength(t, enc)
// Make sure we decode the same value we encoded.
if cur.Cmp(&res) != 0 {
t.Fatalf("unexpected mismatch for %v, got %v", cur, res)
}
// Make sure lexicographical sorting is consistent.
if prev != nil {
bytesCmp := bytes.Compare(prevEnc, enc)
cmpType := "same"
if dir == Descending {
bytesCmp *= -1
cmpType = "inverse"
}
if decCmp := prev.Cmp(cur); decCmp != bytesCmp {
t.Fatalf("expected [% x] to compare to [% x] the %s way that %v compares to %v",
prevEnc, enc, cmpType, prev, cur)
}
}
prev = cur
prevEnc = enc
}
}
}
func TestNonsortingEncodeDecimal(t *testing.T) {
testCases := []struct {
Value *apd.Decimal
Encoding []byte
}{
{&apd.Decimal{Form: apd.NaN}, []byte{0x18}},
{&apd.Decimal{Form: apd.Infinite, Negative: true}, []byte{0x19}},
{apd.New(-99122, 99999), []byte{0x1a, 0xf8, 0x01, 0x86, 0xa4, 0x01, 0x83, 0x32}},
// Three duplicates to make sure -13*10^1000 <= -130*10^999 <= -13*10^1000
{apd.New(-13, 1000), []byte{0x1a, 0xf7, 0x03, 0xea, 0x0d}},
{apd.New(-130, 999), []byte{0x1a, 0xf7, 0x03, 0xea, 0x82}},
{apd.New(-13, 1000), []byte{0x1a, 0xf7, 0x03, 0xea, 0x0d}},
{mustDecimalFloat64(-math.MaxFloat64), []byte{0x1a, 0xf7, 0x01, 0x35, 0x3f, 0xdd, 0xec, 0x7f, 0x2f, 0xaf, 0x35}},
{apd.New(-130, 100), []byte{0x1a, 0xef, 0x82}},
{apd.New(-13, 0), []byte{0x1a, 0x8a, 0x0d}},
{apd.New(-11, 0), []byte{0x1a, 0x8a, 0x0b}},
{apd.New(-1, 0), []byte{0x1a, 0x89, 0x01}},
{apd.New(-8, -1), []byte{0x25, 0x08}},
{apd.New(-1, -1), []byte{0x25, 0x01}},
{apd.New(-11, -4), []byte{0x26, 0x8a, 0x0b}},
{apd.New(-11, -6), []byte{0x26, 0x8c, 0x0b}},
{mustDecimalFloat64(-math.SmallestNonzeroFloat64), []byte{0x26, 0xf7, 0x01, 0x43, 0x05}},
{apd.New(-11, -66666), []byte{0x26, 0xf8, 0x01, 0x04, 0x68, 0x0b}},
{mustDecimal("-0"), []byte{0x1a, 0x89}},
{apd.New(0, 0), []byte{0x27}},
{mustDecimalFloat64(math.SmallestNonzeroFloat64), []byte{0x28, 0xf7, 0x01, 0x43, 0x05}},
{apd.New(11, -6), []byte{0x28, 0x8c, 0x0b}},
{apd.New(11, -4), []byte{0x28, 0x8a, 0x0b}},
{apd.New(1, -1), []byte{0x29, 0x01}},
{apd.New(12345, -5), []byte{0x29, 0x30, 0x39}},
{apd.New(8, -1), []byte{0x29, 0x08}},
{apd.New(1, 0), []byte{0x34, 0x89, 0x01}},
{apd.New(11, 0), []byte{0x34, 0x8a, 0x0b}},
{apd.New(13, 0), []byte{0x34, 0x8a, 0x0d}},
// Note that this does not sort correctly!
{apd.New(255, 0), []byte{0x34, 0x8b, 0xff}},
{apd.New(256, 0), []byte{0x34, 0x8b, 0x01, 0x00}},
{mustDecimalFloat64(math.MaxFloat64), []byte{0x34, 0xf7, 0x01, 0x35, 0x3f, 0xdd, 0xec, 0x7f, 0x2f, 0xaf, 0x35}},
// Four duplicates to make sure 13*10^1000 <= 130*10^999 <= 1300*10^998 <= 13*10^1000
{apd.New(13, 1000), []byte{0x34, 0xf7, 0x03, 0xea, 0x0d}},
{apd.New(130, 999), []byte{0x34, 0xf7, 0x03, 0xea, 0x82}},
{apd.New(1300, 998), []byte{0x34, 0xf7, 0x03, 0xea, 0x05, 0x14}},
{apd.New(13, 1000), []byte{0x34, 0xf7, 0x03, 0xea, 0x0d}},
{apd.New(99122, 99999), []byte{0x34, 0xf8, 0x01, 0x86, 0xa4, 0x01, 0x83, 0x32}},
{apd.New(99122839898321208, 99999), []byte{0x34, 0xf8, 0x01, 0x86, 0xb0, 0x01, 0x60, 0x27, 0xb2, 0x9d, 0x44, 0x71, 0x38}},
{&apd.Decimal{Form: apd.Infinite}, []byte{0x35}},
{mustDecimalString("142378208485490985369999605144727062141206925976498256305323716858805588894693616552055968571135475510700810219028167653516982373238641332965927953273383572708760984694356069974208844865675206339235758647159337463780100273189720943242182911961627806424621091859596571173867825568394327041453823674373002756096"), []byte{0x34, 0xf7, 0x01, 0x35, 0xca, 0xc0, 0xd8, 0x34, 0x68, 0x5d, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
}
rng, _ := randutil.NewTestRand()
for _, tmp := range [][]byte{nil, make([]byte, 0, 100)} {
for i, c := range testCases {
t.Run(fmt.Sprintf("%d_%d_%s", cap(tmp), i, c.Value), func(t *testing.T) {
enc := EncodeNonsortingDecimal(nil, c.Value)
dec, err := DecodeNonsortingDecimal(enc, tmp)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(enc, c.Encoding) {
t.Errorf("unexpected mismatch for %s. expected [% x], got [% x]",
c.Value, c.Encoding, enc)
}
if dec.CmpTotal(c.Value) != 0 {
t.Errorf("%d unexpected mismatch for %v. got %v", i, c.Value, dec)
}
// Test that appending the decimal to an existing buffer works. It
// is important to test with various values, slice lengths, and
// capacities because the various encoding paths try to use any
// spare capacity to avoid allocations.
for trials := 0; trials < 5; trials++ {
orig := randBuf(rng, 30)
origLen := len(orig)
bufCap := origLen + rng.Intn(30)
buf := make([]byte, origLen, bufCap)
copy(buf, orig)
enc := EncodeNonsortingDecimal(buf, c.Value)
dec, err := DecodeNonsortingDecimal(enc[origLen:], tmp)
if err != nil {
t.Fatal(err)
}
if dec.CmpTotal(c.Value) != 0 {
t.Errorf("unexpected mismatch for %v. got %v", c.Value, dec)
}
// Verify the existing values weren't modified.
for i := range orig {
if enc[i] != orig[i] {
t.Errorf("existing byte %d changed after encoding (from %d to %d)",
i, orig[i], enc[i])
}
}
}
})
}
}
}
func TestNonsortingEncodeDecimalRand(t *testing.T) {
rng, _ := randutil.NewTestRand()
const randomTrials = 200000
for i := 0; i < randomTrials; i++ {
var tmp, appendTo []byte
// Test with and without appending.
if rng.Intn(2) == 1 {
appendTo = randBuf(rng, 30)
appendTo = appendTo[:rng.Intn(len(appendTo)+1)]
}
// Test with and without tmp buffer.
if rng.Intn(2) == 1 {
tmp = randBuf(rng, 100)
}
cur := randDecimal(rng, -20, 20)
enc := EncodeNonsortingDecimal(appendTo, cur)
enc = enc[len(appendTo):]
res, err := DecodeNonsortingDecimal(enc, tmp)
if err != nil {
t.Fatal(err)
}
// Make sure we decode the same value we encoded.
if cur.Cmp(&res) != 0 {
t.Fatalf("unexpected mismatch for %v, got %v", cur, res)
}
// Make sure we would have overestimated the value.
if est := UpperBoundNonsortingDecimalSize(cur); est < len(enc) {
t.Fatalf("expected estimate of %d for %v to be greater than or equal to the encoded length, found [% x]", est, cur, enc)
}
}
}
// TestNonsortingEncodeDecimalRoundtrip tests that decimals can round trip
// through EncodeNonsortingDecimal and DecodeNonsortingDecimal with an expected
// coefficient and exponent.
func TestNonsortingEncodeDecimalRoundtrip(t *testing.T) {
tests := map[string]string{
"0": "0E+0",
"0.0": "0E-1",
"0.00": "0E-2",
"0e-10": "0E-10",
"0.00e-10": "0E-12",
"00": "0E+0",
"-0": "-0E+0",
"-0.0": "-0E-1",
"-0.00": "-0E-2",
"-0e-10": "-0E-10",
"-0.00e-10": "-0E-12",
"-00": "-0E+0",
}
for tc, expect := range tests {
t.Run(tc, func(t *testing.T) {
d, _, err := apd.NewFromString(tc)
if err != nil {
t.Fatal(err)
}
enc := EncodeNonsortingDecimal(nil, d)
res, err := DecodeNonsortingDecimal(enc, nil)
if err != nil {
t.Fatal(err)
}
s := res.Text('E')
if expect != s {
t.Fatalf("expected %s, got %s", expect, s)
}
})
}
}
func TestDecodeMultipleDecimalsIntoNonsortingDecimal(t *testing.T) {
tcs := []struct {
value []string
}{
{
[]string{"1.0", "5.0", "7.0"},
},
{
[]string{"1.0", "-1.0", "0.0"},
},
{
[]string{"1.0", "-1.0", "10.0"},
},
{
[]string{"nan", "1.0", "-1.0"},
},
{
[]string{"-1.0", "inf", "5.0"},
},
}
for _, tc := range tcs {
var actual apd.Decimal
for _, num := range tc.value {
expected, _, err := apd.NewFromString(num)
if err != nil {
t.Fatal(err)
}
enc := EncodeNonsortingDecimal(nil, expected)
err = DecodeIntoNonsortingDecimal(&actual, enc, nil)
if err != nil {
t.Fatal(err)
}
if actual.Cmp(expected) != 0 {
t.Errorf("unexpected mismatch for %v, got %v", expected, &actual)
}
}
}
}
func TestUpperBoundNonsortingDecimalUnscaledSize(t *testing.T) {
x := make([]byte, 100)
d := new(apd.Decimal)
for i := 0; i < len(x); i++ {
d.Coeff.SetString(string(x[:i]), 10)
reference := UpperBoundNonsortingDecimalSize(d)
bound := upperBoundNonsortingDecimalUnscaledSize(i)
if bound < reference || bound > reference+bigWordSize {
t.Errorf("%d: got a bound of %d but expected between %d and %d", i, bound, reference, reference+bigWordSize)
}
x[i] = '1'
}
}
// randDecimal generates a random decimal with exponent in the
// range [minExp, maxExp].
func randDecimal(rng *rand.Rand, minExp, maxExp int) *apd.Decimal {
exp := randutil.RandIntInRange(rng, minExp, maxExp+1)
// Transform random float in [0, 1) to [-1, 1) and multiply by 10^exp.
floatVal := (rng.Float64()*2 - 1) * math.Pow10(exp)
return mustDecimalFloat64(floatVal)
}
// makeDecimalVals creates decimal values with exponents in
// the range [minExp, maxExp].
func makeDecimalVals(minExp, maxExp int) []*apd.Decimal {
rng, _ := randutil.NewTestRand()
vals := make([]*apd.Decimal, 10000)
for i := range vals {
vals[i] = randDecimal(rng, minExp, maxExp)
}
return vals
}
func makeEncodedVals(minExp, maxExp int) [][]byte {
rng, _ := randutil.NewTestRand()
vals := make([][]byte, 10000)
for i := range vals {
vals[i] = EncodeDecimalAscending(nil, randDecimal(rng, minExp, maxExp))
}
return vals
}
func BenchmarkEncodeDecimalSmall(b *testing.B) {
vals := makeDecimalVals(-40, -1)
buf := make([]byte, 0, 100)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = EncodeDecimalAscending(buf, vals[i%len(vals)])
}
}
func BenchmarkDecodeDecimalSmall(b *testing.B) {
vals := makeEncodedVals(-40, -1)
buf := make([]byte, 0, 100)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _, _ = DecodeDecimalAscending(vals[i%len(vals)], buf)
}
}
func BenchmarkEncodeDecimalMedium(b *testing.B) {
vals := makeDecimalVals(0, 10)
buf := make([]byte, 0, 100)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = EncodeDecimalAscending(buf, vals[i%len(vals)])
}
}
func BenchmarkDecodeDecimalMedium(b *testing.B) {
vals := makeEncodedVals(0, 10)
buf := make([]byte, 0, 100)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _, _ = DecodeDecimalAscending(vals[i%len(vals)], buf)
}
}
func BenchmarkEncodeDecimalLarge(b *testing.B) {
vals := makeDecimalVals(11, 40)
buf := make([]byte, 0, 100)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = EncodeDecimalAscending(buf, vals[i%len(vals)])
}
}
func BenchmarkDecodeDecimalLarge(b *testing.B) {
vals := makeEncodedVals(11, 40)
buf := make([]byte, 0, 100)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _, _ = DecodeDecimalAscending(vals[i%len(vals)], buf)
}
}
func BenchmarkPeekLengthDecimal(b *testing.B) {
vals := makeEncodedVals(-20, 20)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _ = PeekLength(vals[i%len(vals)])
}
}
func BenchmarkNonsortingEncodeDecimal(b *testing.B) {
vals := makeDecimalVals(-20, 20)
buf := make([]byte, 0, 100)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = EncodeNonsortingDecimal(buf, vals[i%len(vals)])
}
}
func BenchmarkNonsortingDecodeDecimal(b *testing.B) {
rng, _ := randutil.NewTestRand()
vals := make([][]byte, 10000)
for i := range vals {
d := randDecimal(rng, -20, 20)
vals[i] = EncodeNonsortingDecimal(nil, d)
}
buf := make([]byte, 0, 100)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _ = DecodeNonsortingDecimal(vals[i%len(vals)], buf)
}
}
| pkg/util/encoding/decimal_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.0022496841847896576,
0.0002302622888237238,
0.00016409529780503362,
0.0001702490117168054,
0.00031804226455278695
] |
{
"id": 4,
"code_window": [
"\tdefer tc.Stopper().Stop(ctx)\n",
"\tsqlDB := tc.ServerConn(0)\n",
"\ttdb := sqlutils.MakeSQLRunner(sqlDB)\n",
"\t// Create table with a primary key constraint.\n",
"\ttdb.Exec(t, \"CREATE TABLE t(name int primary key)\")\n",
"\t// Validate the comments on constraints are blocked.\n",
"\ttdb.ExpectErr(t,\n",
"\t\t\"pq: cannot comment on constraint\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\ttdb.Exec(t, \"CREATE TABLE dep_t (fk INT8 PRIMARY KEY);\")\n",
"\ttdb.Exec(t,\n",
"\t\t`CREATE TABLE t (\n",
"\tname INT8 PRIMARY KEY,\n",
"\tval INT8 REFERENCES dep_t (fk),\n",
"\tval2 INT8 CHECK (val2 > 0)\n",
");`,\n",
"\t)\n",
"\ttdb.Exec(t,\n",
"\t\t`CREATE TABLE t_fk_dst (name INT8 PRIMARY KEY, val INT8 REFERENCES t (name));`)\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "replace",
"edit_start_line_idx": 58
} | // Code generated by TestPretty. DO NOT EDIT.
// GENERATED FILE DO NOT EDIT
1:
-
SELECT CASE
WHEN 1 THEN 2
WHEN 3 THEN 4
ELSE NULL
END
| pkg/sql/sem/tree/testdata/pretty/case.align-only.golden.short | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.00017317170568276197,
0.00017284663044847548,
0.00017252154066227376,
0.00017284663044847548,
3.250825102441013e-7
] |
{
"id": 5,
"code_window": [
"\t\t\"COMMENT ON CONSTRAINT \\\"t_pkey\\\" ON t IS 'primary_comment'\")\n",
"\t// Validate that we have a constraint ID due to post deserialization logic\n",
"\n",
"\tdesc := desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, \"defaultdb\", \"t\")\n",
"\tdesc.PrimaryIndex.ConstraintID = 0\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\t// Reset all constraint IDs\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "add",
"edit_start_line_idx": 66
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package migrations_test
import (
"context"
"fmt"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/stretchr/testify/require"
)
// TestEnsureConstraintIDs tests that constraint IDs are added as expected.
func TestEnsureConstraintIDs(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
// Start off with the version that did not support
// constraint IDs.
clusterArgs := base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
DisableAutomaticVersionUpgrade: 1,
BinaryVersionOverride: clusterversion.ByKey(
tabledesc.ConstraintIDsAddedToTableDescsVersion - 1),
},
},
},
}
c := keys.SystemSQLCodec
ctx := context.Background()
tc := testcluster.StartTestCluster(t, 1, clusterArgs)
s := tc.Server(0)
defer tc.Stopper().Stop(ctx)
sqlDB := tc.ServerConn(0)
tdb := sqlutils.MakeSQLRunner(sqlDB)
// Create table with a primary key constraint.
tdb.Exec(t, "CREATE TABLE t(name int primary key)")
// Validate the comments on constraints are blocked.
tdb.ExpectErr(t,
"pq: cannot comment on constraint",
"COMMENT ON CONSTRAINT \"t_pkey\" ON t IS 'primary_comment'")
// Validate that we have a constraint ID due to post deserialization logic
desc := desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, "defaultdb", "t")
desc.PrimaryIndex.ConstraintID = 0
require.NoError(t, s.DB().Put(
context.Background(),
catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),
desc.DescriptorProto(),
))
	// Validate that the post-deserialization logic will recompute the constraint IDs
// if they are missing.
desc = desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, "defaultdb", "t")
require.Equal(t, desc.PrimaryIndex.ConstraintID, descpb.ConstraintID(2))
// If we set both the constraint ID / next value to 0, then we will have
	// it assigned from scratch.
desc.PrimaryIndex.ConstraintID = 0
desc.NextConstraintID = 0
require.NoError(t, s.DB().Put(
context.Background(),
catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),
desc.DescriptorProto(),
))
// Validate that the descriptor is invalid, since the constraint IDs
// are missing.
tdb.CheckQueryResults(t,
`SELECT strpos(desc_json, 'constraintId') > 0,
strpos(desc_json, 'nextConstraintId') > 0
FROM (
SELECT jsonb_pretty(
crdb_internal.pb_to_json(
'cockroach.sql.sqlbase.Descriptor',
descriptor,
false
)
) AS desc_json
FROM system.descriptor
WHERE id = `+
fmt.Sprintf("%d", desc.GetID())+
`);`,
[][]string{{"false", "false"}},
)
// Migrate to the new cluster version.
tdb.Exec(t, `SET CLUSTER SETTING version = $1`,
clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String())
tdb.CheckQueryResultsRetry(t, "SHOW CLUSTER SETTING version",
[][]string{{clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String()}})
	// Validate the constraint IDs are populated now that the migration has run.
tdb.CheckQueryResults(t,
`SELECT strpos(desc_json, 'constraintId') > 0,
strpos(desc_json, 'nextConstraintId') > 0
FROM (
SELECT jsonb_pretty(
crdb_internal.pb_to_json(
'cockroach.sql.sqlbase.Descriptor',
descriptor,
false
)
) AS desc_json
FROM system.descriptor
WHERE id = `+
fmt.Sprintf("%d", desc.GetID())+
`);`,
[][]string{{"true", "true"}},
)
	// Validate we can comment on constraints.
tdb.Exec(t,
"COMMENT ON CONSTRAINT \"t_pkey\" ON t IS 'primary_comment'")
}
| pkg/migration/migrations/ensure_constraint_id_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.9984048008918762,
0.3577752411365509,
0.00016313631203956902,
0.0016831557732075453,
0.4748539924621582
] |
{
"id": 5,
"code_window": [
"\t\t\"COMMENT ON CONSTRAINT \\\"t_pkey\\\" ON t IS 'primary_comment'\")\n",
"\t// Validate that we have a constraint ID due to post deserialization logic\n",
"\n",
"\tdesc := desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, \"defaultdb\", \"t\")\n",
"\tdesc.PrimaryIndex.ConstraintID = 0\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\t// Reset all constraint IDs\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "add",
"edit_start_line_idx": 66
} | // Code generated by generate-staticcheck; DO NOT EDIT.
//go:build bazel
// +build bazel
package sa4019
import (
util "github.com/cockroachdb/cockroach/pkg/testutils/lint/passes/staticcheck"
"golang.org/x/tools/go/analysis"
"honnef.co/go/tools/staticcheck"
)
var Analyzer *analysis.Analyzer
func init() {
for _, analyzer := range staticcheck.Analyzers {
if analyzer.Analyzer.Name == "SA4019" {
Analyzer = analyzer.Analyzer
break
}
}
util.MungeAnalyzer(Analyzer)
}
| build/bazelutil/staticcheckanalyzers/sa4019/analyzer.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.00017487934383098036,
0.0001744754408719018,
0.00017398552154190838,
0.00017456148634664714,
3.6993799312767806e-7
] |
{
"id": 5,
"code_window": [
"\t\t\"COMMENT ON CONSTRAINT \\\"t_pkey\\\" ON t IS 'primary_comment'\")\n",
"\t// Validate that we have a constraint ID due to post deserialization logic\n",
"\n",
"\tdesc := desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, \"defaultdb\", \"t\")\n",
"\tdesc.PrimaryIndex.ConstraintID = 0\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\t// Reset all constraint IDs\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "add",
"edit_start_line_idx": 66
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
//go:build fast_int_set_large
// +build fast_int_set_large
package util
var fastIntSetAlwaysSmall = false
| pkg/util/fast_int_set_large.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.0001781141327228397,
0.00017362250946462154,
0.00016913087165448815,
0.00017362250946462154,
0.000004491630534175783
] |
{
"id": 5,
"code_window": [
"\t\t\"COMMENT ON CONSTRAINT \\\"t_pkey\\\" ON t IS 'primary_comment'\")\n",
"\t// Validate that we have a constraint ID due to post deserialization logic\n",
"\n",
"\tdesc := desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, \"defaultdb\", \"t\")\n",
"\tdesc.PrimaryIndex.ConstraintID = 0\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\t// Reset all constraint IDs\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "add",
"edit_start_line_idx": 66
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// {{/*
//go:build execgen_template
// +build execgen_template
//
// This file is the execgen template for min_max_removable_agg.eg.go. It's
// formatted in a special way, so it's both valid Go and a valid text/template
// input. This permits editing this file with editor support.
//
// */}}
package colexecwindow
import (
"context"
"github.com/cockroachdb/apd/v3"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/col/coldataext"
"github.com/cockroachdb/cockroach/pkg/col/typeconv"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/colmem"
"github.com/cockroachdb/cockroach/pkg/sql/memsize"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/duration"
"github.com/cockroachdb/cockroach/pkg/util/json"
"github.com/cockroachdb/errors"
)
// Workaround for bazel auto-generated code. goimports does not automatically
// pick up the right packages when run within the bazel sandbox.
var (
_ tree.AggType
_ apd.Context
_ duration.Duration
_ json.JSON
_ = coldataext.CompareDatum
_ = colexecerror.InternalError
_ = memsize.Uint32
)
// {{/*
// Declarations to make the template compile properly.
// _ASSIGN_CMP is the template function for assigning true to the first input
// if the second input compares successfully to the third input. The comparison
// operator is tree.LT for MIN and is tree.GT for MAX.
func _ASSIGN_CMP(_, _, _, _, _, _ string) bool {
colexecerror.InternalError(errors.AssertionFailedf(""))
}
// */}}
const (
// The argument column is always the first column in the SpillingBuffer.
argColIdx = 0
// The slice of uint32s in the deque can have up to 10,000 values (40KB).
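	// (10,000 entries * 4 bytes per uint32 = 40,000 bytes.)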
maxQueueLength = 10000
)
type minMaxRemovableAggBase struct {
partitionSeekerBase
colexecop.CloserHelper
allocator *colmem.Allocator
outputColIdx int
framer windowFramer
// A partial deque of indices into the current partition ordered by the value
// of the input column at each index. It contains only indices that are part
// of the current window frame. The first value in the queue is the index of
// the current value for the aggregation (NULL if empty). Under the
// simplifying assumption that the window frame has no exclusion clause, the
// queue does not need to contain any indices smaller than the best index -
// this keeps the queue small in many common cases.
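	// For example, when accumulating MIN over frame values [5, 3, 8, 4], only the
	// indices of 3 and 4 need to be kept: 5 and 8 are each followed by a smaller
	// value within the frame, so they can never become the minimum.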
queue minMaxQueue
// omittedIndex tracks the index where we reached the limit of the length of
// the queue, in which case we may be omitting values that could become
// relevant as the frame shrinks. If the queue becomes empty while this
// index is set, we have to aggregate over the previously omitted values.
// The default (unset) value is -1.
omittedIndex int
scratchIntervals []windowInterval
}
// Init implements the bufferedWindower interface.
func (b *minMaxRemovableAggBase) Init(ctx context.Context) {
b.InitHelper.Init(ctx)
}
// transitionToProcessing implements the bufferedWindower interface.
func (b *minMaxRemovableAggBase) transitionToProcessing() {
b.framer.startPartition(b.Ctx, b.partitionSize, b.buffer)
}
// startNewPartition implements the bufferedWindower interface.
func (b *minMaxRemovableAggBase) startNewPartition() {
b.partitionSize = 0
b.buffer.Reset(b.Ctx)
b.queue.reset()
}
// {{range .}}
// {{$agg := .Agg}}
func new_AGG_TITLERemovableAggregator(
args *WindowArgs, framer windowFramer, buffer *colexecutils.SpillingBuffer, argTyp *types.T,
) bufferedWindower {
// Reserve the maximum memory usable by the queue up front to ensure that it
// isn't used by the SpillingBuffer.
args.BufferAllocator.AdjustMemoryUsage(maxQueueLength * memsize.Uint32)
base := minMaxRemovableAggBase{
partitionSeekerBase: partitionSeekerBase{
partitionColIdx: args.PartitionColIdx,
buffer: buffer,
},
allocator: args.MainAllocator,
outputColIdx: args.OutputColIdx,
framer: framer,
queue: newMinMaxQueue(maxQueueLength),
omittedIndex: -1,
}
switch typeconv.TypeFamilyToCanonicalTypeFamily(argTyp.Family()) {
// {{range .Overloads}}
case _CANONICAL_TYPE_FAMILY:
switch argTyp.Width() {
// {{range .WidthOverloads}}
case _TYPE_WIDTH:
return &_AGG_TYPEAggregator{minMaxRemovableAggBase: base}
// {{end}}
}
// {{end}}
}
colexecerror.InternalError(
errors.AssertionFailedf("unexpectedly didn't find _AGG overload for %s type family", argTyp.Name()))
// This code is unreachable, but the compiler cannot infer that.
return nil
}
// {{range .Overloads}}
// {{range .WidthOverloads}}
type _AGG_TYPEAggregator struct {
minMaxRemovableAggBase
// curAgg holds the running min/max, so we can index into the output column
// once per row, instead of on each iteration.
// NOTE: if the length of the queue is zero, curAgg is undefined.
curAgg _GOTYPE
}
// processBatch implements the bufferedWindower interface.
func (a *_AGG_TYPEAggregator) processBatch(batch coldata.Batch, startIdx, endIdx int) {
if endIdx <= startIdx {
// There is no processing to be done.
return
}
outVec := batch.ColVec(a.outputColIdx)
outNulls := outVec.Nulls()
outCol := outVec.TemplateType()
// {{if not .IsBytesLike}}
_, _ = outCol.Get(startIdx), outCol.Get(endIdx-1)
// {{end}}
a.allocator.PerformOperation([]coldata.Vec{outVec}, func() {
for i := startIdx; i < endIdx; i++ {
a.framer.next(a.Ctx)
toAdd, toRemove := a.framer.slidingWindowIntervals()
// Process the toRemove intervals first.
if !a.queue.isEmpty() {
prevBestIdx := a.queue.getFirst()
for _, interval := range toRemove {
if uint32(interval.start) > a.queue.getFirst() {
colexecerror.InternalError(errors.AssertionFailedf(
"expected default exclusion clause for min/max sliding window operator"))
}
a.queue.removeAllBefore(uint32(interval.end))
}
if !a.queue.isEmpty() {
newBestIdx := a.queue.getFirst()
if newBestIdx != prevBestIdx {
// We need to update curAgg.
vec, idx, _ := a.buffer.GetVecWithTuple(a.Ctx, argColIdx, int(newBestIdx))
col := vec.TemplateType()
val := col.Get(idx)
execgen.COPYVAL(a.curAgg, val)
}
}
}
// Now aggregate over the toAdd intervals.
if a.queue.isEmpty() && a.omittedIndex != -1 {
// We have exhausted all the values that fit in the queue - we need to
// re-aggregate over the current window frame starting from the first
// omitted index.
a.scratchIntervals = getIntervalsGEIdx(
a.framer.frameIntervals(), a.scratchIntervals, a.omittedIndex)
a.omittedIndex = -1
a.aggregateOverIntervals(a.scratchIntervals)
} else {
a.aggregateOverIntervals(toAdd)
}
// Set the output value for the current row.
if a.queue.isEmpty() {
outNulls.SetNull(i)
} else {
// {{if not .IsBytesLike}}
// gcassert:bce
// {{end}}
outCol.Set(i, a.curAgg)
}
}
})
}
// aggregateOverIntervals accumulates all rows represented by the given
// intervals into the current aggregate.
func (a *_AGG_TYPEAggregator) aggregateOverIntervals(intervals []windowInterval) {
for _, interval := range intervals {
var cmp bool
for j := interval.start; j < interval.end; j++ {
idxToAdd := uint32(j)
vec, idx, _ := a.buffer.GetVecWithTuple(a.Ctx, argColIdx, j)
nulls := vec.Nulls()
col := vec.TemplateType()
if !nulls.MaybeHasNulls() || !nulls.NullAt(idx) {
val := col.Get(idx)
// If this is the first value in the frame, it is the best so far.
isBest := a.queue.isEmpty()
if !a.queue.isEmpty() {
// Compare to the best value seen so far.
_ASSIGN_CMP(cmp, val, a.curAgg, _, col, _)
if cmp {
// Reset the queue because the current value replaces all others.
isBest = true
a.queue.reset()
}
isBest = cmp
}
if isBest {
// The queue is already empty, so just add to the end of the queue.
// If any values were omitted from the queue, they would be dominated
// by this one anyway, so reset omittedIndex.
a.queue.addLast(idxToAdd)
execgen.COPYVAL(a.curAgg, val)
a.omittedIndex = -1
continue
}
// This is not the best value in the window frame, but we still need to
// keep it in the queue. Iterate from the end of the queue, removing any
// values that are dominated by the current one. Add the current value
// once the last value in the queue is better than the current one.
if !a.queue.isEmpty() {
// We have to make a copy of val because GetVecWithTuple
// calls below might reuse the same underlying vector.
var valCopy _GOTYPE
execgen.COPYVAL(valCopy, val)
for !a.queue.isEmpty() {
cmpVec, cmpIdx, _ := a.buffer.GetVecWithTuple(a.Ctx, argColIdx, int(a.queue.getLast()))
cmpVal := cmpVec.TemplateType().Get(cmpIdx)
_ASSIGN_CMP(cmp, cmpVal, valCopy, _, col, _)
if cmp {
break
}
// Any values that could not fit in the queue would also have been
// dominated by the current one, so reset omittedIndex.
a.queue.removeLast()
a.omittedIndex = -1
}
}
if a.queue.addLast(idxToAdd) && a.omittedIndex == -1 {
// The value couldn't fit in the queue. Keep track of the first index
// from which the queue could no longer store values.
a.omittedIndex = j
}
}
}
}
}
func (a *_AGG_TYPEAggregator) Close(ctx context.Context) {
a.queue.close()
a.framer.close()
a.buffer.Close(ctx)
*a = _AGG_TYPEAggregator{}
}
// {{end}}
// {{end}}
// {{end}}
// getIntervalsGEIdx returns a set of intervals representing all indexes in the
// 'intervals' slice at or after the given index.
func getIntervalsGEIdx(intervals, scratch []windowInterval, idx int) []windowInterval {
scratch = scratch[:0]
for _, interval := range intervals {
if interval.end <= idx {
continue
}
if interval.start >= idx {
scratch = append(scratch, interval)
continue
}
scratch = append(scratch, windowInterval{start: idx, end: interval.end})
}
return scratch
}
| pkg/sql/colexec/colexecwindow/min_max_removable_agg_tmpl.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.0004787520447280258,
0.00018772753537632525,
0.0001587389560882002,
0.0001701682631392032,
0.00007263065344886854
] |
{
"id": 6,
"code_window": [
"\tdesc.PrimaryIndex.ConstraintID = 0\n",
"\trequire.NoError(t, s.DB().Put(\n",
"\t\tcontext.Background(),\n",
"\t\tcatalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),\n"
],
"labels": [
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\trequire.NoError(t, desc.ForeachOutboundFK(func(constraint *descpb.ForeignKeyConstraint) error {\n",
"\t\tconstraint.ConstraintID = 0\n",
"\t\treturn nil\n",
"\t}))\n",
"\trequire.NoError(t, desc.ForeachInboundFK(func(constraint *descpb.ForeignKeyConstraint) error {\n",
"\t\tconstraint.ConstraintID = 0\n",
"\t\treturn nil\n",
"\t}))\n",
"\tfor _, check := range desc.GetChecks() {\n",
"\t\tcheck.ConstraintID = 0\n",
"\t}\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "add",
"edit_start_line_idx": 67
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package migrations_test
import (
"context"
"fmt"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/stretchr/testify/require"
)
// TestEnsureConstraintIDs tests that constraint IDs are added as expected.
func TestEnsureConstraintIDs(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
// Start off with the version that did not support
// constraint IDs.
clusterArgs := base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
DisableAutomaticVersionUpgrade: 1,
BinaryVersionOverride: clusterversion.ByKey(
tabledesc.ConstraintIDsAddedToTableDescsVersion - 1),
},
},
},
}
c := keys.SystemSQLCodec
ctx := context.Background()
tc := testcluster.StartTestCluster(t, 1, clusterArgs)
s := tc.Server(0)
defer tc.Stopper().Stop(ctx)
sqlDB := tc.ServerConn(0)
tdb := sqlutils.MakeSQLRunner(sqlDB)
// Create table with a primary key constraint.
tdb.Exec(t, "CREATE TABLE t(name int primary key)")
// Validate the comments on constraints are blocked.
tdb.ExpectErr(t,
"pq: cannot comment on constraint",
"COMMENT ON CONSTRAINT \"t_pkey\" ON t IS 'primary_comment'")
// Validate that we have a constraint ID due to post deserialization logic
desc := desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, "defaultdb", "t")
desc.PrimaryIndex.ConstraintID = 0
require.NoError(t, s.DB().Put(
context.Background(),
catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),
desc.DescriptorProto(),
))
	// Validate that the post-deserialization logic will recompute the constraint IDs
// if they are missing.
desc = desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, "defaultdb", "t")
require.Equal(t, desc.PrimaryIndex.ConstraintID, descpb.ConstraintID(2))
// If we set both the constraint ID / next value to 0, then we will have
	// it assigned from scratch.
desc.PrimaryIndex.ConstraintID = 0
desc.NextConstraintID = 0
require.NoError(t, s.DB().Put(
context.Background(),
catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),
desc.DescriptorProto(),
))
// Validate that the descriptor is invalid, since the constraint IDs
// are missing.
tdb.CheckQueryResults(t,
`SELECT strpos(desc_json, 'constraintId') > 0,
strpos(desc_json, 'nextConstraintId') > 0
FROM (
SELECT jsonb_pretty(
crdb_internal.pb_to_json(
'cockroach.sql.sqlbase.Descriptor',
descriptor,
false
)
) AS desc_json
FROM system.descriptor
WHERE id = `+
fmt.Sprintf("%d", desc.GetID())+
`);`,
[][]string{{"false", "false"}},
)
// Migrate to the new cluster version.
tdb.Exec(t, `SET CLUSTER SETTING version = $1`,
clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String())
tdb.CheckQueryResultsRetry(t, "SHOW CLUSTER SETTING version",
[][]string{{clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String()}})
	// Validate the constraint IDs are populated now that the migration has run.
tdb.CheckQueryResults(t,
`SELECT strpos(desc_json, 'constraintId') > 0,
strpos(desc_json, 'nextConstraintId') > 0
FROM (
SELECT jsonb_pretty(
crdb_internal.pb_to_json(
'cockroach.sql.sqlbase.Descriptor',
descriptor,
false
)
) AS desc_json
FROM system.descriptor
WHERE id = `+
fmt.Sprintf("%d", desc.GetID())+
`);`,
[][]string{{"true", "true"}},
)
	// Validate we can comment on constraints.
tdb.Exec(t,
"COMMENT ON CONSTRAINT \"t_pkey\" ON t IS 'primary_comment'")
}
| pkg/migration/migrations/ensure_constraint_id_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.7914980053901672,
0.06092498451471329,
0.00016475142911076546,
0.00019521817739587277,
0.20303046703338623
] |
{
"id": 6,
"code_window": [
"\tdesc.PrimaryIndex.ConstraintID = 0\n",
"\trequire.NoError(t, s.DB().Put(\n",
"\t\tcontext.Background(),\n",
"\t\tcatalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),\n"
],
"labels": [
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\trequire.NoError(t, desc.ForeachOutboundFK(func(constraint *descpb.ForeignKeyConstraint) error {\n",
"\t\tconstraint.ConstraintID = 0\n",
"\t\treturn nil\n",
"\t}))\n",
"\trequire.NoError(t, desc.ForeachInboundFK(func(constraint *descpb.ForeignKeyConstraint) error {\n",
"\t\tconstraint.ConstraintID = 0\n",
"\t\treturn nil\n",
"\t}))\n",
"\tfor _, check := range desc.GetChecks() {\n",
"\t\tcheck.ConstraintID = 0\n",
"\t}\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "add",
"edit_start_line_idx": 67
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package opgen
import (
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scop"
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
)
func init() {
opRegistry.register(
(*scpb.ForeignKeyBackReference)(nil),
toPublic(
scpb.Status_ABSENT,
to(scpb.Status_PUBLIC,
emit(func(this *scpb.ForeignKeyBackReference) scop.Op {
return notImplemented(this)
}),
),
),
toAbsent(
scpb.Status_PUBLIC,
to(scpb.Status_ABSENT,
minPhase(scop.PreCommitPhase),
revertible(false),
emit(func(this *scpb.ForeignKeyBackReference) scop.Op {
return &scop.DropForeignKeyRef{
TableID: this.OriginID,
Name: this.Name,
Outbound: false,
}
}),
),
),
)
}
| pkg/sql/schemachanger/scplan/internal/opgen/opgen_in_foreign_key.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.00017694916459731758,
0.00017088169988710433,
0.00016498156765010208,
0.00017157923139166087,
0.000003921778443327639
] |
{
"id": 6,
"code_window": [
"\tdesc.PrimaryIndex.ConstraintID = 0\n",
"\trequire.NoError(t, s.DB().Put(\n",
"\t\tcontext.Background(),\n",
"\t\tcatalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),\n"
],
"labels": [
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\trequire.NoError(t, desc.ForeachOutboundFK(func(constraint *descpb.ForeignKeyConstraint) error {\n",
"\t\tconstraint.ConstraintID = 0\n",
"\t\treturn nil\n",
"\t}))\n",
"\trequire.NoError(t, desc.ForeachInboundFK(func(constraint *descpb.ForeignKeyConstraint) error {\n",
"\t\tconstraint.ConstraintID = 0\n",
"\t\treturn nil\n",
"\t}))\n",
"\tfor _, check := range desc.GetChecks() {\n",
"\t\tcheck.ConstraintID = 0\n",
"\t}\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "add",
"edit_start_line_idx": 67
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package fs
import (
"bufio"
"context"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/errors/oserror"
"github.com/cockroachdb/pebble/vfs"
)
const lockFilename = `TEMP_DIR.LOCK`
type lockStruct struct {
closer io.Closer
}
// lockFile sets a lock on the specified file, using flock.
func lockFile(filename string) (lockStruct, error) {
closer, err := vfs.Default.Lock(filename)
if err != nil {
return lockStruct{}, err
}
return lockStruct{closer: closer}, nil
}
// unlockFile unlocks the file associated with the specified lock and GCs any allocated memory for the lock.
func unlockFile(lock lockStruct) error {
if lock.closer != nil {
return lock.closer.Close()
}
return nil
}
// CreateTempDir creates a temporary directory with a prefix under the given
// parentDir and returns the absolute path of the temporary directory.
// It is advised to invoke CleanupTempDirs before creating new temporary
// directories in cases where the disk is completely full.
func CreateTempDir(parentDir, prefix string, stopper *stop.Stopper) (string, error) {
// We generate a unique temporary directory with the specified prefix.
tempPath, err := ioutil.TempDir(parentDir, prefix)
if err != nil {
return "", err
}
// TempDir creates a directory with permissions 0700. Manually change the
// permissions to be 0755 like every other directory created by cockroach.
if err := os.Chmod(tempPath, 0755); err != nil {
return "", err
}
absPath, err := filepath.Abs(tempPath)
if err != nil {
return "", err
}
// Create a lock file.
flock, err := lockFile(filepath.Join(absPath, lockFilename))
if err != nil {
return "", errors.Wrapf(err, "could not create lock on new temporary directory")
}
stopper.AddCloser(stop.CloserFn(func() {
if err := unlockFile(flock); err != nil {
log.Errorf(context.TODO(), "could not unlock file lock on temporary directory: %s", err.Error())
}
}))
return absPath, nil
}
// RecordTempDir records tempPath to the record file specified by recordPath to
// facilitate cleanup of the temporary directory on subsequent startups.
func RecordTempDir(recordPath, tempPath string) error {
// If the file does not exist, create it, or append to the file.
f, err := os.OpenFile(recordPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return err
}
defer f.Close()
// Record tempPath to the record file.
_, err = f.Write(append([]byte(tempPath), '\n'))
return err
}
// CleanupTempDirs removes all directories listed in the record file specified
// by recordPath.
// It should be invoked before creating any new temporary directories to clean
// up abandoned temporary directories.
// It should also be invoked when a newly created temporary directory is no
// longer needed and needs to be removed from the record file.
func CleanupTempDirs(recordPath string) error {
// Reading the entire file into memory shouldn't be a problem since
// it is extremely rare for this record file to contain more than a few
// entries.
f, err := os.OpenFile(recordPath, os.O_RDWR, 0644)
// There is no existing record file and thus nothing to clean up.
if oserror.IsNotExist(err) {
return nil
}
if err != nil {
return err
}
defer f.Close()
scanner := bufio.NewScanner(f)
// Iterate through each temporary directory path and remove the
// directory.
for scanner.Scan() {
path := scanner.Text()
if path == "" {
continue
}
// Check if the temporary directory exists; if it does not, skip over it.
if _, err := os.Stat(path); oserror.IsNotExist(err) {
log.Warningf(context.Background(), "could not locate previous temporary directory %s, might require manual cleanup, or might have already been cleaned up.", path)
continue
}
// Check if another Cockroach instance is using this temporary
// directory i.e. has a lock on the temp dir lock file.
flock, err := lockFile(filepath.Join(path, lockFilename))
if err != nil {
return errors.Wrapf(err, "could not lock temporary directory %s, may still be in use", path)
}
// On Windows, file locks are mandatory, so we must remove our lock on the
// lock file before we can remove the temporary directory. This yields a
// race condition: another process could start using the now-unlocked
// directory before we can remove it. Luckily, this doesn't matter, because
// these temporary directories are never reused. Any other process trying to
// lock this temporary directory is just trying to clean it up, too. Only
// the original process wants the data in this directory, and we know that
// process is dead because we were able to acquire the lock in the first
// place.
if err := unlockFile(flock); err != nil {
log.Errorf(context.TODO(), "could not unlock file lock when removing temporary directory: %s", err.Error())
}
// If path/directory does not exist, error is nil.
if err := os.RemoveAll(path); err != nil {
return err
}
}
// Clear out the record file now that we're done.
return f.Truncate(0)
}
| pkg/storage/fs/temp_dir.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.00029832060681656003,
0.0001798126904759556,
0.000165000616107136,
0.0001708117051748559,
0.00003128354364889674
] |
{
"id": 6,
"code_window": [
"\tdesc.PrimaryIndex.ConstraintID = 0\n",
"\trequire.NoError(t, s.DB().Put(\n",
"\t\tcontext.Background(),\n",
"\t\tcatalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),\n"
],
"labels": [
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\trequire.NoError(t, desc.ForeachOutboundFK(func(constraint *descpb.ForeignKeyConstraint) error {\n",
"\t\tconstraint.ConstraintID = 0\n",
"\t\treturn nil\n",
"\t}))\n",
"\trequire.NoError(t, desc.ForeachInboundFK(func(constraint *descpb.ForeignKeyConstraint) error {\n",
"\t\tconstraint.ConstraintID = 0\n",
"\t\treturn nil\n",
"\t}))\n",
"\tfor _, check := range desc.GetChecks() {\n",
"\t\tcheck.ConstraintID = 0\n",
"\t}\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "add",
"edit_start_line_idx": 67
} | - Feature Name: Default Privileges
- Status: in-progress
- Start Date: 2021-05-20
- Authors: Richard Cai
- RFC PR: [65794](https://github.com/cockroachdb/cockroach/pull/65794)
- Cockroach Issue: [64613](https://github.com/cockroachdb/cockroach/issues/64613), [65604](https://github.com/cockroachdb/cockroach/issues/65604)
# Summary
Currently CockroachDB handles privilege “inheritance” on objects in an ad-hoc way.
When a table-like or schema object is created, it copies its privilege descriptor from the parent database and removes any invalid privileges.
To illustrate how our current privilege inheritance works, when we create a table’s privilege descriptor, we’ll create a copy of the parent database’s privilege descriptor and remove all “invalid” privileges. An invalid privilege in this case is a privilege that is valid for databases but not for tables. For example, if “CONNECT” privilege is specified for a user on a database, we have to flip the CONNECT bit off because there is no CONNECT privilege for tables.
We’ve defined certain privileges to be valid on databases to support this method of inheritance. This has caused our privilege model to further diverge from Postgres'.
Our goal here is to do away with our ad-hoc privilege inheritance system and support default privileges like Postgres.
# Motivation
The main motivation for this change is to continue making our privilege system more compatible with Postgres. We want to reduce the burden on users of learning a new privilege system or having to learn the differences between the CockroachDB and Postgres privilege systems. We want as little friction as possible for users to use CockroachDB, and thus privileges should be compatible with Postgres.
Overall, our current privilege system can be confusing; our current privilege inheritance system follows neither the Postgres nor the MySQL semantics but is somewhat a mix of both.
# Technical design
### Default Privileges
The main technical change is to support default privileges. Ideally we want to support default privileges as closely as possible to Postgres’ default privileges.
Default privileges exist on schemas and databases. Note that Postgres refers to default privileges defined for a database as “global” default privileges. Global makes more sense in the Postgres world, as you cannot switch databases during a session like you can in CockroachDB. You can define default privileges for objects that are created by yourself or by roles that you are a member of.
Default privileges define the set of privileges on newly created objects in a database or schema. Default privileges are able to be set for specific users. Changing default privileges on a database or schema does not change the privileges of existing objects in them.
Postgres defines which privileges are default on which objects. See https://www.postgresql.org/docs/current/ddl-priv.html
In summary about default privileges in Postgres:
- No privileges are granted to the `public` role by default on tables, table columns, sequences, foreign data wrappers, foreign servers, large objects, schemas, or tablespaces.
- CONNECT and TEMPORARY are granted to the `public` role by default for databases.
- EXECUTE is granted to the `public` role by default for functions and procedures.
- USAGE is granted to the `public` role by default for languages / data types (including domains).
Since CockroachDB does not support all of the objects listed, the matrix below compares how the default privileges in Postgres (for the PUBLIC role) map to CockroachDB.
| | Postgres | CockroachDB |
| ----------------------------- | ------------------ | ---------------------------------------------- |
| Tables | None | None |
| Table Columns | None | N/A |
| Sequences | None | None |
| Foreign Data Wrappers | None | N/A |
| Foreign Servers | None | N/A |
| Large Objects | None | N/A |
| Schemas | None | None |
| Tablespaces | None | N/A |
| Databases | Connect, Temporary | Connect, Temporary (not yet supported in CRDB) |
| Functions / Stored Procedures | Execute | N/A |
| Languages | USAGE | N/A |
| Data types | USAGE | USAGE |
Once this work is complete, CockroachDB will support Postgres' concept of default privileges for all supported objects.
### Storing DEFAULT PRIVILEGES in CockroachDB
Currently, we store privileges in PrivilegeDescriptors. They exist on all “object” descriptors (i.e. TableDescriptor, DatabaseDescriptor). PrivilegeDescriptors carry UserPrivileges, which is a list of users and their privileges as a bit field.
`pg_default_acl`
To store default privileges, we can continue to use PrivilegeDescriptors (either by creating a new protobuf field called “DefaultPrivileges” on the object descriptors, or by adding the default privileges directly to the existing PrivilegeDescriptors). Alternatively, we can do what Postgres does and populate a table which dictates the initial privileges for newly created objects. We currently do something similar to this with users, role membership and role options in `system.users`, `system.role_members`, and `system.role_options`.
#### Storing default privileges on the object descriptor (protobuf)
**Pros**:
- How we currently handle our object privileges.
- Can likely re-use existing code, ie. We can store the default privileges in a PrivilegeDescriptor and copy the default PrivilegeDescriptor to create the new object’s PrivilegeDescriptor.
- Can re-use bit manipulation / validation code easily.
- Do not have to query system table to look for privileges
- System table queries have been a source of slowness for multi region setups
- Can grab PrivilegeDescriptor in O(1) time when creating a new object from the parent object.
- Descriptors are currently effectively cached whereas system tables are not.
**Cons**:
- Must loop through all object descriptors to create `pg_default_acl` table
- Default privileges can get arbitrarily large
- This is not any more problematic than how we currently store privileges on descriptors, default privileges in almost all cases should be smaller than the privilege descriptor itself.
Note: the public schema currently does not have a SchemaDescriptor. It may be a good idea to wait until the work to migrate away from descriptor-less public schemas is complete before working on supporting default privileges for schemas. Relevant issue: [#55793](https://github.com/cockroachdb/cockroach/issues/55793).
#### Alternative: Store default privileges in a table (like `pg_default_acl`)
**Pros**:
- Reduced materialization cost when querying the `pg_default_acl` table.
**Cons**:
- Have to query to grab privileges when creating table, at most O(logn) in number of objects
Note that this is fairly similar to how we store users / role options in `system.users` / `system.role_options`.
In terms of performance, option 1 optimizes for object creation since we can grab and copy the default privileges in O(1) time.
Intuitively, it makes more sense to optimize for object creation, since ALTER DEFAULT PRIVILEGES is likely to be run less frequently than objects are created. Option 1 is also more in line with what we currently do with privileges. Hence, overall option 1 is preferable.
### New Syntax
We’ll have to add new syntax for ALTER DEFAULT PRIVILEGES, copied from Postgres:
```
ALTER DEFAULT PRIVILEGES
[ FOR { ROLE | USER } target_role [, ...] ]
[ IN SCHEMA schema_name [, ...] ]
Abbreviated_grant_or_revoke
```
Note that there's no syntax where you specify a database name for altering default privileges in Postgres. This makes more sense in Postgres where databases are more isolated. We may want to explore having syntax for altering default privileges for a database.
Altering default privileges without specifying a schema always refers to the current database.
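To make the grammar concrete, here is a hypothetical usage sketch (the role, schema, and grantee names are purely illustrative):
```
-- Tables that alice creates in schema app will be readable by bob.
ALTER DEFAULT PRIVILEGES FOR ROLE alice IN SCHEMA app GRANT SELECT ON TABLES TO bob;

-- Undo the default; privileges on already-created tables are unaffected.
ALTER DEFAULT PRIVILEGES FOR ROLE alice IN SCHEMA app REVOKE SELECT ON TABLES FROM bob;
```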
### Migration / Upgrade Path
Another key point to take into consideration is how we will handle existing objects that will not have DEFAULT PRIVILEGES defined.
We largely have two options for a migration. One, we do an explicit migration: a long running migration to populate the default privilege PrivilegeDescriptors on all the database objects.
Or two, we can handle it similarly to how we handled owners: if the object descriptor has no PrivilegeDescriptor field (we can also use a bool or a version field), we’ll interpret it as some set of default privileges* and create the PrivilegeDescriptor as needed. If an ALTER DEFAULT PRIVILEGES is run for that object, we can then create and populate the descriptor (or flip the bool / update the version).
* With an explicit long running migration, we can version gate using a cluster setting to ensure that all the PrivilegeDescriptors have been upgraded such that we can use the default privilege syntax and features.
We can also first use option two to add logic to create default privileges on the fly if the object explicitly does not have default privileges and follow up with a long running migration in a future version.
Overall, I would start with option two as it is less intrusive before performing a long running migration to fully upgrade the descriptors.
We also have to take into consideration what set of default privileges we want to use if the default privileges are not set for a descriptor. We can either use the unchanged default set of privileges for that object type, or we may want to generate the default privileges based on the privileges on the object. For example, if a user has SELECT on a database, the set of default privileges we create should include SELECT for backwards compatibility reasons; prior to this change, a table created in a database where a user has the SELECT privilege would have the SELECT privilege on the table.
### Deprecating Postgres incompatible privileges
Incompatible privilege means a privilege we support granting/revoking on an object in CockroachDB that is not supported in Postgres.
In relation to default privileges, should we ensure all our privileges match 1:1 with Postgres? After supporting default privileges, we should be able to remove the incompatible privileges from being granted to objects. (See table below for incompatibilities).
Example: currently we support SELECT on databases in CockroachDB; Postgres does not allow granting SELECT on databases. As mentioned before, one part of allowing SELECT on databases was to make our ad-hoc inheritance system work.
Once we have our DEFAULT PRIVILEGES system implemented and ironed out, should we make an effort to fully deprecate SELECT on databases and other similar cases? We currently have not deprecated any privileges, to maintain backwards compatibility.
One possible migration path is to deprecate GRANTing of incompatible privileges while still allowing them to be REVOKEd (from old descriptors) and remaining technically “valid” to have on the descriptor.
In conjunction with deprecating the GRANT syntax, we can also leverage the version field on PrivilegeDescriptors to ensure PrivilegeDescriptors created on the version with default privileges do not have the incompatible privileges.
If necessary / not risky - we can create a long-running migration to fully remove all invalid privileges and upgrade the versions of the Privilege descriptors.
#### Proposed migration path:
1. In 21.2, continue supporting the syntax for incompatible privileges; however, under the hood we will "translate" the incompatible privilege to the corresponding default privilege.
- GRANTs will be translated to ALTER DEFAULT PRIVILEGES GRANT ...
- REVOKEs will have to check both the Privileges and Default privileges if it is an incompatible privilege.
- We will add a warning that the syntax will be deprecated in 22.1.
- This gives us more time to warn users about deprecation of GRANTing certain privileges.
   - The caveat to this is that our current privilege inheritance model does not translate exactly 1:1 to default privileges; read the section on [Translating Our Current Privilege Inheritance to Default Privileges](#translate).
2. In 22.1, deprecate syntax for granting/revoking incompatible privileges and add a long running migration to upgrade PrivilegeDescriptors and remove / migrate incompatible privileges.
3. In 22.2, remove long running migration.
This is ideal as we give users a full version before we deprecate syntax while maintaining
full backwards compatibility for how privilege inheritance currently works.
#### Translating Our Current Privilege Inheritance to Default Privileges <span id="translate"></span>
How our current privilege inheritance model works is that whenever we create an object, we copy over any "valid" privileges from the database to the new object.
This means if a user has a privilege - for example `SELECT` on the database, regardless of who creates
a table in that database, the user will have `SELECT` on the table.
This concept does not translate 1:1 to Postgres' concept of default privileges.
Default privileges are set per role. The set of privileges a user gets on an object when it is created
depends on who created the object.
There is no concept of having a set of default privileges for any role that creates an object - which is currently how it works in CockroachDB.
To illustrate this further, `ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES to foo` is actually equivalent to `ALTER DEFAULT PRIVILEGES FOR USER current_user GRANT SELECT ON TABLES to foo`; this means any table current_user creates in the current database will have SELECT on it for user foo.
There is no concept of `ALTER DEFAULT PRIVILEGES FOR ALL ...`
The proposal to work around this is to have a pseudo `ALL` role to which we translate such GRANTs for the purpose of the migration.
To grant or revoke from the pseudo `ALL` role, we could support the syntax
`ALTER DEFAULT PRIVILEGES FOR ALL ROLES...`
In v21.2, we would translate `GRANT SELECT ON DATABASE d TO foo` to `ALTER DEFAULT PRIVILEGES FOR ALL ROLES GRANT SELECT ON TABLES TO foo`.
Note only tables "inherit" `SELECT` privilege from the parent database.
Furthermore, all existing `SELECTs` and similar invalid privileges that we support granting for the sake of inheritance
will be migrated over to default privileges for `ALL` or the chosen reserved role.
When creating a new object, the set of privileges on the object is the union of the privileges for the role creating the object and the `ALL` role.
We could choose to deprecate the feature of `ALTER DEFAULT PRIVILEGES FOR ALL ROLES...` in 22.1 if necessary.
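As a concrete sketch of the proposed translation and the union behavior, using the proposed (not yet finalized) `FOR ALL ROLES` syntax; the role names are illustrative:
```
-- 21.2 translation of the legacy statement GRANT SELECT ON DATABASE d TO foo:
ALTER DEFAULT PRIVILEGES FOR ALL ROLES GRANT SELECT ON TABLES TO foo;

-- A per-role default set by alice:
ALTER DEFAULT PRIVILEGES FOR ROLE alice GRANT INSERT ON TABLES TO bar;

-- A table subsequently created by alice starts with SELECT for foo (from the ALL entry)
-- and INSERT for bar (from alice's entry).
```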
#### Alternative Migration Path
If we find that translating granted privileges over to default privileges is not ideal,
we can completely forgo the translation and simply continue to support granting invalid privileges in 21.2 with a deprecation warning.
In 22.1, we deprecate the syntax for granting invalid privileges and add a migration for invalid privileges to default privileges. We would still need some concept of the `ALL` role.
In 22.2, we remove the migration. We would also need a mechanism to remove default privileges from the `ALL` role.
### Privilege Incompatibilities (CockroachDB vs Postgres)
| Privilege | Applicable Object Types (Postgres) | Applicable Object Types (CRDB) |
| ---------- | ------------------------------------------------------------------------------ | --------------------------------------------------- |
| SELECT | LARGE OBJECT, SEQUENCE, TABLE (and table-like objects), table column | SEQUENCE, TABLE (and table-like objects), DATABASES |
| INSERT | TABLE, table column | TABLE (and table-like objects), DATABASES |
| UPDATE | LARGE OBJECT, SEQUENCE, TABLE, table column | TABLE (and table-like objects), DATABASES |
| DELETE | TABLE | TABLE (and table-like objects), DATABASES |
| TRUNCATE | TABLE | N/A |
| REFERENCES | TABLE, table column | N/A |
| TRIGGER | TABLE | N/A |
| CREATE | DATABASE, SCHEMA, TABLESPACE | DATABASE, SCHEMA, TABLE (and table-like objects) |
| CONNECT | DATABASE | DATABASE |
| TEMPORARY | DATABASE | N/A |
| EXECUTE | FUNCTION, PROCEDURE | N/A |
| USAGE | DOMAIN, FOREIGN DATA WRAPPER, FOREIGN SERVER, LANGUAGE, SCHEMA, SEQUENCE, TYPE | SCHEMA, TYPE |
- **Table-like objects include sequences and views.**
- **Sequences have the same privilege set as tables in CRDB; this is not true in Postgres (see USAGE).**
### SHOW DEFAULT PRIVILEGES
```
SHOW DEFAULT PRIVILEGES
[ FOR { ROLE | USER } role [, ...] ]
[ IN SCHEMA schema [, ...]]
```
Similarly to privileges, it may be useful to have a `SHOW DEFAULT PRIVILEGES` command.
Having a notion of `SHOW DEFAULT PRIVILEGES FOR ALL` will also be useful.
We can either have explicit syntax, i.e. `SHOW DEFAULT PRIVILEGES FOR ALL [ROLES]`,
or make it so that omitting the `FOR ROLE` part of the syntax returns a list of
default privileges for all roles. This should follow closely how (and whether) we implement default privileges for all users.
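A sketch of possible usage, assuming the grammar above (role and schema names are illustrative):
```
SHOW DEFAULT PRIVILEGES FOR ROLE alice IN SCHEMA app;
SHOW DEFAULT PRIVILEGES;  -- semantics of the omitted FOR ROLE clause to be decided, as discussed above
```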
## Explain it to folk outside of your team
In an effort to make our privilege system more compatible with Postgres, we would like to introduce default privileges to CockroachDB. CockroachDB currently does not have the concept of default privileges and thus privileges for new objects have their privileges somewhat arbitrarily copied from the schema or database they’re created in. Default privileges allow a user to specify which privileges new objects in a schema or database will have upon creation. This will reduce the burden of requiring CockroachDB users to learn a new type of privilege system. Specifically users who come from Postgres will not have to learn about specific rules in Cockroach’s privilege system reducing the friction of using CockroachDB.
| docs/RFCS/20210527_default_privileges.md | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.0001910999562824145,
0.00017055109492503107,
0.00016305374447256327,
0.00016666812007315457,
0.000008715878720977344
] |
{
"id": 7,
"code_window": [
"\t))\n",
"\t// Validate that the post serialization will recompute the constraint IDs\n",
"\t// if they are missing.\n",
"\tdesc = desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, \"defaultdb\", \"t\")\n",
"\trequire.Equal(t, desc.PrimaryIndex.ConstraintID, descpb.ConstraintID(2))\n",
"\t// If we set both the constraint ID / next value to 0, then we will have\n",
"\t// it assigned form scratch.\n",
"\tdesc.PrimaryIndex.ConstraintID = 0\n",
"\tdesc.NextConstraintID = 0\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\trequire.Equal(t, desc.PrimaryIndex.ConstraintID, descpb.ConstraintID(4))\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "replace",
"edit_start_line_idx": 75
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package migrations_test
import (
"context"
"fmt"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/stretchr/testify/require"
)
// TestEnsureConstraintIDs tests that constraint IDs are added as expected.
func TestEnsureConstraintIDs(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
// Start off with the version that did not support
// constraint IDs.
clusterArgs := base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
DisableAutomaticVersionUpgrade: 1,
BinaryVersionOverride: clusterversion.ByKey(
tabledesc.ConstraintIDsAddedToTableDescsVersion - 1),
},
},
},
}
c := keys.SystemSQLCodec
ctx := context.Background()
tc := testcluster.StartTestCluster(t, 1, clusterArgs)
s := tc.Server(0)
defer tc.Stopper().Stop(ctx)
sqlDB := tc.ServerConn(0)
tdb := sqlutils.MakeSQLRunner(sqlDB)
// Create table with a primary key constraint.
tdb.Exec(t, "CREATE TABLE t(name int primary key)")
// Validate the comments on constraints are blocked.
tdb.ExpectErr(t,
"pq: cannot comment on constraint",
"COMMENT ON CONSTRAINT \"t_pkey\" ON t IS 'primary_comment'")
// Validate that we have a constraint ID due to post deserialization logic
desc := desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, "defaultdb", "t")
desc.PrimaryIndex.ConstraintID = 0
require.NoError(t, s.DB().Put(
context.Background(),
catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),
desc.DescriptorProto(),
))
// Validate that the post serialization will recompute the constraint IDs
// if they are missing.
desc = desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, "defaultdb", "t")
require.Equal(t, desc.PrimaryIndex.ConstraintID, descpb.ConstraintID(2))
// If we set both the constraint ID / next value to 0, then we will have
// it assigned form scratch.
desc.PrimaryIndex.ConstraintID = 0
desc.NextConstraintID = 0
require.NoError(t, s.DB().Put(
context.Background(),
catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),
desc.DescriptorProto(),
))
// Validate that the descriptor is invalid, since the constraint IDs
// are missing.
tdb.CheckQueryResults(t,
`SELECT strpos(desc_json, 'constraintId') > 0,
strpos(desc_json, 'nextConstraintId') > 0
FROM (
SELECT jsonb_pretty(
crdb_internal.pb_to_json(
'cockroach.sql.sqlbase.Descriptor',
descriptor,
false
)
) AS desc_json
FROM system.descriptor
WHERE id = `+
fmt.Sprintf("%d", desc.GetID())+
`);`,
[][]string{{"false", "false"}},
)
// Migrate to the new cluster version.
tdb.Exec(t, `SET CLUSTER SETTING version = $1`,
clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String())
tdb.CheckQueryResultsRetry(t, "SHOW CLUSTER SETTING version",
[][]string{{clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String()}})
// Validate the constraint IDs are populated.
// Validate that the descriptor is invalid, since the constraint IDs
// are missing.
tdb.CheckQueryResults(t,
`SELECT strpos(desc_json, 'constraintId') > 0,
strpos(desc_json, 'nextConstraintId') > 0
FROM (
SELECT jsonb_pretty(
crdb_internal.pb_to_json(
'cockroach.sql.sqlbase.Descriptor',
descriptor,
false
)
) AS desc_json
FROM system.descriptor
WHERE id = `+
fmt.Sprintf("%d", desc.GetID())+
`);`,
[][]string{{"true", "true"}},
)
// Validate we can comment constraints.
tdb.Exec(t,
"COMMENT ON CONSTRAINT \"t_pkey\" ON t IS 'primary_comment'")
}
| pkg/migration/migrations/ensure_constraint_id_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.998259961605072,
0.21286489069461823,
0.0001628599566174671,
0.0005718145403079689,
0.40356555581092834
] |
{
"id": 7,
"code_window": [
"\t))\n",
"\t// Validate that the post serialization will recompute the constraint IDs\n",
"\t// if they are missing.\n",
"\tdesc = desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, \"defaultdb\", \"t\")\n",
"\trequire.Equal(t, desc.PrimaryIndex.ConstraintID, descpb.ConstraintID(2))\n",
"\t// If we set both the constraint ID / next value to 0, then we will have\n",
"\t// it assigned form scratch.\n",
"\tdesc.PrimaryIndex.ConstraintID = 0\n",
"\tdesc.NextConstraintID = 0\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\trequire.Equal(t, desc.PrimaryIndex.ConstraintID, descpb.ConstraintID(4))\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "replace",
"edit_start_line_idx": 75
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package memo
import (
"context"
"fmt"
"math/bits"
"sort"
"strings"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/cat"
"github.com/cockroachdb/cockroach/pkg/sql/opt/constraint"
"github.com/cockroachdb/cockroach/pkg/sql/opt/props"
"github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree/treewindow"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
)
// RelExpr is implemented by all operators tagged as Relational. Relational
// expressions have a set of logical properties that describe the content and
// characteristics of their behavior and results. They are stored as part of a
// memo group that contains other logically equivalent expressions. Expressions
// in the same memo group are linked together in a list that can be traversed
// via calls to FirstExpr and NextExpr:
//
// +--------------------------------------+
// | +---------------+ |
// | | |FirstExpr |FirstExpr
// v v | |
// member #1 -------> member #2 --------> member #3 -------> nil
// NextExpr NextExpr NextExpr
//
// A relational expression's physical properties and cost are defined once it
// has been optimized.
type RelExpr interface {
opt.Expr
// Memo is the memo which contains this relational expression.
Memo() *Memo
// Relational is the set of logical properties that describe the content and
// characteristics of this expression's behavior and results.
Relational() *props.Relational
// RequiredPhysical is the set of required physical properties with respect to
// which this expression was optimized. Enforcers may be added to the
// expression tree to ensure the physical properties are provided.
//
// Set when optimization is complete, only for the expressions in the final
// tree.
RequiredPhysical() *physical.Required
// ProvidedPhysical is the set of provided physical properties (which must be
// compatible with the set of required physical properties).
//
// Set when optimization is complete, only for the expressions in the final
// tree.
ProvidedPhysical() *physical.Provided
// Cost is an estimate of the cost of executing this expression tree. Set
// when optimization is complete, only for the expressions in the final tree.
Cost() Cost
// FirstExpr returns the first member expression in the memo group (could be
// this expression if it happens to be first in the group). Subsequent members
// can be enumerated by then calling NextExpr. Note that enforcer operators
// are not part of this list (but do maintain a link to it).
FirstExpr() RelExpr
// NextExpr returns the next member expression in the memo group, or nil if
// there are no further members in the group.
NextExpr() RelExpr
// group returns the memo group that contains this expression and any other
// logically equivalent expressions. There is one group struct for each memo
// group that stores the properties for the group, as well as the pointer to
// the first member of the group.
group() exprGroup
// bestProps returns the instance of bestProps associated with this
// expression.
bestProps() *bestProps
// setNext sets this expression's next pointer to point to the given
// expression. setNext will panic if the next pointer has already been set.
setNext(e RelExpr)
}
// ScalarPropsExpr is implemented by scalar expressions which cache scalar
// properties, like FiltersExpr and ProjectionsExpr. These expressions are also
// tagged with the ScalarProps tag.
type ScalarPropsExpr interface {
opt.ScalarExpr
// ScalarProps returns the scalar properties associated with the expression.
ScalarProps() *props.Scalar
}
// TrueSingleton is a global instance of TrueExpr, to avoid allocations.
var TrueSingleton = &TrueExpr{}
// FalseSingleton is a global instance of FalseExpr, to avoid allocations.
var FalseSingleton = &FalseExpr{}
// NullSingleton is a global instance of NullExpr having the Unknown type (most
// common case), to avoid allocations.
var NullSingleton = &NullExpr{Typ: types.Unknown}
// TODO(justin): perhaps these should be auto-generated.
// RankSingleton is the global instance of RankExpr.
var RankSingleton = &RankExpr{}
// RowNumberSingleton is the global instance of RowNumber.
var RowNumberSingleton = &RowNumberExpr{}
// DenseRankSingleton is the global instance of DenseRankExpr.
var DenseRankSingleton = &DenseRankExpr{}
// PercentRankSingleton is the global instance of PercentRankExpr.
var PercentRankSingleton = &PercentRankExpr{}
// CumeDistSingleton is the global instance of CumeDistExpr.
var CumeDistSingleton = &CumeDistExpr{}
// CountRowsSingleton maintains a global instance of CountRowsExpr, to avoid
// allocations.
var CountRowsSingleton = &CountRowsExpr{}
// TrueFilter is a global instance of the empty FiltersExpr, used in situations
// where the filter should always evaluate to true:
//
// SELECT * FROM a INNER JOIN b ON True
//
var TrueFilter = FiltersExpr{}
// EmptyTuple is a global instance of a TupleExpr that contains no elements.
// While this cannot be created in SQL, it can be the created by normalizations.
var EmptyTuple = &TupleExpr{Typ: types.EmptyTuple}
// ScalarListWithEmptyTuple is a global instance of a ScalarListExpr containing
// a TupleExpr that contains no elements. It's used when constructing an empty
// ValuesExpr:
//
// SELECT 1
//
var ScalarListWithEmptyTuple = ScalarListExpr{EmptyTuple}
// EmptyGroupingPrivate is a global instance of a GroupingPrivate that has no
// grouping columns and no ordering.
var EmptyGroupingPrivate = &GroupingPrivate{}
// EmptyJoinPrivate is a global instance of a JoinPrivate that has no fields
// set.
var EmptyJoinPrivate = &JoinPrivate{}
// LastGroupMember returns the last member in the same memo group of the given
// relational expression.
func LastGroupMember(e RelExpr) RelExpr {
for {
next := e.NextExpr()
if next == nil {
return e
}
e = next
}
}
// IsTrue is true if the FiltersExpr always evaluates to true. This is the case
// when it has zero conditions.
func (n FiltersExpr) IsTrue() bool {
return len(n) == 0
}
// IsFalse is true if the FiltersExpr always evaluates to false. The only case
// that's checked is the fully normalized case, when the list contains a single
// False condition.
func (n FiltersExpr) IsFalse() bool {
return len(n) == 1 && n[0].Condition.Op() == opt.FalseOp
}
// OuterCols returns the set of outer columns needed by any of the filter
// condition expressions.
func (n FiltersExpr) OuterCols() opt.ColSet {
var colSet opt.ColSet
for i := range n {
colSet.UnionWith(n[i].ScalarProps().OuterCols)
}
return colSet
}
// Sort sorts the FilterItems in n by the ranks of the expressions.
func (n *FiltersExpr) Sort() {
sort.Slice(*n, func(i, j int) bool {
return (*n)[i].Condition.Rank() < (*n)[j].Condition.Rank()
})
}
// Deduplicate removes all the duplicate filters from n.
func (n *FiltersExpr) Deduplicate() {
dedup := (*n)[:0]
// Only add it if it hasn't already been added.
for i, filter := range *n {
found := false
for j := i - 1; j >= 0; j-- {
previouslySeenFilter := (*n)[j]
if previouslySeenFilter.Condition == filter.Condition {
found = true
break
}
}
if !found {
dedup = append(dedup, filter)
}
}
*n = dedup
}
// RemoveFiltersItem returns a new list that is a copy of the given list, except
// that it does not contain the given FiltersItem. If the list contains the item
// multiple times, then only the first instance is removed. If the list does not
// contain the item, then the method panics.
func (n FiltersExpr) RemoveFiltersItem(search *FiltersItem) FiltersExpr {
newFilters := make(FiltersExpr, len(n)-1)
for i := range n {
if search == &n[i] {
copy(newFilters, n[:i])
copy(newFilters[i:], n[i+1:])
return newFilters
}
}
panic(errors.AssertionFailedf("item to remove is not in the list: %v", search))
}
// Difference returns a new list of filters containing the filters of n that are
// not in other. If other is empty, n is returned.
func (n FiltersExpr) Difference(other FiltersExpr) FiltersExpr {
if len(other) == 0 {
return n
}
newFilters := make(FiltersExpr, 0, len(n))
for i := range n {
found := false
for j := range other {
if n[i].Condition == other[j].Condition {
found = true
break
}
}
if !found {
newFilters = append(newFilters, n[i])
}
}
return newFilters
}
// OutputCols returns the set of columns constructed by the Aggregations
// expression.
func (n AggregationsExpr) OutputCols() opt.ColSet {
var colSet opt.ColSet
for i := range n {
colSet.Add(n[i].Col)
}
return colSet
}
// OuterCols returns the set of outer columns needed by any of the zip
// expressions.
func (n ZipExpr) OuterCols() opt.ColSet {
var colSet opt.ColSet
for i := range n {
colSet.UnionWith(n[i].ScalarProps().OuterCols)
}
return colSet
}
// OutputCols returns the set of columns constructed by the Zip expression.
func (n ZipExpr) OutputCols() opt.ColSet {
var colSet opt.ColSet
for i := range n {
for _, col := range n[i].Cols {
colSet.Add(col)
}
}
return colSet
}
// TupleOrdinal is an ordinal index into an expression of type Tuple. It is
// used by the ColumnAccess scalar expression.
type TupleOrdinal uint32
// ScanLimit is used for a limited table or index scan and stores the limit as
// well as the desired scan direction. A value of 0 means that there is no
// limit.
type ScanLimit int64
// MakeScanLimit initializes a ScanLimit with a number of rows and a direction.
func MakeScanLimit(rowCount int64, reverse bool) ScanLimit {
if reverse {
return ScanLimit(-rowCount)
}
return ScanLimit(rowCount)
}
// IsSet returns true if there is a limit.
func (sl ScanLimit) IsSet() bool {
return sl != 0
}
// RowCount returns the number of rows in the limit.
func (sl ScanLimit) RowCount() int64 {
if sl.Reverse() {
return int64(-sl)
}
return int64(sl)
}
// Reverse returns true if the limit requires a reverse scan.
func (sl ScanLimit) Reverse() bool {
return sl < 0
}
func (sl ScanLimit) String() string {
if sl.Reverse() {
return fmt.Sprintf("%d(rev)", -sl)
}
return fmt.Sprintf("%d", sl)
}
// ScanFlags stores any flags for the scan specified in the query (see
// tree.IndexFlags). These flags may be consulted by transformation rules or the
// coster.
type ScanFlags struct {
// NoIndexJoin disallows use of non-covering indexes (index-join) for scanning
// this table.
NoIndexJoin bool
// NoZigzagJoin disallows use of a zigzag join for scanning this table.
NoZigzagJoin bool
// NoFullScan disallows use of a full scan for scanning this table.
NoFullScan bool
// ForceIndex forces the use of a specific index (specified in Index).
// ForceIndex and NoIndexJoin cannot both be set at the same time.
ForceIndex bool
ForceZigzag bool
Direction tree.Direction
Index int
// ZigzagIndexes makes planner prefer a zigzag with particular indexes.
// ForceZigzag must also be true.
ZigzagIndexes util.FastIntSet
}
// Empty returns true if there are no flags set.
func (sf *ScanFlags) Empty() bool {
return *sf == ScanFlags{}
}
// JoinFlags stores restrictions on the join execution method, derived from
// hints for a join specified in the query (see tree.JoinTableExpr). It is a
// bitfield where each bit indicates if a certain type of join is disallowed or
// preferred.
//
// The zero value indicates that any join is allowed and there are no special
// preferences.
type JoinFlags uint16
// Each flag indicates if a certain type of join is disallowed.
const (
// DisallowHashJoinStoreLeft corresponds to a hash join where the left side is
// stored into the hashtable. Note that execution can override the stored side
// if it finds that the other side is smaller (up to a certain size).
DisallowHashJoinStoreLeft JoinFlags = 1 << iota
// DisallowHashJoinStoreRight corresponds to a hash join where the right side
// is stored into the hashtable. Note that execution can override the stored
// side if it finds that the other side is smaller (up to a certain size).
DisallowHashJoinStoreRight
// DisallowMergeJoin corresponds to a merge join.
DisallowMergeJoin
// DisallowLookupJoinIntoLeft corresponds to a lookup join where the lookup
// table is on the left side.
DisallowLookupJoinIntoLeft
// DisallowLookupJoinIntoRight corresponds to a lookup join where the lookup
// table is on the right side.
DisallowLookupJoinIntoRight
// DisallowInvertedJoinIntoLeft corresponds to an inverted join where the
// inverted index is on the left side.
DisallowInvertedJoinIntoLeft
// DisallowInvertedJoinIntoRight corresponds to an inverted join where the
// inverted index is on the right side.
DisallowInvertedJoinIntoRight
// PreferLookupJoinIntoLeft reduces the cost of a lookup join where the lookup
// table is on the left side.
PreferLookupJoinIntoLeft
// PreferLookupJoinIntoRight reduces the cost of a lookup join where the
// lookup table is on the right side.
PreferLookupJoinIntoRight
)
const (
disallowAll = DisallowHashJoinStoreLeft |
DisallowHashJoinStoreRight |
DisallowMergeJoin |
DisallowLookupJoinIntoLeft |
DisallowLookupJoinIntoRight |
DisallowInvertedJoinIntoLeft |
DisallowInvertedJoinIntoRight
// AllowOnlyHashJoinStoreRight has all "disallow" flags set except
// DisallowHashJoinStoreRight.
AllowOnlyHashJoinStoreRight = disallowAll ^ DisallowHashJoinStoreRight
// AllowOnlyLookupJoinIntoRight has all "disallow" flags set except
// DisallowLookupJoinIntoRight.
AllowOnlyLookupJoinIntoRight = disallowAll ^ DisallowLookupJoinIntoRight
// AllowOnlyInvertedJoinIntoRight has all "disallow" flags set except
// DisallowInvertedJoinIntoRight.
AllowOnlyInvertedJoinIntoRight = disallowAll ^ DisallowInvertedJoinIntoRight
// AllowOnlyMergeJoin has all "disallow" flags set except DisallowMergeJoin.
AllowOnlyMergeJoin = disallowAll ^ DisallowMergeJoin
)
var joinFlagStr = map[JoinFlags]string{
DisallowHashJoinStoreLeft: "hash join (store left side)",
DisallowHashJoinStoreRight: "hash join (store right side)",
DisallowMergeJoin: "merge join",
DisallowLookupJoinIntoLeft: "lookup join (into left side)",
DisallowLookupJoinIntoRight: "lookup join (into right side)",
DisallowInvertedJoinIntoLeft: "inverted join (into left side)",
DisallowInvertedJoinIntoRight: "inverted join (into right side)",
PreferLookupJoinIntoLeft: "lookup join (into left side)",
PreferLookupJoinIntoRight: "lookup join (into right side)",
}
// Empty returns true if this is the default value (where all join types are
// allowed).
func (jf JoinFlags) Empty() bool {
return jf == 0
}
// Has returns true if the given flag is set.
func (jf JoinFlags) Has(flag JoinFlags) bool {
return jf&flag != 0
}
func (jf JoinFlags) String() string {
if jf.Empty() {
return "no flags"
}
prefer := jf & (PreferLookupJoinIntoLeft | PreferLookupJoinIntoRight)
disallow := jf ^ prefer
// Special cases with prettier results for common cases.
var b strings.Builder
switch disallow {
case AllowOnlyHashJoinStoreRight:
b.WriteString("force hash join (store right side)")
case AllowOnlyLookupJoinIntoRight:
b.WriteString("force lookup join (into right side)")
case AllowOnlyInvertedJoinIntoRight:
b.WriteString("force inverted join (into right side)")
case AllowOnlyMergeJoin:
b.WriteString("force merge join")
default:
for disallow != 0 {
flag := JoinFlags(1 << uint8(bits.TrailingZeros8(uint8(disallow))))
if b.Len() == 0 {
b.WriteString("disallow ")
} else {
b.WriteString(" and ")
}
b.WriteString(joinFlagStr[flag])
disallow ^= flag
}
}
for prefer != 0 {
flag := JoinFlags(1 << uint8(bits.TrailingZeros8(uint8(prefer))))
if b.Len() > 0 {
b.WriteString("; ")
}
b.WriteString("prefer ")
b.WriteString(joinFlagStr[flag])
prefer ^= flag
}
return b.String()
}
func (ij *InnerJoinExpr) initUnexportedFields(mem *Memo) {
initJoinMultiplicity(ij)
}
func (lj *LeftJoinExpr) initUnexportedFields(mem *Memo) {
initJoinMultiplicity(lj)
}
func (fj *FullJoinExpr) initUnexportedFields(mem *Memo) {
initJoinMultiplicity(fj)
}
func (sj *SemiJoinExpr) initUnexportedFields(mem *Memo) {
initJoinMultiplicity(sj)
}
func (lj *LookupJoinExpr) initUnexportedFields(mem *Memo) {
// lookupProps are initialized as necessary by the logical props builder.
}
func (gj *InvertedJoinExpr) initUnexportedFields(mem *Memo) {
// lookupProps are initialized as necessary by the logical props builder.
}
func (zj *ZigzagJoinExpr) initUnexportedFields(mem *Memo) {
// leftProps and rightProps are initialized as necessary by the logical props
// builder.
}
// joinWithMultiplicity allows join operators for which JoinMultiplicity is
// supported (currently InnerJoin, LeftJoin, and FullJoin) to be treated
// polymorphically.
type joinWithMultiplicity interface {
setMultiplicity(props.JoinMultiplicity)
getMultiplicity() props.JoinMultiplicity
}
var _ joinWithMultiplicity = &InnerJoinExpr{}
var _ joinWithMultiplicity = &LeftJoinExpr{}
var _ joinWithMultiplicity = &FullJoinExpr{}
var _ joinWithMultiplicity = &SemiJoinExpr{}
func (ij *InnerJoinExpr) setMultiplicity(multiplicity props.JoinMultiplicity) {
ij.multiplicity = multiplicity
}
func (ij *InnerJoinExpr) getMultiplicity() props.JoinMultiplicity {
return ij.multiplicity
}
func (lj *LeftJoinExpr) setMultiplicity(multiplicity props.JoinMultiplicity) {
lj.multiplicity = multiplicity
}
func (lj *LeftJoinExpr) getMultiplicity() props.JoinMultiplicity {
return lj.multiplicity
}
func (fj *FullJoinExpr) setMultiplicity(multiplicity props.JoinMultiplicity) {
fj.multiplicity = multiplicity
}
func (fj *FullJoinExpr) getMultiplicity() props.JoinMultiplicity {
return fj.multiplicity
}
func (sj *SemiJoinExpr) setMultiplicity(multiplicity props.JoinMultiplicity) {
sj.multiplicity = multiplicity
}
func (sj *SemiJoinExpr) getMultiplicity() props.JoinMultiplicity {
return sj.multiplicity
}
// WindowFrame denotes the definition of a window frame for an individual
// window function, excluding the OFFSET expressions, if present.
type WindowFrame struct {
Mode treewindow.WindowFrameMode
StartBoundType treewindow.WindowFrameBoundType
EndBoundType treewindow.WindowFrameBoundType
FrameExclusion treewindow.WindowFrameExclusion
}
// HasOffset returns true if the WindowFrame contains a specific offset.
func (f *WindowFrame) HasOffset() bool {
return f.StartBoundType.IsOffset() || f.EndBoundType.IsOffset()
}
func (f *WindowFrame) String() string {
var bld strings.Builder
switch f.Mode {
case treewindow.GROUPS:
fmt.Fprintf(&bld, "groups")
case treewindow.ROWS:
fmt.Fprintf(&bld, "rows")
case treewindow.RANGE:
fmt.Fprintf(&bld, "range")
}
frameBoundName := func(b treewindow.WindowFrameBoundType) string {
switch b {
case treewindow.UnboundedFollowing, treewindow.UnboundedPreceding:
return "unbounded"
case treewindow.CurrentRow:
return "current-row"
case treewindow.OffsetFollowing, treewindow.OffsetPreceding:
return "offset"
}
panic(errors.AssertionFailedf("unexpected bound"))
}
fmt.Fprintf(&bld, " from %s to %s",
frameBoundName(f.StartBoundType),
frameBoundName(f.EndBoundType),
)
switch f.FrameExclusion {
case treewindow.ExcludeCurrentRow:
bld.WriteString(" exclude current row")
case treewindow.ExcludeGroup:
bld.WriteString(" exclude group")
case treewindow.ExcludeTies:
bld.WriteString(" exclude ties")
}
return bld.String()
}
// IsCanonical returns true if the ScanPrivate indicates an original unaltered
// primary index Scan operator (i.e. unconstrained and not limited).
func (s *ScanPrivate) IsCanonical() bool {
return s.Index == cat.PrimaryIndex &&
s.Constraint == nil &&
s.HardLimit == 0 &&
!s.LocalityOptimized
}
// IsUnfiltered returns true if the ScanPrivate will produce all rows in the
// table.
func (s *ScanPrivate) IsUnfiltered(md *opt.Metadata) bool {
return (s.Constraint == nil || s.Constraint.IsUnconstrained()) &&
s.InvertedConstraint == nil &&
s.HardLimit == 0 &&
s.PartialIndexPredicate(md) == nil
}
// IsFullIndexScan returns true if the ScanPrivate will produce all rows in the
// index.
func (s *ScanPrivate) IsFullIndexScan(md *opt.Metadata) bool {
return (s.Constraint == nil || s.Constraint.IsUnconstrained()) &&
s.InvertedConstraint == nil &&
s.HardLimit == 0
}
// IsLocking returns true if the ScanPrivate is configured to use a row-level
// locking mode. This can be the case either because the Scan is in the scope of
// a SELECT .. FOR [KEY] UPDATE/SHARE clause or because the Scan was configured
// as part of the row retrieval of a DELETE or UPDATE statement.
func (s *ScanPrivate) IsLocking() bool {
return s.Locking != nil
}
// PartialIndexPredicate returns the FiltersExpr representing the predicate of
// the partial index that the scan uses. If the scan does not use a partial
// index, nil is returned.
func (s *ScanPrivate) PartialIndexPredicate(md *opt.Metadata) FiltersExpr {
tabMeta := md.TableMeta(s.Table)
p, ok := tabMeta.PartialIndexPredicate(s.Index)
if !ok {
// The index is not a partial index.
return nil
}
return *p.(*FiltersExpr)
}
// SetConstraint sets the constraint in the ScanPrivate and caches the exact
// prefix. This function should always be used instead of modifying the
// constraint directly.
func (s *ScanPrivate) SetConstraint(evalCtx *tree.EvalContext, c *constraint.Constraint) {
s.Constraint = c
if c == nil {
s.ExactPrefix = 0
} else {
s.ExactPrefix = c.ExactPrefix(evalCtx)
}
}
// UsesPartialIndex returns true if the LookupJoinPrivate looks-up via a
// partial index.
func (lj *LookupJoinPrivate) UsesPartialIndex(md *opt.Metadata) bool {
_, isPartialIndex := md.Table(lj.Table).Index(lj.Index).Predicate()
return isPartialIndex
}
// NeedResults returns true if the mutation operator can return the rows that
// were mutated.
func (m *MutationPrivate) NeedResults() bool {
return m.ReturnCols != nil
}
// IsColumnOutput returns true if the i-th ordinal column should be part of the
// mutation's output columns.
func (m *MutationPrivate) IsColumnOutput(i int) bool {
return i < len(m.ReturnCols) && m.ReturnCols[i] != 0
}
// MapToInputID maps from the ID of a returned column to the ID of the
// corresponding input column that provides the value for it. If there is no
// matching input column ID, MapToInputID returns 0.
//
// NOTE: This can only be called if the mutation operator returns rows.
func (m *MutationPrivate) MapToInputID(tabColID opt.ColumnID) opt.ColumnID {
if m.ReturnCols == nil {
panic(errors.AssertionFailedf("MapToInputID cannot be called if ReturnCols is not defined"))
}
ord := m.Table.ColumnOrdinal(tabColID)
return m.ReturnCols[ord]
}
// MapToInputCols maps the given set of columns to a corresponding set of
// input columns using the PassthroughCols list and MapToInputID function.
func (m *MutationPrivate) MapToInputCols(cols opt.ColSet) opt.ColSet {
var inCols opt.ColSet
// First see if any of the columns come from the passthrough columns.
for _, c := range m.PassthroughCols {
if cols.Contains(c) {
inCols.Add(c)
}
}
// The remaining columns must come from the table.
tabCols := cols.Difference(inCols)
tabCols.ForEach(func(t opt.ColumnID) {
id := m.MapToInputID(t)
if id == 0 {
panic(errors.AssertionFailedf("could not find input column for %d", log.Safe(t)))
}
inCols.Add(id)
})
return inCols
}
// AddEquivTableCols adds an FD to the given set that declares an equivalence
// between each table column and its corresponding input column.
func (m *MutationPrivate) AddEquivTableCols(md *opt.Metadata, fdset *props.FuncDepSet) {
for i, n := 0, md.Table(m.Table).ColumnCount(); i < n; i++ {
t := m.Table.ColumnID(i)
id := m.MapToInputID(t)
if id != 0 {
fdset.AddEquivalency(t, id)
}
}
}
// WithBindingID is used by factory.Replace as a uniform way to get the with ID.
func (m *MutationPrivate) WithBindingID() opt.WithID {
return m.WithID
}
// WithBindingID is used by factory.Replace as a uniform way to get the with ID.
func (w *WithExpr) WithBindingID() opt.WithID {
return w.ID
}
// WithBindingID is used by factory.Replace as a uniform way to get the with ID.
func (r *RecursiveCTEExpr) WithBindingID() opt.WithID {
return r.WithID
}
// initUnexportedFields is called when a project expression is created.
func (prj *ProjectExpr) initUnexportedFields(mem *Memo) {
inputProps := prj.Input.Relational()
// Determine the not-null columns.
prj.notNullCols = inputProps.NotNullCols.Copy()
for i := range prj.Projections {
item := &prj.Projections[i]
if ExprIsNeverNull(item.Element, inputProps.NotNullCols) {
prj.notNullCols.Add(item.Col)
}
}
// Determine the "internal" functional dependencies (for the union of input
// columns and synthesized columns).
prj.internalFuncDeps.CopyFrom(&inputProps.FuncDeps)
for i := range prj.Projections {
item := &prj.Projections[i]
if v, ok := item.Element.(*VariableExpr); ok && inputProps.OutputCols.Contains(v.Col) {
// Handle any column that is a direct reference to an input column. The
// optimizer sometimes constructs these in order to generate different
// column IDs; they can also show up after constant-folding e.g. an ORDER
// BY expression.
prj.internalFuncDeps.AddEquivalency(v.Col, item.Col)
continue
}
if !item.scalar.VolatilitySet.HasVolatile() {
from := item.scalar.OuterCols.Intersection(inputProps.OutputCols)
// We want to set up the FD: from --> colID.
// This does not necessarily hold for "composite" types like decimals or
// collated strings. For example if d is a decimal, d::TEXT can have
// different values for equal values of d, like 1 and 1.0.
if !CanBeCompositeSensitive(mem.Metadata(), item.Element) {
prj.internalFuncDeps.AddSynthesizedCol(from, item.Col)
}
}
}
prj.internalFuncDeps.MakeNotNull(prj.notNullCols)
}
// InternalFDs returns the functional dependencies for the set of all input
// columns plus the synthesized columns.
func (prj *ProjectExpr) InternalFDs() *props.FuncDepSet {
return &prj.internalFuncDeps
}
// ExprIsNeverNull makes a best-effort attempt to prove that the provided
// scalar is always non-NULL, given the set of outer columns that are known
// to be not null. This is particularly useful with check constraints.
// Check constraints are satisfied when the condition evaluates to NULL,
// whereas filters are not. For example consider the following check constraint:
//
// CHECK (col IN (1, 2, NULL))
//
// Any row evaluating this check constraint with any value for the column will
// satisfy this check constraint, as they would evaluate to true (in the case
// of 1 or 2) or NULL (in the case of everything else).
func ExprIsNeverNull(e opt.ScalarExpr, notNullCols opt.ColSet) bool {
switch t := e.(type) {
case *VariableExpr:
return notNullCols.Contains(t.Col)
case *TrueExpr, *FalseExpr, *ConstExpr, *IsExpr, *IsNotExpr, *IsTupleNullExpr, *IsTupleNotNullExpr:
return true
case *NullExpr:
return false
case *TupleExpr:
// TODO(ridwanmsharif): Make this less conservative and instead update how
// IN and NOT IN behave w.r.t tuples and how IndirectionExpr works with arrays.
// Currently, the semantics of this function on Tuples are different
// as it returns whether a NULL evaluation is possible given the composition of
// the tuple. Changing this will require some additional logic in the IN cases.
for i := range t.Elems {
if !ExprIsNeverNull(t.Elems[i], notNullCols) {
return false
}
}
return true
case *InExpr, *NotInExpr:
// TODO(ridwanmsharif): If a tuple is found in either side, determine if the
// expression is nullable based on the composition of the tuples.
return ExprIsNeverNull(t.Child(0).(opt.ScalarExpr), notNullCols) &&
ExprIsNeverNull(t.Child(1).(opt.ScalarExpr), notNullCols)
case *ArrayExpr:
for i := range t.Elems {
if !ExprIsNeverNull(t.Elems[i], notNullCols) {
return false
}
}
return true
case *CaseExpr:
for i := range t.Whens {
if !ExprIsNeverNull(t.Whens[i], notNullCols) {
return false
}
}
return ExprIsNeverNull(t.Input, notNullCols) && ExprIsNeverNull(t.OrElse, notNullCols)
case *CastExpr, *AssignmentCastExpr, *NotExpr, *RangeExpr:
return ExprIsNeverNull(t.Child(0).(opt.ScalarExpr), notNullCols)
case *AndExpr, *OrExpr, *GeExpr, *GtExpr, *NeExpr, *EqExpr, *LeExpr, *LtExpr, *LikeExpr,
*NotLikeExpr, *ILikeExpr, *NotILikeExpr, *SimilarToExpr, *NotSimilarToExpr, *RegMatchExpr,
*NotRegMatchExpr, *RegIMatchExpr, *NotRegIMatchExpr, *ContainsExpr, *ContainedByExpr, *JsonExistsExpr,
*JsonAllExistsExpr, *JsonSomeExistsExpr, *AnyScalarExpr, *BitandExpr, *BitorExpr, *BitxorExpr,
*PlusExpr, *MinusExpr, *MultExpr, *DivExpr, *FloorDivExpr, *ModExpr, *PowExpr, *ConcatExpr,
*LShiftExpr, *RShiftExpr, *WhenExpr:
return ExprIsNeverNull(t.Child(0).(opt.ScalarExpr), notNullCols) &&
ExprIsNeverNull(t.Child(1).(opt.ScalarExpr), notNullCols)
default:
return false
}
}
// OutputColumnIsAlwaysNull returns true if the expression produces only NULL
// values for the given column. Used to elide foreign key checks.
//
// This could be a logical property but we only care about simple cases (NULLs
// in Projections and Values).
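// For example, a Values expression whose single row is (NULL, 1) yields true
// for its first output column and false for its second.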
func OutputColumnIsAlwaysNull(e RelExpr, col opt.ColumnID) bool {
isNullScalar := func(scalar opt.ScalarExpr) bool {
switch scalar.Op() {
case opt.NullOp:
return true
case opt.CastOp:
// Normally this cast should have been folded, but we want this to work
// in "build" opttester mode (disabled normalization rules).
return scalar.Child(0).Op() == opt.NullOp
default:
return false
}
}
switch e.Op() {
case opt.ProjectOp:
p := e.(*ProjectExpr)
if p.Passthrough.Contains(col) {
return OutputColumnIsAlwaysNull(p.Input, col)
}
for i := range p.Projections {
if p.Projections[i].Col == col {
return isNullScalar(p.Projections[i].Element)
}
}
case opt.ValuesOp:
v := e.(*ValuesExpr)
colOrdinal, ok := v.Cols.Find(col)
if !ok {
return false
}
for i := range v.Rows {
if !isNullScalar(v.Rows[i].(*TupleExpr).Elems[colOrdinal]) {
return false
}
}
return true
}
return false
}
// CollectContiguousOrExprs finds all OrExprs in 'e' that are connected via
// a parent-child relationship and returns their leaf disjuncts in an array of
// ScalarExprs.
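// For example, an expression of the form ((a OR b) OR c) yields [a, b, c].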
func CollectContiguousOrExprs(e opt.ScalarExpr) []opt.ScalarExpr {
var disjunctions = make([]opt.ScalarExpr, 0, 2)
var collectDisjunctions func(e opt.ScalarExpr)
collectDisjunctions = func(e opt.ScalarExpr) {
if or, ok := e.(*OrExpr); ok {
collectDisjunctions(or.Left)
collectDisjunctions(or.Right)
} else {
disjunctions = append(disjunctions, e)
}
}
collectDisjunctions(e)
return disjunctions
}
// FKCascades stores metadata necessary for building cascading queries.
type FKCascades []FKCascade
// FKCascade stores metadata necessary for building a cascading query.
// Cascading queries are built as needed, after the original query is executed.
type FKCascade struct {
// FKName is the name of the FK constraint.
FKName string
// Builder is an object that can be used as the "optbuilder" for the cascading
// query.
Builder CascadeBuilder
// WithID identifies the buffer for the mutation input in the original
// expression tree. 0 if the cascade does not require input.
WithID opt.WithID
// OldValues are column IDs from the mutation input that correspond to the
// old values of the modified rows. The list maps 1-to-1 to foreign key
// columns. Empty if the cascade does not require input.
OldValues opt.ColList
// NewValues are column IDs from the mutation input that correspond to the
// new values of the modified rows. The list maps 1-to-1 to foreign key columns.
// It is empty if the mutation is a deletion. Empty if the cascade does not
// require input.
NewValues opt.ColList
}
// CascadeBuilder is an interface used to construct a cascading query for a
// specific FK relation. For example: if we are deleting rows from a parent
// table, this interface is used, once that deletion has run, to build the
// corresponding deletion in the child table.
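// Roughly, the optimizer records a CascadeBuilder (inside an FKCascade) while
// planning the original mutation; after that mutation has executed, Build is
// called to construct the cascading query against the buffered mutation
// input.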
type CascadeBuilder interface {
// Build constructs a cascading query that mutates the child table. The input
// is scanned using WithScan with the given WithID; oldValues and newValues
// columns correspond 1-to-1 to foreign key columns. For deletes, newValues is
// empty.
//
// The query does not need to be built in the same memo as the original query;
// the only requirement is that the mutation input columns
// (oldValues/newValues) are valid in the metadata.
//
// The method does not mutate any captured state; it is ok to call Build
// concurrently (e.g. if the plan it originates from is cached and reused).
//
// Some cascades (delete fast path) don't require an input binding. In that
// case binding is 0, bindingProps is nil, and oldValues/newValues are empty.
//
// Note: factory is always *norm.Factory; it is an interface{} only to avoid
// circular package dependencies.
Build(
ctx context.Context,
semaCtx *tree.SemaContext,
evalCtx *tree.EvalContext,
catalog cat.Catalog,
factory interface{},
binding opt.WithID,
bindingProps *props.Relational,
oldValues, newValues opt.ColList,
) (RelExpr, error)
}
// GroupingOrderType is the grouping column order type for group by and distinct
// operations in the memo.
type GroupingOrderType int
const (
// NoStreaming means that the grouping columns have no useful order, so a
// hash aggregator should be used.
NoStreaming GroupingOrderType = iota
// PartialStreaming means that the grouping columns are partially ordered, so
// some optimizations can be done during aggregation.
PartialStreaming
// Streaming means that the grouping columns are fully ordered.
Streaming
)
// GroupingOrderType calculates how many ordered columns the grouping and
// input columns have in common and returns NoStreaming if there are none,
// Streaming if all columns match, and PartialStreaming if only some match.
// It is similar to
// StreamingGroupingColOrdering, but does not build an ordering.
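// For example, with grouping columns {a, b}: if the ordering shared with the
// input covers both a and b, the result is Streaming; if it covers only a,
// the result is PartialStreaming; if it covers neither, the result is
// NoStreaming.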
func (g *GroupingPrivate) GroupingOrderType(required *props.OrderingChoice) GroupingOrderType {
inputOrdering := required.Intersection(&g.Ordering)
count := 0
for i := range inputOrdering.Columns {
// Get any grouping column from the set. Normally there would be at most one
// because we have rules that remove redundant grouping columns.
cols := inputOrdering.Group(i).Intersection(g.GroupingCols)
_, ok := cols.Next(0)
if !ok {
// This group refers to a column that is not a grouping column.
// The rest of the ordering is not useful.
break
}
count++
}
if count == g.GroupingCols.Len() || g.GroupingCols.Len() == 0 {
return Streaming
} else if count == 0 {
return NoStreaming
}
return PartialStreaming
}
| pkg/sql/opt/memo/expr.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.00906977616250515,
0.0003702404210343957,
0.00015866132162045687,
0.00016893651627469808,
0.0009828436886891723
] |
{
"id": 7,
"code_window": [
"\t))\n",
"\t// Validate that the post serialization will recompute the constraint IDs\n",
"\t// if they are missing.\n",
"\tdesc = desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, \"defaultdb\", \"t\")\n",
"\trequire.Equal(t, desc.PrimaryIndex.ConstraintID, descpb.ConstraintID(2))\n",
"\t// If we set both the constraint ID / next value to 0, then we will have\n",
"\t// it assigned form scratch.\n",
"\tdesc.PrimaryIndex.ConstraintID = 0\n",
"\tdesc.NextConstraintID = 0\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\trequire.Equal(t, desc.PrimaryIndex.ConstraintID, descpb.ConstraintID(4))\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "replace",
"edit_start_line_idx": 75
} | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"fmt"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/cockroach/pkg/util/log/eventpb"
"github.com/cockroachdb/errors"
)
type dropSchemaNode struct {
n *tree.DropSchema
d *dropCascadeState
}
// Used to satisfy the linter.
var _ planNode = &dropSchemaNode{n: nil}
func (p *planner) DropSchema(ctx context.Context, n *tree.DropSchema) (planNode, error) {
if err := checkSchemaChangeEnabled(
ctx,
p.ExecCfg(),
"DROP SCHEMA",
); err != nil {
return nil, err
}
isAdmin, err := p.HasAdminRole(ctx)
if err != nil {
return nil, err
}
d := newDropCascadeState()
// Collect all schemas to be deleted.
for _, schema := range n.Names {
dbName := p.CurrentDatabase()
if schema.ExplicitCatalog {
dbName = schema.Catalog()
}
scName := schema.Schema()
db, err := p.Descriptors().GetMutableDatabaseByName(ctx, p.txn, dbName,
tree.DatabaseLookupFlags{Required: true})
if err != nil {
return nil, err
}
sc, err := p.Descriptors().GetSchemaByName(
ctx, p.txn, db, scName, tree.SchemaLookupFlags{
Required: false,
RequireMutable: true,
},
)
if err != nil {
return nil, err
}
if sc == nil {
if n.IfExists {
continue
}
return nil, pgerror.Newf(pgcode.InvalidSchemaName, "unknown schema %q", scName)
}
if scName == tree.PublicSchema {
return nil, pgerror.Newf(pgcode.InvalidSchemaName, "cannot drop schema %q", scName)
}
switch sc.SchemaKind() {
case catalog.SchemaPublic, catalog.SchemaVirtual, catalog.SchemaTemporary:
return nil, pgerror.Newf(pgcode.InvalidSchemaName, "cannot drop schema %q", scName)
case catalog.SchemaUserDefined:
hasOwnership, err := p.HasOwnership(ctx, sc)
if err != nil {
return nil, err
}
if !(isAdmin || hasOwnership) {
return nil, pgerror.Newf(pgcode.InsufficientPrivilege,
"must be owner of schema %s", tree.Name(sc.GetName()))
}
namesBefore := len(d.objectNamesToDelete)
if err := d.collectObjectsInSchema(ctx, p, db, sc); err != nil {
return nil, err
}
// We added some new objects to delete. Ensure that we have the correct
// drop behavior to be doing this.
if namesBefore != len(d.objectNamesToDelete) && n.DropBehavior != tree.DropCascade {
return nil, pgerror.Newf(pgcode.DependentObjectsStillExist,
"schema %q is not empty and CASCADE was not specified", scName)
}
sqltelemetry.IncrementUserDefinedSchemaCounter(sqltelemetry.UserDefinedSchemaDrop)
default:
return nil, errors.AssertionFailedf("unknown schema kind %d", sc.SchemaKind())
}
}
// The database descriptor is used to generate specific error messages when
// a database cannot be collected for dropping. The database descriptor is nil here
// because dropping a schema will never result in a database being collected and dropped.
// Also, schemas can belong to different databases, so it does not make sense to pass a single
// database descriptor.
if err := d.resolveCollectedObjects(ctx, p, nil /* db */); err != nil {
return nil, err
}
return &dropSchemaNode{n: n, d: d}, nil
}
func (n *dropSchemaNode) startExec(params runParams) error {
telemetry.Inc(sqltelemetry.SchemaChangeDropCounter("schema"))
ctx := params.ctx
p := params.p
// Drop all collected objects.
if err := n.d.dropAllCollectedObjects(ctx, p); err != nil {
return err
}
// Queue the job to actually drop the schema.
schemaIDs := make([]descpb.ID, len(n.d.schemasToDelete))
for i := range n.d.schemasToDelete {
sc := n.d.schemasToDelete[i].schema
schemaIDs[i] = sc.GetID()
db := n.d.schemasToDelete[i].dbDesc
mutDesc := sc.(*schemadesc.Mutable)
if err := p.dropSchemaImpl(ctx, db, mutDesc); err != nil {
return err
}
}
// Write out the change to the database.
for i := range n.d.schemasToDelete {
sc := n.d.schemasToDelete[i].schema
db := n.d.schemasToDelete[i].dbDesc
if err := p.writeNonDropDatabaseChange(
ctx, db,
fmt.Sprintf("updating parent database %s for %s", db.GetName(), sc.GetName()),
); err != nil {
return err
}
}
// Create the job to drop the schema.
if err := p.createDropSchemaJob(
schemaIDs,
n.d.getDroppedTableDetails(),
n.d.typesToDelete,
tree.AsStringWithFQNames(n.n, params.Ann()),
); err != nil {
return err
}
// Log Drop Schema event. This is an auditable log event and is recorded
// in the same transaction as table descriptor update.
for _, schemaToDelete := range n.d.schemasToDelete {
sc := schemaToDelete.schema
qualifiedSchemaName, err := p.getQualifiedSchemaName(params.ctx, sc)
if err != nil {
return err
}
if err := params.p.logEvent(params.ctx,
sc.GetID(),
&eventpb.DropSchema{
SchemaName: qualifiedSchemaName.String(),
}); err != nil {
return err
}
}
return nil
}
// dropSchemaImpl performs the logic of dropping a user defined schema. It does
// not create a job to perform the final cleanup of the schema.
func (p *planner) dropSchemaImpl(
ctx context.Context, parentDB *dbdesc.Mutable, sc *schemadesc.Mutable,
) error {
// Update parent database schemas mapping.
if p.execCfg.Settings.Version.IsActive(ctx, clusterversion.AvoidDrainingNames) {
delete(parentDB.Schemas, sc.GetName())
} else {
// TODO (rohany): This can be removed once RESTORE installs schemas into
// the parent database.
parentDB.AddSchemaToDatabase(sc.GetName(), descpb.DatabaseDescriptor_SchemaInfo{
ID: sc.GetID(),
Dropped: true,
})
}
// Update the schema descriptor as dropped.
sc.SetDropped()
// Populate namespace update batch.
b := p.txn.NewBatch()
p.dropNamespaceEntry(ctx, b, sc)
// Remove any associated comments.
if err := p.removeSchemaComment(ctx, sc.GetID()); err != nil {
return err
}
// Write the updated descriptor.
if err := p.writeSchemaDesc(ctx, sc); err != nil {
return err
}
// Run the namespace update batch.
return p.txn.Run(ctx, b)
}
func (p *planner) createDropSchemaJob(
schemas []descpb.ID,
tableDropDetails []jobspb.DroppedTableDetails,
typesToDrop []*typedesc.Mutable,
jobDesc string,
) error {
typeIDs := make([]descpb.ID, 0, len(typesToDrop))
for _, t := range typesToDrop {
typeIDs = append(typeIDs, t.ID)
}
_, err := p.extendedEvalCtx.QueueJob(p.EvalContext().Ctx(), jobs.Record{
Description: jobDesc,
Username: p.User(),
DescriptorIDs: schemas,
Details: jobspb.SchemaChangeDetails{
DroppedSchemas: schemas,
DroppedTables: tableDropDetails,
DroppedTypes: typeIDs,
DroppedDatabaseID: descpb.InvalidID,
// The version distinction for database jobs doesn't matter for jobs that
// drop schemas.
FormatVersion: jobspb.DatabaseJobFormatVersion,
},
Progress: jobspb.SchemaChangeProgress{},
NonCancelable: true,
})
return err
}
func (p *planner) removeSchemaComment(ctx context.Context, schemaID descpb.ID) error {
_, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.ExecEx(
ctx,
"delete-schema-comment",
p.txn,
sessiondata.InternalExecutorOverride{User: security.RootUserName()},
"DELETE FROM system.comments WHERE type=$1 AND object_id=$2 AND sub_id=0",
keys.SchemaCommentType,
schemaID)
return err
}
func (n *dropSchemaNode) Next(params runParams) (bool, error) { return false, nil }
func (n *dropSchemaNode) Values() tree.Datums { return tree.Datums{} }
func (n *dropSchemaNode) Close(ctx context.Context) {}
func (n *dropSchemaNode) ReadingOwnWrites() {}
| pkg/sql/drop_schema.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.00023689147201366723,
0.00017014624609146267,
0.00015904591418802738,
0.00016800095909275115,
0.000013464999938150868
] |
{
"id": 7,
"code_window": [
"\t))\n",
"\t// Validate that the post serialization will recompute the constraint IDs\n",
"\t// if they are missing.\n",
"\tdesc = desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, \"defaultdb\", \"t\")\n",
"\trequire.Equal(t, desc.PrimaryIndex.ConstraintID, descpb.ConstraintID(2))\n",
"\t// If we set both the constraint ID / next value to 0, then we will have\n",
"\t// it assigned form scratch.\n",
"\tdesc.PrimaryIndex.ConstraintID = 0\n",
"\tdesc.NextConstraintID = 0\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\trequire.Equal(t, desc.PrimaryIndex.ConstraintID, descpb.ConstraintID(4))\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "replace",
"edit_start_line_idx": 75
} | stmt_block ::=
'CREATE' 'TABLE' table_name '(' ( column_def ( ',' column_def )* ) ( 'CONSTRAINT' constraint_name | ) 'CHECK' '(' check_expr ')' ( table_constraints | ) ')'
| docs/generated/sql/bnf/check_table_level.bnf | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.0006708563305437565,
0.0006708563305437565,
0.0006708563305437565,
0.0006708563305437565,
0
] |
{
"id": 8,
"code_window": [
"\t\tdesc.DescriptorProto(),\n",
"\t))\n",
"\t// Validate that the descriptor is invalid, since the constraint IDs\n",
"\t// are missing.\n",
"\ttdb.CheckQueryResults(t,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t// are missing. Note: Constraint IDs on FKs and other objects will exist.\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "replace",
"edit_start_line_idx": 86
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package migrations_test
import (
"context"
"fmt"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/stretchr/testify/require"
)
// TestEnsureConstraintIDs tests that constraint IDs are added as expected.
func TestEnsureConstraintIDs(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
// Start off with the version that did not support
// constraint IDs.
clusterArgs := base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
DisableAutomaticVersionUpgrade: 1,
BinaryVersionOverride: clusterversion.ByKey(
tabledesc.ConstraintIDsAddedToTableDescsVersion - 1),
},
},
},
}
c := keys.SystemSQLCodec
ctx := context.Background()
tc := testcluster.StartTestCluster(t, 1, clusterArgs)
s := tc.Server(0)
defer tc.Stopper().Stop(ctx)
sqlDB := tc.ServerConn(0)
tdb := sqlutils.MakeSQLRunner(sqlDB)
// Create table with a primary key constraint.
tdb.Exec(t, "CREATE TABLE t(name int primary key)")
// Validate the comments on constraints are blocked.
tdb.ExpectErr(t,
"pq: cannot comment on constraint",
"COMMENT ON CONSTRAINT \"t_pkey\" ON t IS 'primary_comment'")
// Validate that we have a constraint ID due to post deserialization logic
desc := desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, "defaultdb", "t")
desc.PrimaryIndex.ConstraintID = 0
require.NoError(t, s.DB().Put(
context.Background(),
catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),
desc.DescriptorProto(),
))
// Validate that the post serialization will recompute the constraint IDs
// if they are missing.
desc = desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, "defaultdb", "t")
require.Equal(t, desc.PrimaryIndex.ConstraintID, descpb.ConstraintID(2))
	// If we set both the constraint ID and the next constraint ID to 0, then
	// they will be assigned from scratch.
desc.PrimaryIndex.ConstraintID = 0
desc.NextConstraintID = 0
require.NoError(t, s.DB().Put(
context.Background(),
catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),
desc.DescriptorProto(),
))
// Validate that the descriptor is invalid, since the constraint IDs
// are missing.
tdb.CheckQueryResults(t,
`SELECT strpos(desc_json, 'constraintId') > 0,
strpos(desc_json, 'nextConstraintId') > 0
FROM (
SELECT jsonb_pretty(
crdb_internal.pb_to_json(
'cockroach.sql.sqlbase.Descriptor',
descriptor,
false
)
) AS desc_json
FROM system.descriptor
WHERE id = `+
fmt.Sprintf("%d", desc.GetID())+
`);`,
[][]string{{"false", "false"}},
)
// Migrate to the new cluster version.
tdb.Exec(t, `SET CLUSTER SETTING version = $1`,
clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String())
tdb.CheckQueryResultsRetry(t, "SHOW CLUSTER SETTING version",
[][]string{{clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String()}})
	// Validate that the constraint IDs are now populated after the migration.
tdb.CheckQueryResults(t,
`SELECT strpos(desc_json, 'constraintId') > 0,
strpos(desc_json, 'nextConstraintId') > 0
FROM (
SELECT jsonb_pretty(
crdb_internal.pb_to_json(
'cockroach.sql.sqlbase.Descriptor',
descriptor,
false
)
) AS desc_json
FROM system.descriptor
WHERE id = `+
fmt.Sprintf("%d", desc.GetID())+
`);`,
[][]string{{"true", "true"}},
)
// Validate we can comment constraints.
tdb.Exec(t,
"COMMENT ON CONSTRAINT \"t_pkey\" ON t IS 'primary_comment'")
}
| pkg/migration/migrations/ensure_constraint_id_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.4935075640678406,
0.038652632385492325,
0.00016531131404917687,
0.0009759831009432673,
0.12630148231983185
] |
{
"id": 8,
"code_window": [
"\t\tdesc.DescriptorProto(),\n",
"\t))\n",
"\t// Validate that the descriptor is invalid, since the constraint IDs\n",
"\t// are missing.\n",
"\ttdb.CheckQueryResults(t,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t// are missing. Note: Constraint IDs on FKs and other objects will exist.\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "replace",
"edit_start_line_idx": 86
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package kvprober_test
import (
"bytes"
"context"
gosql "database/sql"
"fmt"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv/kvprober"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
)
func TestProberDoesReadsAndWrites(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderShort(t)
ctx := context.Background()
t.Run("disabled by default", func(t *testing.T) {
s, _, p, cleanup := initTestProber(t, base.TestingKnobs{})
defer cleanup()
kvprober.ReadInterval.Override(ctx, &s.ClusterSettings().SV, 5*time.Millisecond)
require.NoError(t, p.Start(ctx, s.Stopper()))
time.Sleep(100 * time.Millisecond)
require.Zero(t, p.Metrics().ProbePlanAttempts.Count())
require.Zero(t, p.Metrics().ReadProbeAttempts.Count())
require.Zero(t, p.Metrics().WriteProbeAttempts.Count())
})
t.Run("happy path", func(t *testing.T) {
s, _, p, cleanup := initTestProber(t, base.TestingKnobs{})
defer cleanup()
kvprober.ReadEnabled.Override(ctx, &s.ClusterSettings().SV, true)
kvprober.ReadInterval.Override(ctx, &s.ClusterSettings().SV, 5*time.Millisecond)
kvprober.WriteEnabled.Override(ctx, &s.ClusterSettings().SV, true)
kvprober.WriteInterval.Override(ctx, &s.ClusterSettings().SV, 5*time.Millisecond)
require.NoError(t, p.Start(ctx, s.Stopper()))
testutils.SucceedsSoon(t, func() error {
if p.Metrics().ReadProbeAttempts.Count() < int64(50) {
return errors.Newf("read count too low: %v", p.Metrics().ReadProbeAttempts.Count())
}
if p.Metrics().WriteProbeAttempts.Count() < int64(50) {
return errors.Newf("write count too low: %v", p.Metrics().WriteProbeAttempts.Count())
}
return nil
})
require.Zero(t, p.Metrics().ReadProbeFailures.Count())
require.Zero(t, p.Metrics().WriteProbeFailures.Count())
require.Zero(t, p.Metrics().ProbePlanFailures.Count())
})
t.Run("a single range is unavailable for all KV ops", func(t *testing.T) {
s, _, p, cleanup := initTestProber(t, base.TestingKnobs{
Store: &kvserver.StoreTestingKnobs{
TestingRequestFilter: func(i context.Context, ba roachpb.BatchRequest) *roachpb.Error {
for _, ru := range ba.Requests {
key := ru.GetInner().Header().Key
if bytes.HasPrefix(key, keys.TimeseriesPrefix) {
return roachpb.NewError(fmt.Errorf("boom"))
}
}
return nil
},
},
})
defer cleanup()
kvprober.ReadEnabled.Override(ctx, &s.ClusterSettings().SV, true)
kvprober.ReadInterval.Override(ctx, &s.ClusterSettings().SV, 5*time.Millisecond)
kvprober.WriteEnabled.Override(ctx, &s.ClusterSettings().SV, true)
kvprober.WriteInterval.Override(ctx, &s.ClusterSettings().SV, 5*time.Millisecond)
require.NoError(t, p.Start(ctx, s.Stopper()))
// Expect >=2 failures eventually due to unavailable time-series range.
// TODO(josh): Once structured logging is in, can check that failures
// involved only the time-series range.
testutils.SucceedsSoon(t, func() error {
if p.Metrics().ReadProbeFailures.Count() < int64(2) {
return errors.Newf("read error count too low: %v", p.Metrics().ReadProbeFailures.Count())
}
if p.Metrics().WriteProbeFailures.Count() < int64(2) {
return errors.Newf("write error count too low: %v", p.Metrics().WriteProbeFailures.Count())
}
return nil
})
require.Zero(t, p.Metrics().ProbePlanFailures.Count())
})
t.Run("all ranges are unavailable for Gets only", func(t *testing.T) {
var dbIsAvailable syncutil.AtomicBool
dbIsAvailable.Set(true)
s, _, p, cleanup := initTestProber(t, base.TestingKnobs{
Store: &kvserver.StoreTestingKnobs{
TestingRequestFilter: func(i context.Context, ba roachpb.BatchRequest) *roachpb.Error {
if !dbIsAvailable.Get() {
for _, ru := range ba.Requests {
if ru.GetGet() != nil {
return roachpb.NewError(fmt.Errorf("boom"))
}
}
return nil
}
return nil
},
},
})
defer cleanup()
		// Want the server to start up successfully and then become unavailable.
dbIsAvailable.Set(false)
kvprober.ReadEnabled.Override(ctx, &s.ClusterSettings().SV, true)
kvprober.ReadInterval.Override(ctx, &s.ClusterSettings().SV, 5*time.Millisecond)
kvprober.WriteEnabled.Override(ctx, &s.ClusterSettings().SV, true)
kvprober.WriteInterval.Override(ctx, &s.ClusterSettings().SV, 5*time.Millisecond)
// Probe exactly ten times so we can make assertions below.
for i := 0; i < 10; i++ {
p.ReadProbe(ctx, s.DB())
p.WriteProbe(ctx, s.DB())
}
// Expect all read probes to fail but write probes & planning to succeed.
require.Equal(t, int64(10), p.Metrics().ReadProbeAttempts.Count())
require.Equal(t, int64(10), p.Metrics().ReadProbeFailures.Count())
require.Equal(t, int64(10), p.Metrics().WriteProbeAttempts.Count())
require.Zero(t, p.Metrics().WriteProbeFailures.Count())
require.Zero(t, p.Metrics().ProbePlanFailures.Count())
})
t.Run("all ranges are unavailable for Puts only", func(t *testing.T) {
var dbIsAvailable syncutil.AtomicBool
dbIsAvailable.Set(true)
s, _, p, cleanup := initTestProber(t, base.TestingKnobs{
Store: &kvserver.StoreTestingKnobs{
TestingRequestFilter: func(i context.Context, ba roachpb.BatchRequest) *roachpb.Error {
if !dbIsAvailable.Get() {
for _, ru := range ba.Requests {
if ru.GetPut() != nil {
return roachpb.NewError(fmt.Errorf("boom"))
}
}
return nil
}
return nil
},
},
})
defer cleanup()
		// Want the server to start up successfully and then become unavailable.
dbIsAvailable.Set(false)
kvprober.ReadEnabled.Override(ctx, &s.ClusterSettings().SV, true)
kvprober.ReadInterval.Override(ctx, &s.ClusterSettings().SV, 5*time.Millisecond)
kvprober.WriteEnabled.Override(ctx, &s.ClusterSettings().SV, true)
kvprober.WriteInterval.Override(ctx, &s.ClusterSettings().SV, 5*time.Millisecond)
// Probe exactly ten times so we can make assertions below.
for i := 0; i < 10; i++ {
p.ReadProbe(ctx, s.DB())
p.WriteProbe(ctx, s.DB())
}
// Expect all write probes to fail but read probes & planning to succeed.
require.Equal(t, int64(10), p.Metrics().WriteProbeAttempts.Count())
require.Equal(t, int64(10), p.Metrics().WriteProbeFailures.Count())
require.Equal(t, int64(10), p.Metrics().ReadProbeAttempts.Count())
require.Zero(t, p.Metrics().ReadProbeFailures.Count())
require.Zero(t, p.Metrics().ProbePlanFailures.Count())
})
}
func TestWriteProbeDoesNotLeaveLiveData(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderShort(t)
ctx := context.Background()
s, _, p, cleanup := initTestProber(t, base.TestingKnobs{})
defer cleanup()
kvprober.WriteEnabled.Override(ctx, &s.ClusterSettings().SV, true)
lastStep := p.WriteProbeReturnLastStep(ctx, s.DB())
// Expect write probe to succeed.
require.Equal(t, int64(1), p.Metrics().WriteProbeAttempts.Count())
require.Zero(t, p.Metrics().WriteProbeFailures.Count())
require.Zero(t, p.Metrics().ProbePlanFailures.Count())
// Expect no **live** data at the key kvprober writes at.
// TODO(josh): One can imagine comparing a checksum of all the live data
// in a range, before and after the write probe is sent. This would be a
// better test than what is below, if one can guarantee the live data
// in the range won't change for some reason other than kvprober. The
// below test is too fragile, in that it relies on kvprober implementation
// details to check for the presence of a live data, meaning it will succeed
// if live data is not present (desirable) or if the kvprober implementation
// details change (not desirable).
got, err := s.DB().Get(ctx, lastStep.Key)
require.NoError(t, err)
require.False(t, got.Exists(), got.PrettyValue())
}
func TestPlannerMakesPlansCoveringAllRanges(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
skip.UnderShort(t)
ctx := context.Background()
_, sqlDB, p, cleanup := initTestProber(t, base.TestingKnobs{})
defer cleanup()
rangeIDToTimesWouldBeProbed := make(map[int64]int)
test := func(n int) {
var numRanges int64
if err := sqlDB.QueryRow(
"SELECT count(*) FROM crdb_internal.ranges").Scan(&numRanges); err != nil {
require.True(t, false)
}
log.Infof(ctx, "want numRanges %v", numRanges)
require.Eventually(t, func() bool {
step, err := p.ReadPlannerNext(ctx)
require.NoError(t, err)
rangeIDToTimesWouldBeProbed[int64(step.RangeID)]++
log.Infof(ctx, "current rangeID to times would be probed map: %v", rangeIDToTimesWouldBeProbed)
for i := int64(1); i <= numRanges; i++ {
// Expect all ranges to eventually be returned by Next n or n+1 times.
// Can't expect all ranges to be returned by Next exactly n times,
// as the order in which the lowest ordinal ranges are returned by
// Next the nth+1 time and the highest ordinal ranges are returned by
// Next the nth time is NOT specified. The reason for this is
// that we make plans in batches of a constant size and then randomize
// the order of the batch. See plan.go for more.
if rangeIDToTimesWouldBeProbed[i] != n && rangeIDToTimesWouldBeProbed[i] != n+1 {
return false
}
}
return true
}, time.Second, time.Millisecond)
}
for i := 0; i < 20; i++ {
test(i)
}
}
func initTestProber(
t *testing.T, knobs base.TestingKnobs,
) (serverutils.TestServerInterface, *gosql.DB, *kvprober.Prober, func()) {
s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{
Settings: cluster.MakeClusterSettings(),
Knobs: knobs,
})
p := kvprober.NewProber(kvprober.Opts{
Tracer: s.TracerI().(*tracing.Tracer),
DB: kvDB,
HistogramWindowInterval: time.Minute, // actual value not important to test
Settings: s.ClusterSettings(),
})
// Given small test cluster, this better exercises the planning logic.
kvprober.NumStepsToPlanAtOnce.Override(context.Background(), &s.ClusterSettings().SV, 10)
// Want these tests to run as fast as possible; see planner_test.go for a
// unit test of the rate limiting.
p.SetPlanningRateLimits(0)
return s, sqlDB, p, func() {
s.Stopper().Stop(context.Background())
}
}
| pkg/kv/kvprober/kvprober_integration_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.0002877223305404186,
0.0001743489847285673,
0.00016232601774390787,
0.0001679966226220131,
0.000023235059416037984
] |
{
"id": 8,
"code_window": [
"\t\tdesc.DescriptorProto(),\n",
"\t))\n",
"\t// Validate that the descriptor is invalid, since the constraint IDs\n",
"\t// are missing.\n",
"\ttdb.CheckQueryResults(t,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t// are missing. Note: Constraint IDs on FKs and other objects will exist.\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "replace",
"edit_start_line_idx": 86
} | # With two out of five replicas remaining, check that replica with highest
# range applied index is chosen regardless of replica storeID.
# We have a 5-way replication and have two out of five nodes left so quorum
# is lost.
replication-data
- StoreID: 1
RangeID: 1
StartKey: /Min
EndKey: /Max
Replicas:
- { NodeID: 1, StoreID: 1, ReplicaID: 1}
- { NodeID: 2, StoreID: 2, ReplicaID: 2}
- { NodeID: 3, StoreID: 3, ReplicaID: 3} # replicas 3-5 are located on unavailable stores
- { NodeID: 4, StoreID: 4, ReplicaID: 4}
- { NodeID: 5, StoreID: 5, ReplicaID: 5}
RangeAppliedIndex: 11 # this replica has higher applied index so is preferred over the other one
RaftCommittedIndex: 13
- StoreID: 2
RangeID: 1
StartKey: /Min
EndKey: /Max
Replicas:
- { NodeID: 1, StoreID: 1, ReplicaID: 1}
- { NodeID: 2, StoreID: 2, ReplicaID: 2}
- { NodeID: 3, StoreID: 3, ReplicaID: 3} # replicas 3-5 are located on unavailable stores
- { NodeID: 4, StoreID: 4, ReplicaID: 4}
- { NodeID: 5, StoreID: 5, ReplicaID: 5}
RangeAppliedIndex: 10 # applied index takes precedence over store ID so this replica loses
RaftCommittedIndex: 14 # committed index while higher, should not confuse planner and use applied index
----
ok
collect-replica-info stores=(1,2)
----
ok
make-plan
----
- RangeID: 1
StartKey: /Min
OldReplicaID: 1
NewReplica:
NodeID: 1
StoreID: 1
ReplicaID: 16
NextReplicaID: 17
apply-plan stores=(1,2)
----
ok
dump-store stores=(1,2)
----
- NodeID: 1
StoreID: 1
Descriptors:
- RangeID: 1
StartKey: /Min
Replicas:
- Replica: {NodeID: 1, StoreID: 1, ReplicaID: 16}
- NodeID: 2
StoreID: 2
Descriptors:
- RangeID: 1
StartKey: /Min
Replicas:
- Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1}
- Replica: {NodeID: 2, StoreID: 2, ReplicaID: 2}
- Replica: {NodeID: 3, StoreID: 3, ReplicaID: 3}
- Replica: {NodeID: 4, StoreID: 4, ReplicaID: 4}
- Replica: {NodeID: 5, StoreID: 5, ReplicaID: 5}
dump-events stores=(1,2)
----
Updated range r1, Key:/Min, Store:s1 ReplicaID:16
# Second use case where stale replica which remained from before split
# on store with higher ID will conflict with later one spanning smaller range.
# We have a stale replica in s2 which still remembers group across s3 and s4
# but they are not available anymore. While LHS and RHS across s1, s4, s5 are
# now more recent. Stale replica loses based on raft applied index being lower.
replication-data
- StoreID: 1 # this is the LHS replica post split
RangeID: 1
StartKey: /Min
EndKey: /Table/1
Replicas:
- { NodeID: 1, StoreID: 1, ReplicaID: 1}
- { NodeID: 5, StoreID: 5, ReplicaID: 6}
- { NodeID: 6, StoreID: 6, ReplicaID: 7}
RangeAppliedIndex: 15
RaftCommittedIndex: 15
- StoreID: 1 # this is the RHS replica post split
RangeID: 2
StartKey: /Table/1
EndKey: /Max
Replicas:
- { NodeID: 1, StoreID: 1, ReplicaID: 1}
- { NodeID: 5, StoreID: 5, ReplicaID: 6}
- { NodeID: 6, StoreID: 6, ReplicaID: 7}
RangeAppliedIndex: 15
RaftCommittedIndex: 15
- StoreID: 2
RangeID: 1 # this is the old version of range which got stale
StartKey: /Min
EndKey: /Max
Replicas:
- { NodeID: 2, StoreID: 2, ReplicaID: 2}
- { NodeID: 3, StoreID: 3, ReplicaID: 3}
- { NodeID: 4, StoreID: 4, ReplicaID: 4}
RangeAppliedIndex: 10
RaftCommittedIndex: 13
- StoreID: 5
RangeID: 1 # this is the LHS replica post split
StartKey: /Min
EndKey: /Table/1
Replicas:
- { NodeID: 1, StoreID: 1, ReplicaID: 1}
- { NodeID: 5, StoreID: 5, ReplicaID: 6}
- { NodeID: 6, StoreID: 6, ReplicaID: 7}
RangeAppliedIndex: 15
RaftCommittedIndex: 15
- StoreID: 5
RangeID: 2 # this is the RHS replica post split
StartKey: /Table/1
EndKey: /Max
Replicas:
- { NodeID: 1, StoreID: 1, ReplicaID: 1}
- { NodeID: 5, StoreID: 5, ReplicaID: 6}
- { NodeID: 6, StoreID: 6, ReplicaID: 7}
RangeAppliedIndex: 15
RaftCommittedIndex: 15
- StoreID: 6
RangeID: 1 # this is the LHS replica post split
StartKey: /Min
EndKey: /Table/1
Replicas:
- { NodeID: 1, StoreID: 1, ReplicaID: 1}
- { NodeID: 5, StoreID: 5, ReplicaID: 6}
- { NodeID: 6, StoreID: 6, ReplicaID: 7}
RangeAppliedIndex: 15
RaftCommittedIndex: 15
- StoreID: 6
RangeID: 2 # this is the RHS replica post split
StartKey: /Table/1
EndKey: /Max
Replicas:
- { NodeID: 1, StoreID: 1, ReplicaID: 1}
- { NodeID: 5, StoreID: 5, ReplicaID: 6}
- { NodeID: 6, StoreID: 6, ReplicaID: 7}
RangeAppliedIndex: 15
RaftCommittedIndex: 15
----
ok
collect-replica-info stores=(1,2,5,6)
----
ok
make-plan
----
[]
apply-plan stores=(1,2,5,6)
----
ok
dump-store stores=(1,2,5,6)
----
- NodeID: 1
StoreID: 1
Descriptors:
- RangeID: 1
StartKey: /Min
Replicas:
- Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1}
- Replica: {NodeID: 5, StoreID: 5, ReplicaID: 6}
- Replica: {NodeID: 6, StoreID: 6, ReplicaID: 7}
- RangeID: 2
StartKey: /Table/1
Replicas:
- Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1}
- Replica: {NodeID: 5, StoreID: 5, ReplicaID: 6}
- Replica: {NodeID: 6, StoreID: 6, ReplicaID: 7}
- NodeID: 2
StoreID: 2
Descriptors:
- RangeID: 1
StartKey: /Min
Replicas:
- Replica: {NodeID: 2, StoreID: 2, ReplicaID: 2}
- Replica: {NodeID: 3, StoreID: 3, ReplicaID: 3}
- Replica: {NodeID: 4, StoreID: 4, ReplicaID: 4}
- NodeID: 5
StoreID: 5
Descriptors:
- RangeID: 1
StartKey: /Min
Replicas:
- Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1}
- Replica: {NodeID: 5, StoreID: 5, ReplicaID: 6}
- Replica: {NodeID: 6, StoreID: 6, ReplicaID: 7}
- RangeID: 2
StartKey: /Table/1
Replicas:
- Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1}
- Replica: {NodeID: 5, StoreID: 5, ReplicaID: 6}
- Replica: {NodeID: 6, StoreID: 6, ReplicaID: 7}
- NodeID: 6
StoreID: 6
Descriptors:
- RangeID: 1
StartKey: /Min
Replicas:
- Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1}
- Replica: {NodeID: 5, StoreID: 5, ReplicaID: 6}
- Replica: {NodeID: 6, StoreID: 6, ReplicaID: 7}
- RangeID: 2
StartKey: /Table/1
Replicas:
- Replica: {NodeID: 1, StoreID: 1, ReplicaID: 1}
- Replica: {NodeID: 5, StoreID: 5, ReplicaID: 6}
- Replica: {NodeID: 6, StoreID: 6, ReplicaID: 7}
| pkg/kv/kvserver/loqrecovery/testdata/max_applied_voter_wins | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.0001739052531775087,
0.00017032136383932084,
0.00016753448289819062,
0.0001698472915450111,
0.000001582678919476166
] |
{
"id": 8,
"code_window": [
"\t\tdesc.DescriptorProto(),\n",
"\t))\n",
"\t// Validate that the descriptor is invalid, since the constraint IDs\n",
"\t// are missing.\n",
"\ttdb.CheckQueryResults(t,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t// are missing. Note: Constraint IDs on FKs and other objects will exist.\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "replace",
"edit_start_line_idx": 86
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package rsg
import (
"fmt"
"testing"
)
const yaccExample = `
name:
IDENT
| unreserved_keyword
| col_name_keyword
unreserved_keyword:
ABORT
| ACTION
| ADD
| ADMIN
col_name_keyword:
ANNOTATE_TYPE
| BETWEEN
| BIGINT
| BIT
column_name: name
constraint_name: name
column_def:
column_name typename col_qual_list
{
tableDef, err := tree.NewColumnTableDef(tree.Name($1), $2.colType(), $3.colQuals())
if err != nil {
sqllex.Error(err.Error())
return 1
}
$$.val = tableDef
}
col_qual_list:
col_qual_list col_qualification
{
$$.val = append($1.colQuals(), $2.colQual())
}
| /* EMPTY */
{
$$.val = []tree.NamedColumnQualification(nil)
}
col_qualification:
CONSTRAINT constraint_name col_qualification_elem
{
$$.val = tree.NamedColumnQualification{Name: tree.Name($2), Qualification: $3.colQualElem()}
}
| col_qualification_elem
{
$$.val = tree.NamedColumnQualification{Qualification: $1.colQualElem()}
}
col_qualification_elem:
NOT NULL
{
$$.val = tree.NotNullConstraint{}
}
| NULL
{
$$.val = tree.NullConstraint{}
}
| UNIQUE
{
$$.val = tree.UniqueConstraint{}
}
| PRIMARY KEY
{
$$.val = tree.PrimaryKeyConstraint{}
}
`
func getRSG(t *testing.T) *RSG {
r, err := NewRSG(1, yaccExample, false)
if err != nil {
t.Fatal(err)
}
return r
}
func TestGenerate(t *testing.T) {
tests := []struct {
root string
depth int
repetitions int
expected []string
}{
{
root: "column_def",
depth: 20,
repetitions: 10,
expected: []string{
"BIT typename",
"ANNOTATE_TYPE typename CONSTRAINT ADD PRIMARY KEY NULL",
"ident typename PRIMARY KEY CONSTRAINT ident NULL",
"BETWEEN typename NULL",
"ADD typename",
"ABORT typename",
"ACTION typename",
"BIGINT typename",
"ident typename",
"BETWEEN typename CONSTRAINT ident UNIQUE",
},
},
}
for _, tc := range tests {
t.Run(fmt.Sprintf("%s-%d-%d", tc.root, tc.depth, tc.repetitions), func(t *testing.T) {
r := getRSG(t)
out := make([]string, tc.repetitions)
for i := range out {
out[i] = r.Generate(tc.root, tc.depth)
}
// Enable to help with writing tests.
if false {
for _, o := range out {
fmt.Printf("%q,\n", o)
}
return
}
if len(out) != len(tc.expected) {
t.Fatal("unexpected")
}
for i, o := range out {
if o != tc.expected[i] {
t.Fatalf("got %q, expected %q", o, tc.expected[i])
}
}
})
}
}
| pkg/internal/rsg/rsg_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.0006500583840534091,
0.00022416136926040053,
0.00016620430687908083,
0.00017345574451610446,
0.00012471849913708866
] |
{
"id": 9,
"code_window": [
"\ttdb.CheckQueryResults(t,\n",
"\t\t`SELECT strpos(desc_json, 'constraintId') > 0,\n",
" strpos(desc_json, 'nextConstraintId') > 0\n",
" FROM (\n",
"\t\tSELECT jsonb_pretty(\n",
"\t\t\t\tcrdb_internal.pb_to_json(\n"
],
"labels": [
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t`SELECT strpos(desc_json, 'nextConstraintId') > 0,\n",
" array_length(string_to_array(desc_json, 'constraintId'), 1) > 4\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "replace",
"edit_start_line_idx": 88
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package migrations_test
import (
"context"
"fmt"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/stretchr/testify/require"
)
// TestEnsureConstraintIDs tests that constraint IDs are added as expected.
func TestEnsureConstraintIDs(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
// Start off with the version that did not support
// constraint IDs.
clusterArgs := base.TestClusterArgs{
ServerArgs: base.TestServerArgs{
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
DisableAutomaticVersionUpgrade: 1,
BinaryVersionOverride: clusterversion.ByKey(
tabledesc.ConstraintIDsAddedToTableDescsVersion - 1),
},
},
},
}
c := keys.SystemSQLCodec
ctx := context.Background()
tc := testcluster.StartTestCluster(t, 1, clusterArgs)
s := tc.Server(0)
defer tc.Stopper().Stop(ctx)
sqlDB := tc.ServerConn(0)
tdb := sqlutils.MakeSQLRunner(sqlDB)
// Create table with a primary key constraint.
tdb.Exec(t, "CREATE TABLE t(name int primary key)")
// Validate the comments on constraints are blocked.
tdb.ExpectErr(t,
"pq: cannot comment on constraint",
"COMMENT ON CONSTRAINT \"t_pkey\" ON t IS 'primary_comment'")
// Validate that we have a constraint ID due to post deserialization logic
desc := desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, "defaultdb", "t")
desc.PrimaryIndex.ConstraintID = 0
require.NoError(t, s.DB().Put(
context.Background(),
catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),
desc.DescriptorProto(),
))
// Validate that the post serialization will recompute the constraint IDs
// if they are missing.
desc = desctestutils.TestingGetMutableExistingTableDescriptor(s.DB(), c, "defaultdb", "t")
require.Equal(t, desc.PrimaryIndex.ConstraintID, descpb.ConstraintID(2))
	// If we set both the constraint ID and the next constraint ID to 0, then
	// they will be assigned from scratch.
desc.PrimaryIndex.ConstraintID = 0
desc.NextConstraintID = 0
require.NoError(t, s.DB().Put(
context.Background(),
catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),
desc.DescriptorProto(),
))
// Validate that the descriptor is invalid, since the constraint IDs
// are missing.
tdb.CheckQueryResults(t,
`SELECT strpos(desc_json, 'constraintId') > 0,
strpos(desc_json, 'nextConstraintId') > 0
FROM (
SELECT jsonb_pretty(
crdb_internal.pb_to_json(
'cockroach.sql.sqlbase.Descriptor',
descriptor,
false
)
) AS desc_json
FROM system.descriptor
WHERE id = `+
fmt.Sprintf("%d", desc.GetID())+
`);`,
[][]string{{"false", "false"}},
)
// Migrate to the new cluster version.
tdb.Exec(t, `SET CLUSTER SETTING version = $1`,
clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String())
tdb.CheckQueryResultsRetry(t, "SHOW CLUSTER SETTING version",
[][]string{{clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String()}})
	// Validate that the constraint IDs are now populated after the migration.
tdb.CheckQueryResults(t,
`SELECT strpos(desc_json, 'constraintId') > 0,
strpos(desc_json, 'nextConstraintId') > 0
FROM (
SELECT jsonb_pretty(
crdb_internal.pb_to_json(
'cockroach.sql.sqlbase.Descriptor',
descriptor,
false
)
) AS desc_json
FROM system.descriptor
WHERE id = `+
fmt.Sprintf("%d", desc.GetID())+
`);`,
[][]string{{"true", "true"}},
)
// Validate we can comment constraints.
tdb.Exec(t,
"COMMENT ON CONSTRAINT \"t_pkey\" ON t IS 'primary_comment'")
}
| pkg/migration/migrations/ensure_constraint_id_test.go | 1 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.8462963700294495,
0.06352663785219193,
0.0001649696205276996,
0.0006820012349635363,
0.21725063025951385
] |
{
"id": 9,
"code_window": [
"\ttdb.CheckQueryResults(t,\n",
"\t\t`SELECT strpos(desc_json, 'constraintId') > 0,\n",
" strpos(desc_json, 'nextConstraintId') > 0\n",
" FROM (\n",
"\t\tSELECT jsonb_pretty(\n",
"\t\t\t\tcrdb_internal.pb_to_json(\n"
],
"labels": [
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t`SELECT strpos(desc_json, 'nextConstraintId') > 0,\n",
" array_length(string_to_array(desc_json, 'constraintId'), 1) > 4\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "replace",
"edit_start_line_idx": 88
} | // Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package rowexec
import (
"context"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/cancelchecker"
"github.com/cockroachdb/cockroach/pkg/util/optional"
"github.com/cockroachdb/errors"
)
// mergeJoiner performs a merge join; it has two input row sources with the
// same ordering on the columns that have equality constraints.
//
// It is guaranteed that the results preserve this ordering.
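// For example, if both inputs are sorted on an equality column k, matching
// rows are combined one group of equal k values at a time, so the joined
// output remains sorted on k.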
type mergeJoiner struct {
joinerBase
cancelChecker cancelchecker.CancelChecker
leftSource, rightSource execinfra.RowSource
leftRows, rightRows []rowenc.EncDatumRow
leftIdx, rightIdx int
trackMatchedRight bool
emitUnmatchedRight bool
matchedRight util.FastIntSet
matchedRightCount int
streamMerger streamMerger
}
var _ execinfra.Processor = &mergeJoiner{}
var _ execinfra.RowSource = &mergeJoiner{}
var _ execinfra.OpNode = &mergeJoiner{}
const mergeJoinerProcName = "merge joiner"
func newMergeJoiner(
flowCtx *execinfra.FlowCtx,
processorID int32,
spec *execinfrapb.MergeJoinerSpec,
leftSource execinfra.RowSource,
rightSource execinfra.RowSource,
post *execinfrapb.PostProcessSpec,
output execinfra.RowReceiver,
) (*mergeJoiner, error) {
leftEqCols := make([]uint32, 0, len(spec.LeftOrdering.Columns))
rightEqCols := make([]uint32, 0, len(spec.RightOrdering.Columns))
for i, c := range spec.LeftOrdering.Columns {
if spec.RightOrdering.Columns[i].Direction != c.Direction {
return nil, errors.New("unmatched column orderings")
}
leftEqCols = append(leftEqCols, c.ColIdx)
rightEqCols = append(rightEqCols, spec.RightOrdering.Columns[i].ColIdx)
}
m := &mergeJoiner{
leftSource: leftSource,
rightSource: rightSource,
trackMatchedRight: shouldEmitUnmatchedRow(rightSide, spec.Type) || spec.Type == descpb.RightSemiJoin,
}
if execinfra.ShouldCollectStats(flowCtx.EvalCtx.Ctx(), flowCtx) {
m.leftSource = newInputStatCollector(m.leftSource)
m.rightSource = newInputStatCollector(m.rightSource)
m.ExecStatsForTrace = m.execStatsForTrace
}
if err := m.joinerBase.init(
m /* self */, flowCtx, processorID, leftSource.OutputTypes(), rightSource.OutputTypes(),
spec.Type, spec.OnExpr, leftEqCols, rightEqCols, false, /* outputContinuationColumn */
post, output,
execinfra.ProcStateOpts{
InputsToDrain: []execinfra.RowSource{leftSource, rightSource},
TrailingMetaCallback: func() []execinfrapb.ProducerMetadata {
m.close()
return nil
},
},
); err != nil {
return nil, err
}
m.MemMonitor = execinfra.NewMonitor(flowCtx.EvalCtx.Ctx(), flowCtx.EvalCtx.Mon, "mergejoiner-mem")
var err error
m.streamMerger, err = makeStreamMerger(
m.leftSource,
execinfrapb.ConvertToColumnOrdering(spec.LeftOrdering),
m.rightSource,
execinfrapb.ConvertToColumnOrdering(spec.RightOrdering),
spec.NullEquality,
m.MemMonitor,
)
if err != nil {
return nil, err
}
return m, nil
}
// Start is part of the RowSource interface.
func (m *mergeJoiner) Start(ctx context.Context) {
ctx = m.StartInternal(ctx, mergeJoinerProcName)
m.streamMerger.start(ctx)
m.cancelChecker.Reset(ctx)
}
// Next is part of the Processor interface.
func (m *mergeJoiner) Next() (rowenc.EncDatumRow, *execinfrapb.ProducerMetadata) {
for m.State == execinfra.StateRunning {
row, meta := m.nextRow()
if meta != nil {
if meta.Err != nil {
m.MoveToDraining(nil /* err */)
}
return nil, meta
}
if row == nil {
m.MoveToDraining(nil /* err */)
break
}
if outRow := m.ProcessRowHelper(row); outRow != nil {
return outRow, nil
}
}
return nil, m.DrainHelper()
}
func (m *mergeJoiner) nextRow() (rowenc.EncDatumRow, *execinfrapb.ProducerMetadata) {
// The loops below form a restartable state machine that iterates over a
// batch of rows from the left and right side of the join. The state machine
// returns a result for every row that should be output.
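	// Each batch delivered by the stream merger holds the left and right rows
	// that are equal on the merge ordering columns, so the loops below
	// effectively process one equality group at a time.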
for {
for m.leftIdx < len(m.leftRows) {
// We have unprocessed rows from the left-side batch.
lrow := m.leftRows[m.leftIdx]
for m.rightIdx < len(m.rightRows) {
// We have unprocessed rows from the right-side batch.
ridx := m.rightIdx
m.rightIdx++
if (m.joinType == descpb.RightSemiJoin || m.joinType == descpb.RightAntiJoin) && m.matchedRight.Contains(ridx) {
// Right semi/anti joins only need to know whether the
// right row has a match, and we already know that for
// ridx. Furthermore, we have already emitted this row in
// case of right semi, so we need to skip it for
// correctness as well.
continue
}
renderedRow, err := m.render(lrow, m.rightRows[ridx])
if err != nil {
return nil, &execinfrapb.ProducerMetadata{Err: err}
}
if renderedRow != nil {
m.matchedRightCount++
if m.trackMatchedRight {
m.matchedRight.Add(ridx)
}
if m.joinType == descpb.LeftAntiJoin || m.joinType == descpb.ExceptAllJoin {
// We know that the current left row has a match and is
// not included in the output, so we can stop
// processing the right-side batch.
break
}
if m.joinType == descpb.RightAntiJoin {
// We don't emit the current right row because it has a
// match on the left, so we move onto the next right
// row.
continue
}
if m.joinType == descpb.LeftSemiJoin || m.joinType == descpb.IntersectAllJoin {
// Semi-joins and INTERSECT ALL only need to know if there is at
// least one match, so can skip the rest of the right rows.
m.rightIdx = len(m.rightRows)
}
return renderedRow, nil
}
}
// Perform the cancellation check. We don't perform this on every row,
// but once for every iteration through the right-side batch.
if err := m.cancelChecker.Check(); err != nil {
return nil, &execinfrapb.ProducerMetadata{Err: err}
}
// We've exhausted the right-side batch. Adjust the indexes for the next
// row from the left-side of the batch.
m.leftIdx++
m.rightIdx = 0
// For INTERSECT ALL and EXCEPT ALL, adjust rightIdx to skip all
// previously matched rows on the next right-side iteration, since we
// don't want to match them again.
if m.joinType.IsSetOpJoin() {
m.rightIdx = m.leftIdx
}
// If we didn't match any rows on the right-side of the batch and this is
// a left outer join, full outer join, left anti join, or EXCEPT ALL, emit an
// unmatched left-side row.
if m.matchedRightCount == 0 && shouldEmitUnmatchedRow(leftSide, m.joinType) {
return m.renderUnmatchedRow(lrow, leftSide), nil
}
m.matchedRightCount = 0
}
// We've exhausted the left-side batch. If this is a right/full outer
// or right anti join, emit unmatched right-side rows.
if m.emitUnmatchedRight {
for m.rightIdx < len(m.rightRows) {
ridx := m.rightIdx
m.rightIdx++
if m.matchedRight.Contains(ridx) {
continue
}
return m.renderUnmatchedRow(m.rightRows[ridx], rightSide), nil
}
m.emitUnmatchedRight = false
}
// Retrieve the next batch of rows to process.
var meta *execinfrapb.ProducerMetadata
// TODO(paul): Investigate (with benchmarks) whether or not it's
// worthwhile to only buffer one row from the right stream per batch
// for semi-joins.
m.leftRows, m.rightRows, meta = m.streamMerger.NextBatch(m.Ctx, m.EvalCtx)
if meta != nil {
return nil, meta
}
if m.leftRows == nil && m.rightRows == nil {
return nil, nil
}
// Prepare for processing the next batch.
m.emitUnmatchedRight = shouldEmitUnmatchedRow(rightSide, m.joinType)
m.leftIdx, m.rightIdx = 0, 0
if m.trackMatchedRight {
m.matchedRight = util.FastIntSet{}
}
}
}
func (m *mergeJoiner) close() {
if m.InternalClose() {
m.streamMerger.close(m.Ctx)
m.MemMonitor.Stop(m.Ctx)
}
}
// ConsumerClosed is part of the RowSource interface.
func (m *mergeJoiner) ConsumerClosed() {
// The consumer is done, Next() will not be called again.
m.close()
}
// execStatsForTrace implements ProcessorBase.ExecStatsForTrace.
func (m *mergeJoiner) execStatsForTrace() *execinfrapb.ComponentStats {
lis, ok := getInputStats(m.leftSource)
if !ok {
return nil
}
ris, ok := getInputStats(m.rightSource)
if !ok {
return nil
}
return &execinfrapb.ComponentStats{
Inputs: []execinfrapb.InputStats{lis, ris},
Exec: execinfrapb.ExecStats{
MaxAllocatedMem: optional.MakeUint(uint64(m.MemMonitor.MaximumBytes())),
},
Output: m.OutputHelper.Stats(),
}
}
// ChildCount is part of the execinfra.OpNode interface.
func (m *mergeJoiner) ChildCount(verbose bool) int {
if _, ok := m.leftSource.(execinfra.OpNode); ok {
if _, ok := m.rightSource.(execinfra.OpNode); ok {
return 2
}
}
return 0
}
// Child is part of the execinfra.OpNode interface.
func (m *mergeJoiner) Child(nth int, verbose bool) execinfra.OpNode {
switch nth {
case 0:
if n, ok := m.leftSource.(execinfra.OpNode); ok {
return n
}
panic("left input to mergeJoiner is not an execinfra.OpNode")
case 1:
if n, ok := m.rightSource.(execinfra.OpNode); ok {
return n
}
panic("right input to mergeJoiner is not an execinfra.OpNode")
default:
panic(errors.AssertionFailedf("invalid index %d", nth))
}
}
| pkg/sql/rowexec/mergejoiner.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.00021756910427939147,
0.00017231676611118019,
0.0001634004875086248,
0.00017117575043812394,
0.000008851677193888463
] |
{
"id": 9,
"code_window": [
"\ttdb.CheckQueryResults(t,\n",
"\t\t`SELECT strpos(desc_json, 'constraintId') > 0,\n",
" strpos(desc_json, 'nextConstraintId') > 0\n",
" FROM (\n",
"\t\tSELECT jsonb_pretty(\n",
"\t\t\t\tcrdb_internal.pb_to_json(\n"
],
"labels": [
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t`SELECT strpos(desc_json, 'nextConstraintId') > 0,\n",
" array_length(string_to_array(desc_json, 'constraintId'), 1) > 4\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "replace",
"edit_start_line_idx": 88
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package catconstants
const (
// NamespaceTableFamilyID is the column family of the namespace table which is
// actually written to.
NamespaceTableFamilyID = 4
// NamespaceTablePrimaryIndexID is the id of the primary index of the
// namespace table.
NamespaceTablePrimaryIndexID = 1
// PreMigrationNamespaceTableName is the name that was used on the descriptor
// of the current namespace table before the DeprecatedNamespaceTable was
// migrated away.
PreMigrationNamespaceTableName = "namespace2"
)
| pkg/sql/catalog/catconstants/namespace.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.00017719584866426885,
0.00017070827016141266,
0.00016271360800601542,
0.0001722153538139537,
0.0000060076231420680415
] |
{
"id": 9,
"code_window": [
"\ttdb.CheckQueryResults(t,\n",
"\t\t`SELECT strpos(desc_json, 'constraintId') > 0,\n",
" strpos(desc_json, 'nextConstraintId') > 0\n",
" FROM (\n",
"\t\tSELECT jsonb_pretty(\n",
"\t\t\t\tcrdb_internal.pb_to_json(\n"
],
"labels": [
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t`SELECT strpos(desc_json, 'nextConstraintId') > 0,\n",
" array_length(string_to_array(desc_json, 'constraintId'), 1) > 4\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "replace",
"edit_start_line_idx": 88
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package descs
import (
"context"
"strings"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catconstants"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/lease"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
)
// GetMutableDescriptorsByID returns a mutable implementation of the descriptors
// with the requested ids. An error is returned if no descriptor exists.
func (tc *Collection) GetMutableDescriptorsByID(
ctx context.Context, txn *kv.Txn, ids ...descpb.ID,
) ([]catalog.MutableDescriptor, error) {
flags := tree.CommonLookupFlags{
Required: true,
RequireMutable: true,
IncludeOffline: true,
IncludeDropped: true,
}
descs, err := tc.getDescriptorsByID(ctx, txn, flags, ids...)
if err != nil {
return nil, err
}
ret := make([]catalog.MutableDescriptor, len(descs))
for i, desc := range descs {
ret[i] = desc.(catalog.MutableDescriptor)
}
return ret, nil
}
// GetMutableDescriptorByID delegates to GetMutableDescriptorsByID.
func (tc *Collection) GetMutableDescriptorByID(
ctx context.Context, txn *kv.Txn, id descpb.ID,
) (catalog.MutableDescriptor, error) {
descs, err := tc.GetMutableDescriptorsByID(ctx, txn, id)
if err != nil {
return nil, err
}
return descs[0], nil
}
// GetImmutableDescriptorsByID returns an immutable implementation of the
// descriptors with the requested ids. An error is returned if no descriptor
// exists, regardless of whether the Required flag is set or not.
func (tc *Collection) GetImmutableDescriptorsByID(
ctx context.Context, txn *kv.Txn, flags tree.CommonLookupFlags, ids ...descpb.ID,
) ([]catalog.Descriptor, error) {
flags.RequireMutable = false
return tc.getDescriptorsByID(ctx, txn, flags, ids...)
}
// GetImmutableDescriptorByID delegates to GetImmutableDescriptorsByID.
func (tc *Collection) GetImmutableDescriptorByID(
ctx context.Context, txn *kv.Txn, id descpb.ID, flags tree.CommonLookupFlags,
) (catalog.Descriptor, error) {
descs, err := tc.GetImmutableDescriptorsByID(ctx, txn, flags, id)
if err != nil {
return nil, err
}
return descs[0], nil
}
// getDescriptorsByID returns a slice of descriptors by ID according to the
// provided lookup flags. Note that flags.Required is ignored, and an error is
// always returned if no descriptor with the ID exists.
func (tc *Collection) getDescriptorsByID(
ctx context.Context, txn *kv.Txn, flags tree.CommonLookupFlags, ids ...descpb.ID,
) (descs []catalog.Descriptor, err error) {
defer func() {
if err == nil {
err = filterDescriptorsStates(descs, flags)
}
if err != nil {
descs = nil
}
}()
log.VEventf(ctx, 2, "looking up descriptors for ids %d", ids)
descs = make([]catalog.Descriptor, len(ids))
{
// Look up the descriptors in all layers except the KV layer on a
// best-effort basis.
q := byIDLookupContext{
ctx: ctx,
txn: txn,
tc: tc,
flags: flags,
}
for _, fn := range []func(id descpb.ID) (catalog.Descriptor, error){
q.lookupVirtual,
q.lookupSynthetic,
q.lookupUncommitted,
q.lookupLeased,
} {
for i, id := range ids {
if descs[i] != nil {
continue
}
desc, err := fn(id)
if err != nil {
return nil, err
}
descs[i] = desc
}
}
}
kvIDs := make([]descpb.ID, 0, len(ids))
indexes := make([]int, 0, len(ids))
for i, id := range ids {
if descs[i] != nil {
continue
}
kvIDs = append(kvIDs, id)
indexes = append(indexes, i)
}
if len(kvIDs) == 0 {
// No KV lookup necessary, return early.
return descs, nil
}
kvDescs, err := tc.withReadFromStore(flags.RequireMutable, func() ([]catalog.MutableDescriptor, error) {
return tc.kv.getByIDs(ctx, txn, tc.version, kvIDs)
})
if err != nil {
return nil, err
}
for j, desc := range kvDescs {
descs[indexes[j]] = desc
}
return descs, nil
}
// byIDLookupContext is a helper struct for getDescriptorsByID which contains
// the parameters for looking up descriptors by ID at various levels in the
// Collection.
type byIDLookupContext struct {
ctx context.Context
txn *kv.Txn
tc *Collection
flags tree.CommonLookupFlags
}
func (q *byIDLookupContext) lookupVirtual(id descpb.ID) (catalog.Descriptor, error) {
return q.tc.virtual.getByID(q.ctx, id, q.flags.RequireMutable)
}
func (q *byIDLookupContext) lookupSynthetic(id descpb.ID) (catalog.Descriptor, error) {
if q.flags.AvoidSynthetic {
return nil, nil
}
_, sd := q.tc.synthetic.getByID(id)
if sd == nil {
return nil, nil
}
if q.flags.RequireMutable {
return nil, newMutableSyntheticDescriptorAssertionError(sd.GetID())
}
return sd, nil
}
func (q *byIDLookupContext) lookupUncommitted(id descpb.ID) (_ catalog.Descriptor, err error) {
ud := q.tc.uncommitted.getByID(id)
if ud == nil {
return nil, nil
}
log.VEventf(q.ctx, 2, "found uncommitted descriptor %d", id)
if !q.flags.RequireMutable {
return ud, nil
}
return q.tc.uncommitted.checkOut(id)
}
func (q *byIDLookupContext) lookupLeased(id descpb.ID) (catalog.Descriptor, error) {
if q.flags.AvoidLeased || q.flags.RequireMutable || lease.TestingTableLeasesAreDisabled() {
return nil, nil
}
// If we have already read all of the descriptors, use it as a negative
// cache to short-circuit a lookup we know will be doomed to fail.
//
// TODO(ajwerner): More generally leverage this set of kv descriptors on
// the resolution path.
if q.tc.kv.idDefinitelyDoesNotExist(id) {
return nil, catalog.ErrDescriptorNotFound
}
desc, shouldReadFromStore, err := q.tc.leased.getByID(q.ctx, q.tc.deadlineHolder(q.txn), id)
if err != nil || shouldReadFromStore {
return nil, err
}
return desc, nil
}
// filterDescriptorsStates is a helper function for getDescriptorsByID.
func filterDescriptorsStates(descs []catalog.Descriptor, flags tree.CommonLookupFlags) error {
for _, desc := range descs {
		// The first return value can safely be ignored; it will always be false
// because the required flag is set.
_, err := filterDescriptorState(desc, true /* required */, flags)
if err == nil {
continue
}
if desc.Adding() && (desc.IsUncommittedVersion() || flags.AvoidLeased || flags.RequireMutable) {
// This is a special case for tables in the adding state: Roughly speaking,
// we always need to resolve tables in the adding state by ID when they were
// newly created in the transaction for DDL statements and for some
// information queries (but not for ordinary name resolution for queries/
// DML), but we also need to make these tables public in the schema change
// job in a separate transaction.
// TODO (lucy): We need something like an IncludeAdding flag so that callers
// can specify this behavior, instead of having the collection infer the
// desired behavior based on the flags (and likely producing unintended
			// behavior). See the similar comment on getDescriptorByName, which covers
// the ordinary name resolution path as well as DDL statements.
continue
}
return err
}
return nil
}
func (tc *Collection) getByName(
ctx context.Context,
txn *kv.Txn,
db catalog.DatabaseDescriptor,
sc catalog.SchemaDescriptor,
name string,
avoidLeased, mutable, avoidSynthetic bool,
) (found bool, desc catalog.Descriptor, err error) {
var parentID, parentSchemaID descpb.ID
if db != nil {
if sc == nil {
			// Schema descriptors are handled in a special way; see the getSchemaByName
// function declaration for details.
return getSchemaByName(ctx, tc, txn, db, name, avoidLeased, mutable, avoidSynthetic)
}
parentID, parentSchemaID = db.GetID(), sc.GetID()
}
if found, sd := tc.synthetic.getByName(parentID, parentSchemaID, name); found && !avoidSynthetic {
if mutable {
return false, nil, newMutableSyntheticDescriptorAssertionError(sd.GetID())
}
return true, sd, nil
}
{
refuseFurtherLookup, ud := tc.uncommitted.getByName(parentID, parentSchemaID, name)
if ud != nil {
log.VEventf(ctx, 2, "found uncommitted descriptor %d", ud.GetID())
if mutable {
ud, err = tc.uncommitted.checkOut(ud.GetID())
if err != nil {
return false, nil, err
}
}
return true, ud, nil
}
if refuseFurtherLookup {
return false, nil, nil
}
}
if !avoidLeased && !mutable && !lease.TestingTableLeasesAreDisabled() {
var shouldReadFromStore bool
desc, shouldReadFromStore, err = tc.leased.getByName(ctx, tc.deadlineHolder(txn), parentID, parentSchemaID, name)
if err != nil {
return false, nil, err
}
if !shouldReadFromStore {
return desc != nil, desc, nil
}
}
var descs []catalog.Descriptor
descs, err = tc.withReadFromStore(mutable, func() ([]catalog.MutableDescriptor, error) {
uncommittedDB, _ := tc.uncommitted.getByID(parentID).(catalog.DatabaseDescriptor)
version := tc.settings.Version.ActiveVersion(ctx)
desc, err := tc.kv.getByName(
ctx,
txn,
version,
uncommittedDB,
parentID,
parentSchemaID,
name)
if err != nil {
return nil, err
}
return []catalog.MutableDescriptor{desc}, nil
})
if err != nil {
return false, nil, err
}
return true, descs[0], err
}
// withReadFromStore updates the state of the Collection, especially its
// uncommitted descriptors layer, after reading a descriptor from the storage
// layer. The logic is the same regardless of whether the descriptor was read
// by name or by ID.
func (tc *Collection) withReadFromStore(
requireMutable bool, readFn func() ([]catalog.MutableDescriptor, error),
) (descs []catalog.Descriptor, _ error) {
muts, err := readFn()
if err != nil {
return nil, err
}
descs = make([]catalog.Descriptor, len(muts))
for i, mut := range muts {
if mut == nil {
continue
}
desc, err := tc.uncommitted.add(mut)
if err != nil {
return nil, err
}
if requireMutable {
desc, err = tc.uncommitted.checkOut(desc.GetID())
if err != nil {
return nil, err
}
}
descs[i] = desc
}
return descs, nil
}
func (tc *Collection) deadlineHolder(txn *kv.Txn) deadlineHolder {
if tc.maxTimestampBoundDeadlineHolder.maxTimestampBound.IsEmpty() {
return txn
}
return &tc.maxTimestampBoundDeadlineHolder
}
// Getting a schema by name uses a special resolution path which can avoid
// a namespace lookup because the mapping of database to schema is stored on
// the database itself. This is an important optimization in the case when
// the schema does not exist.
//
// TODO(ajwerner): Understand and rationalize the namespace lookup given the
// schema lookup by ID path only returns descriptors owned by this session.
func getSchemaByName(
ctx context.Context,
tc *Collection,
txn *kv.Txn,
db catalog.DatabaseDescriptor,
name string,
avoidLeased, mutable, avoidSynthetic bool,
) (bool, catalog.Descriptor, error) {
if !db.HasPublicSchemaWithDescriptor() && name == tree.PublicSchema {
return true, schemadesc.GetPublicSchema(), nil
}
if sc := tc.virtual.getSchemaByName(name); sc != nil {
return true, sc, nil
}
if isTemporarySchema(name) {
if isDone, sc := tc.temporary.getSchemaByName(ctx, db.GetID(), name); sc != nil || isDone {
return sc != nil, sc, nil
}
scID, err := tc.kv.lookupName(ctx, txn, nil /* maybeDB */, db.GetID(), keys.RootNamespaceID, name)
if err != nil || scID == descpb.InvalidID {
return false, nil, err
}
return true, schemadesc.NewTemporarySchema(name, scID, db.GetID()), nil
}
if id := db.GetSchemaID(name); id != descpb.InvalidID {
// TODO(ajwerner): Fill in flags here or, more likely, get rid of
// it on this path.
sc, err := tc.getSchemaByID(ctx, txn, id, tree.SchemaLookupFlags{
RequireMutable: mutable,
AvoidLeased: avoidLeased,
AvoidSynthetic: avoidSynthetic,
})
if errors.Is(err, catalog.ErrDescriptorDropped) {
err = nil
}
return sc != nil, sc, err
}
return false, nil, nil
}
func isTemporarySchema(name string) bool {
return strings.HasPrefix(name, catconstants.PgTempSchemaName)
}
// filterDescriptorState wraps the more general catalog function to swallow
// the error if the descriptor is being dropped and the descriptor is not
// required. In that case, dropped will be true. A return value of false, nil
// means this descriptor is okay given the flags.
// TODO (lucy): We would like the ByID methods to ignore the Required flag and
// unconditionally return an error for dropped descriptors if IncludeDropped is
// not set, so we can't just pass the flags passed into the methods into this
// function, hence the boolean argument. This is the only user of
// catalog.FilterDescriptorState which needs to pass in nontrivial flags, at
// time of writing, so we should clean up the interface around this bit of
// functionality.
func filterDescriptorState(
desc catalog.Descriptor, required bool, flags tree.CommonLookupFlags,
) (dropped bool, _ error) {
flags = tree.CommonLookupFlags{
Required: required,
IncludeOffline: flags.IncludeOffline,
IncludeDropped: flags.IncludeDropped,
}
if err := catalog.FilterDescriptorState(desc, flags); err != nil {
if required || !errors.Is(err, catalog.ErrDescriptorDropped) {
return false, err
}
return true, nil
}
return false, nil
}
| pkg/sql/catalog/descs/descriptor.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.0003689505101647228,
0.00017545152513775975,
0.00016248987230937928,
0.00017019262304529548,
0.000030649291147710755
] |
{
"id": 10,
"code_window": [
"\t\t[][]string{{clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String()}})\n",
"\t// Validate the constraint IDs are populated.\n",
"\t// Validate that the descriptor is invalid, since the constraint IDs\n",
"\t// are missing.\n",
"\ttdb.CheckQueryResults(t,\n",
"\t\t`SELECT strpos(desc_json, 'constraintId') > 0,\n",
" strpos(desc_json, 'nextConstraintId') > 0\n",
" FROM (\n",
"\t\tSELECT jsonb_pretty(\n",
"\t\t\t\tcrdb_internal.pb_to_json(\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t`SELECT strpos(desc_json, 'nextConstraintId') > 0,\n",
" array_length(string_to_array(desc_json, 'constraintId'), 1) > 4\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "replace",
"edit_start_line_idx": 113
} | debug doctor zipdir --verbose
----
debug doctor zipdir testdata/doctor/debugzip 21.11-52 --verbose
reading testdata/doctor/debugzip/system.descriptor.txt
reading testdata/doctor/debugzip/system.namespace.txt
WARNING: errors occurred during the production of system.jobs.txt, contents may be missing or incomplete.
reading testdata/doctor/debugzip/system.jobs.txt
Examining 37 descriptors and 42 namespace entries...
ParentID 0, ParentSchemaID 0: database "system" (1): processed
ParentID 1, ParentSchemaID 29: relation "descriptor" (3): processed
ParentID 1, ParentSchemaID 29: relation "users" (4): processed
ParentID 1, ParentSchemaID 29: relation "zones" (5): processed
ParentID 1, ParentSchemaID 29: relation "settings" (6): processed
ParentID 1, ParentSchemaID 29: relation "tenants" (8): processed
ParentID 1, ParentSchemaID 29: relation "lease" (11): processed
ParentID 1, ParentSchemaID 29: relation "eventlog" (12): processed
ParentID 1, ParentSchemaID 29: relation "rangelog" (13): processed
ParentID 1, ParentSchemaID 29: relation "ui" (14): processed
ParentID 1, ParentSchemaID 29: relation "jobs" (15): processed
ParentID 1, ParentSchemaID 29: relation "web_sessions" (19): processed
ParentID 1, ParentSchemaID 29: relation "table_statistics" (20): processed
ParentID 1, ParentSchemaID 29: relation "locations" (21): processed
ParentID 1, ParentSchemaID 29: relation "role_members" (23): processed
ParentID 1, ParentSchemaID 29: relation "comments" (24): processed
ParentID 1, ParentSchemaID 29: relation "replication_constraint_stats" (25): processed
ParentID 1, ParentSchemaID 29: relation "replication_critical_localities" (26): processed
ParentID 1, ParentSchemaID 29: relation "replication_stats" (27): processed
ParentID 1, ParentSchemaID 29: relation "reports_meta" (28): processed
ParentID 1, ParentSchemaID 29: relation "namespace" (30): processed
ParentID 1, ParentSchemaID 29: relation "protected_ts_meta" (31): processed
ParentID 1, ParentSchemaID 29: relation "protected_ts_records" (32): processed
ParentID 1, ParentSchemaID 29: relation "role_options" (33): processed
ParentID 1, ParentSchemaID 29: relation "statement_bundle_chunks" (34): processed
ParentID 1, ParentSchemaID 29: relation "statement_diagnostics_requests" (35): processed
ParentID 1, ParentSchemaID 29: relation "statement_diagnostics" (36): processed
ParentID 1, ParentSchemaID 29: relation "scheduled_jobs" (37): processed
ParentID 1, ParentSchemaID 29: relation "sqlliveness" (39): processed
ParentID 0, ParentSchemaID 0: database "defaultdb" (50): processed
ParentID 0, ParentSchemaID 0: database "postgres" (51): processed
ParentID 52, ParentSchemaID 29: relation "users" (53): referenced database ID 52: referenced descriptor not found
ParentID 52, ParentSchemaID 29: relation "users" (53): processed
ParentID 52, ParentSchemaID 29: relation "vehicles" (54): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users"
ParentID 52, ParentSchemaID 29: relation "vehicles" (54): processed
ParentID 52, ParentSchemaID 29: relation "rides" (55): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users"
ParentID 52, ParentSchemaID 29: relation "rides" (55): constraint id was missing for constraint: FOREIGN KEY with name "fk_vehicle_city_ref_vehicles"
ParentID 52, ParentSchemaID 29: relation "rides" (55): processed
ParentID 52, ParentSchemaID 29: relation "vehicle_location_histories" (56): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_rides"
ParentID 52, ParentSchemaID 29: relation "vehicle_location_histories" (56): processed
ParentID 52, ParentSchemaID 29: relation "promo_codes" (57): referenced database ID 52: referenced descriptor not found
ParentID 52, ParentSchemaID 29: relation "promo_codes" (57): processed
ParentID 52, ParentSchemaID 29: relation "user_promo_codes" (58): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users"
ParentID 52, ParentSchemaID 29: relation "user_promo_codes" (58): processed
ParentID 0, ParentSchemaID 0: namespace entry "defaultdb" (50): processed
ParentID 0, ParentSchemaID 0: namespace entry "movr" (52): descriptor not found
ParentID 0, ParentSchemaID 0: namespace entry "postgres" (51): processed
ParentID 0, ParentSchemaID 0: namespace entry "system" (1): processed
ParentID 1, ParentSchemaID 0: namespace entry "public" (29): processed
ParentID 1, ParentSchemaID 29: namespace entry "comments" (24): processed
ParentID 1, ParentSchemaID 29: namespace entry "descriptor" (3): processed
ParentID 1, ParentSchemaID 29: namespace entry "eventlog" (12): processed
ParentID 1, ParentSchemaID 29: namespace entry "jobs" (15): processed
ParentID 1, ParentSchemaID 29: namespace entry "lease" (11): processed
ParentID 1, ParentSchemaID 29: namespace entry "locations" (21): processed
ParentID 1, ParentSchemaID 29: namespace entry "namespace" (30): processed
ParentID 1, ParentSchemaID 29: namespace entry "protected_ts_meta" (31): processed
ParentID 1, ParentSchemaID 29: namespace entry "protected_ts_records" (32): processed
ParentID 1, ParentSchemaID 29: namespace entry "rangelog" (13): processed
ParentID 1, ParentSchemaID 29: namespace entry "replication_constraint_stats" (25): processed
ParentID 1, ParentSchemaID 29: namespace entry "replication_critical_localities" (26): processed
ParentID 1, ParentSchemaID 29: namespace entry "replication_stats" (27): processed
ParentID 1, ParentSchemaID 29: namespace entry "reports_meta" (28): processed
ParentID 1, ParentSchemaID 29: namespace entry "role_members" (23): processed
ParentID 1, ParentSchemaID 29: namespace entry "role_options" (33): processed
ParentID 1, ParentSchemaID 29: namespace entry "scheduled_jobs" (37): processed
ParentID 1, ParentSchemaID 29: namespace entry "settings" (6): processed
ParentID 1, ParentSchemaID 29: namespace entry "sqlliveness" (39): processed
ParentID 1, ParentSchemaID 29: namespace entry "statement_bundle_chunks" (34): processed
ParentID 1, ParentSchemaID 29: namespace entry "statement_diagnostics" (36): processed
ParentID 1, ParentSchemaID 29: namespace entry "statement_diagnostics_requests" (35): processed
ParentID 1, ParentSchemaID 29: namespace entry "table_statistics" (20): processed
ParentID 1, ParentSchemaID 29: namespace entry "tenants" (8): processed
ParentID 1, ParentSchemaID 29: namespace entry "ui" (14): processed
ParentID 1, ParentSchemaID 29: namespace entry "users" (4): processed
ParentID 1, ParentSchemaID 29: namespace entry "web_sessions" (19): processed
ParentID 1, ParentSchemaID 29: namespace entry "zones" (5): processed
ParentID 50, ParentSchemaID 0: namespace entry "public" (29): processed
ParentID 51, ParentSchemaID 0: namespace entry "public" (29): processed
ParentID 52, ParentSchemaID 0: namespace entry "public" (29): processed
ParentID 52, ParentSchemaID 29: namespace entry "promo_codes" (57): processed
ParentID 52, ParentSchemaID 29: namespace entry "rides" (55): processed
ParentID 52, ParentSchemaID 29: namespace entry "user_promo_codes" (58): processed
ParentID 52, ParentSchemaID 29: namespace entry "users" (53): processed
ParentID 52, ParentSchemaID 29: namespace entry "vehicle_location_histories" (56): processed
ParentID 52, ParentSchemaID 29: namespace entry "vehicles" (54): processed
Examining 2 jobs...
Processing job 587337426939772929
Processing job 587337426984566785
job 587337426984566785: running schema change GC refers to missing table descriptor(s) [59]; existing descriptors that still need to be dropped []; job safe to delete: true.
ERROR: validation failed
| pkg/cli/testdata/doctor/test_examine_zipdir_verbose | 1 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.00021374155767261982,
0.00017585247405804694,
0.00016648363089188933,
0.00017318958998657763,
0.000012924869224661961
] |
{
"id": 10,
"code_window": [
"\t\t[][]string{{clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String()}})\n",
"\t// Validate the constraint IDs are populated.\n",
"\t// Validate that the descriptor is invalid, since the constraint IDs\n",
"\t// are missing.\n",
"\ttdb.CheckQueryResults(t,\n",
"\t\t`SELECT strpos(desc_json, 'constraintId') > 0,\n",
" strpos(desc_json, 'nextConstraintId') > 0\n",
" FROM (\n",
"\t\tSELECT jsonb_pretty(\n",
"\t\t\t\tcrdb_internal.pb_to_json(\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t`SELECT strpos(desc_json, 'nextConstraintId') > 0,\n",
" array_length(string_to_array(desc_json, 'constraintId'), 1) > 4\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "replace",
"edit_start_line_idx": 113
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package execinfra
import (
"time"
"github.com/cockroachdb/cockroach/pkg/util/metric"
)
// DistSQLMetrics contains pointers to the metrics for monitoring DistSQL
// processing.
type DistSQLMetrics struct {
QueriesActive *metric.Gauge
QueriesTotal *metric.Counter
ContendedQueriesCount *metric.Counter
FlowsActive *metric.Gauge
FlowsTotal *metric.Counter
FlowsQueued *metric.Gauge
FlowsScheduled *metric.Counter
QueueWaitHist *metric.Histogram
MaxBytesHist *metric.Histogram
CurBytesCount *metric.Gauge
VecOpenFDs *metric.Gauge
CurDiskBytesCount *metric.Gauge
MaxDiskBytesHist *metric.Histogram
QueriesSpilled *metric.Counter
SpilledBytesWritten *metric.Counter
SpilledBytesRead *metric.Counter
}
// MetricStruct implements the metrics.Struct interface.
func (DistSQLMetrics) MetricStruct() {}
var _ metric.Struct = DistSQLMetrics{}
var (
metaQueriesActive = metric.Metadata{
Name: "sql.distsql.queries.active",
Help: "Number of SQL queries currently active",
Measurement: "Queries",
Unit: metric.Unit_COUNT,
}
metaQueriesTotal = metric.Metadata{
Name: "sql.distsql.queries.total",
Help: "Number of SQL queries executed",
Measurement: "Queries",
Unit: metric.Unit_COUNT,
}
metaContendedQueriesCount = metric.Metadata{
Name: "sql.distsql.contended_queries.count",
Help: "Number of SQL queries that experienced contention",
Measurement: "Queries",
Unit: metric.Unit_COUNT,
}
metaFlowsActive = metric.Metadata{
Name: "sql.distsql.flows.active",
Help: "Number of distributed SQL flows currently active",
Measurement: "Flows",
Unit: metric.Unit_COUNT,
}
metaFlowsTotal = metric.Metadata{
Name: "sql.distsql.flows.total",
Help: "Number of distributed SQL flows executed",
Measurement: "Flows",
Unit: metric.Unit_COUNT,
}
metaFlowsQueued = metric.Metadata{
Name: "sql.distsql.flows.queued",
Help: "Number of distributed SQL flows currently queued",
Measurement: "Flows",
Unit: metric.Unit_COUNT,
}
metaFlowsScheduled = metric.Metadata{
Name: "sql.distsql.flows.scheduled",
Help: "Number of distributed SQL flows scheduled",
Measurement: "Flows",
Unit: metric.Unit_COUNT,
}
metaQueueWaitHist = metric.Metadata{
Name: "sql.distsql.flows.queue_wait",
Help: "Duration of time flows spend waiting in the queue",
Measurement: "Nanoseconds",
Unit: metric.Unit_NANOSECONDS,
}
metaMemMaxBytes = metric.Metadata{
Name: "sql.mem.distsql.max",
Help: "Memory usage per sql statement for distsql",
Measurement: "Memory",
Unit: metric.Unit_BYTES,
}
metaMemCurBytes = metric.Metadata{
Name: "sql.mem.distsql.current",
Help: "Current sql statement memory usage for distsql",
Measurement: "Memory",
Unit: metric.Unit_BYTES,
}
metaVecOpenFDs = metric.Metadata{
Name: "sql.distsql.vec.openfds",
Help: "Current number of open file descriptors used by vectorized external storage",
Measurement: "Files",
Unit: metric.Unit_COUNT,
}
metaDiskCurBytes = metric.Metadata{
Name: "sql.disk.distsql.current",
Help: "Current sql statement disk usage for distsql",
Measurement: "Disk",
Unit: metric.Unit_BYTES,
}
metaDiskMaxBytes = metric.Metadata{
Name: "sql.disk.distsql.max",
Help: "Disk usage per sql statement for distsql",
Measurement: "Disk",
Unit: metric.Unit_BYTES,
}
metaQueriesSpilled = metric.Metadata{
Name: "sql.distsql.queries.spilled",
Help: "Number of queries that have spilled to disk",
Measurement: "Queries",
Unit: metric.Unit_COUNT,
}
metaSpilledBytesWritten = metric.Metadata{
Name: "sql.disk.distsql.spilled.bytes.written",
Help: "Number of bytes written to temporary disk storage as a result of spilling",
Measurement: "Disk",
Unit: metric.Unit_BYTES,
}
metaSpilledBytesRead = metric.Metadata{
Name: "sql.disk.distsql.spilled.bytes.read",
Help: "Number of bytes read from temporary disk storage as a result of spilling",
Measurement: "Disk",
Unit: metric.Unit_BYTES,
}
)
// See pkg/sql/mem_metrics.go
// log10int64times1000 = log10(math.MaxInt64) * 1000, rounded up somewhat
const log10int64times1000 = 19 * 1000
// MakeDistSQLMetrics instantiates the metrics holder for DistSQL monitoring.
func MakeDistSQLMetrics(histogramWindow time.Duration) DistSQLMetrics {
return DistSQLMetrics{
QueriesActive: metric.NewGauge(metaQueriesActive),
QueriesTotal: metric.NewCounter(metaQueriesTotal),
ContendedQueriesCount: metric.NewCounter(metaContendedQueriesCount),
FlowsActive: metric.NewGauge(metaFlowsActive),
FlowsTotal: metric.NewCounter(metaFlowsTotal),
FlowsQueued: metric.NewGauge(metaFlowsQueued),
FlowsScheduled: metric.NewCounter(metaFlowsScheduled),
QueueWaitHist: metric.NewLatency(metaQueueWaitHist, histogramWindow),
MaxBytesHist: metric.NewHistogram(metaMemMaxBytes, histogramWindow, log10int64times1000, 3),
CurBytesCount: metric.NewGauge(metaMemCurBytes),
VecOpenFDs: metric.NewGauge(metaVecOpenFDs),
CurDiskBytesCount: metric.NewGauge(metaDiskCurBytes),
MaxDiskBytesHist: metric.NewHistogram(metaDiskMaxBytes, histogramWindow, log10int64times1000, 3),
QueriesSpilled: metric.NewCounter(metaQueriesSpilled),
SpilledBytesWritten: metric.NewCounter(metaSpilledBytesWritten),
SpilledBytesRead: metric.NewCounter(metaSpilledBytesRead),
}
}
// QueryStart registers the start of a new DistSQL query.
func (m *DistSQLMetrics) QueryStart() {
m.QueriesActive.Inc(1)
m.QueriesTotal.Inc(1)
}
// QueryStop registers the end of a DistSQL query.
func (m *DistSQLMetrics) QueryStop() {
m.QueriesActive.Dec(1)
}
// FlowStart registers the start of a new DistSQL flow.
func (m *DistSQLMetrics) FlowStart() {
m.FlowsActive.Inc(1)
m.FlowsTotal.Inc(1)
}
// FlowStop registers the end of a DistSQL flow.
func (m *DistSQLMetrics) FlowStop() {
m.FlowsActive.Dec(1)
}
| pkg/sql/execinfra/metrics.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.0001990318123716861,
0.0001723345776554197,
0.00016375628183595836,
0.00017113328794948757,
0.000008280086149170529
] |
{
"id": 10,
"code_window": [
"\t\t[][]string{{clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String()}})\n",
"\t// Validate the constraint IDs are populated.\n",
"\t// Validate that the descriptor is invalid, since the constraint IDs\n",
"\t// are missing.\n",
"\ttdb.CheckQueryResults(t,\n",
"\t\t`SELECT strpos(desc_json, 'constraintId') > 0,\n",
" strpos(desc_json, 'nextConstraintId') > 0\n",
" FROM (\n",
"\t\tSELECT jsonb_pretty(\n",
"\t\t\t\tcrdb_internal.pb_to_json(\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t`SELECT strpos(desc_json, 'nextConstraintId') > 0,\n",
" array_length(string_to_array(desc_json, 'constraintId'), 1) > 4\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "replace",
"edit_start_line_idx": 113
} | CREATE INDEX test_fb ON test (f, b);
CREATE UNIQUE INDEX test_f_unique ON test (f);
| pkg/acceptance/compose/flyway/sql/V2__make_index.sql | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.0001736303820507601,
0.0001736303820507601,
0.0001736303820507601,
0.0001736303820507601,
0
] |
{
"id": 10,
"code_window": [
"\t\t[][]string{{clusterversion.ByKey(tabledesc.ConstraintIDsAddedToTableDescsVersion).String()}})\n",
"\t// Validate the constraint IDs are populated.\n",
"\t// Validate that the descriptor is invalid, since the constraint IDs\n",
"\t// are missing.\n",
"\ttdb.CheckQueryResults(t,\n",
"\t\t`SELECT strpos(desc_json, 'constraintId') > 0,\n",
" strpos(desc_json, 'nextConstraintId') > 0\n",
" FROM (\n",
"\t\tSELECT jsonb_pretty(\n",
"\t\t\t\tcrdb_internal.pb_to_json(\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t`SELECT strpos(desc_json, 'nextConstraintId') > 0,\n",
" array_length(string_to_array(desc_json, 'constraintId'), 1) > 4\n"
],
"file_path": "pkg/migration/migrations/ensure_constraint_id_test.go",
"type": "replace",
"edit_start_line_idx": 113
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package bulk
import (
"bytes"
"sort"
"testing"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
)
// kvPair is a bytes -> bytes kv pair.
type kvPair struct {
key roachpb.Key
value []byte
}
func makeTestData(num int) (kvs []kvPair, totalSize int) {
kvs = make([]kvPair, num)
r, _ := randutil.NewTestRand()
alloc := make([]byte, num*500)
randutil.ReadTestdataBytes(r, alloc)
for i := range kvs {
if len(alloc) < 1500 {
const refill = 15000
alloc = make([]byte, refill)
randutil.ReadTestdataBytes(r, alloc)
}
kvs[i].key = alloc[:randutil.RandIntInRange(r, 2, 100)]
alloc = alloc[len(kvs[i].key):]
kvs[i].value = alloc[:randutil.RandIntInRange(r, 0, 1000)]
alloc = alloc[len(kvs[i].value):]
totalSize += len(kvs[i].key) + len(kvs[i].value)
}
return kvs, totalSize
}
func TestKvBuf(t *testing.T) {
defer leaktest.AfterTest(t)()
src, totalSize := makeTestData(50000)
// Write everything to our buf.
b := kvBuf{}
for i := range src {
if err := b.append(src[i].key, src[i].value); err != nil {
t.Fatal(err)
}
}
// Sanity check our buf has right size.
if expected, actual := len(src), b.Len(); expected != actual {
t.Fatalf("expected len %d got %d", expected, actual)
}
if expected, actual := totalSize+len(src)*16, b.MemSize; expected != actual {
t.Fatalf("expected len %d got %d", expected, actual)
}
// Read back what we wrote.
for i := range src {
if expected, actual := src[i].key, b.Key(i); !bytes.Equal(expected, actual) {
t.Fatalf("expected %s\ngot %s", expected, actual)
}
if expected, actual := src[i].value, b.Value(i); !bytes.Equal(expected, actual) {
t.Fatalf("expected %s\ngot %s", expected, actual)
}
}
// Sort both and then ensure they match.
sort.Slice(src, func(i, j int) bool { return bytes.Compare(src[i].key, src[j].key) < 0 })
sort.Sort(&b)
for i := range src {
if expected, actual := src[i].key, b.Key(i); !bytes.Equal(expected, actual) {
t.Fatalf("expected %s\ngot %s", expected, actual)
}
if expected, actual := src[i].value, b.Value(i); !bytes.Equal(expected, actual) {
t.Fatalf("expected %s\ngot %s", expected, actual)
}
}
}
| pkg/kv/bulk/kv_buf_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.0001767205394571647,
0.00017166847828775644,
0.0001674306986387819,
0.00017182351439259946,
0.0000027221290110901464
] |
{
"id": 11,
"code_window": [
"\t\t\tfk.ConstraintID = nextConstraintID()\n",
"\t\t}\n",
"\t}\n",
"\tfor i := range desc.OutboundFKs {\n",
"\t\tfk := desc.OutboundFKs[i]\n",
"\t\tif fk.ConstraintID == 0 {\n",
"\t\t\tfk.ConstraintID = nextConstraintID()\n",
"\t\t}\n",
"\t}\n",
"\tfor i := range desc.UniqueWithoutIndexConstraints {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tfk := &desc.OutboundFKs[i]\n"
],
"file_path": "pkg/sql/catalog/tabledesc/table_desc_builder.go",
"type": "replace",
"edit_start_line_idx": 707
} | debug doctor zipdir --verbose
----
debug doctor zipdir testdata/doctor/debugzip 21.11-52 --verbose
reading testdata/doctor/debugzip/system.descriptor.txt
reading testdata/doctor/debugzip/system.namespace.txt
WARNING: errors occurred during the production of system.jobs.txt, contents may be missing or incomplete.
reading testdata/doctor/debugzip/system.jobs.txt
Examining 37 descriptors and 42 namespace entries...
ParentID 0, ParentSchemaID 0: database "system" (1): processed
ParentID 1, ParentSchemaID 29: relation "descriptor" (3): processed
ParentID 1, ParentSchemaID 29: relation "users" (4): processed
ParentID 1, ParentSchemaID 29: relation "zones" (5): processed
ParentID 1, ParentSchemaID 29: relation "settings" (6): processed
ParentID 1, ParentSchemaID 29: relation "tenants" (8): processed
ParentID 1, ParentSchemaID 29: relation "lease" (11): processed
ParentID 1, ParentSchemaID 29: relation "eventlog" (12): processed
ParentID 1, ParentSchemaID 29: relation "rangelog" (13): processed
ParentID 1, ParentSchemaID 29: relation "ui" (14): processed
ParentID 1, ParentSchemaID 29: relation "jobs" (15): processed
ParentID 1, ParentSchemaID 29: relation "web_sessions" (19): processed
ParentID 1, ParentSchemaID 29: relation "table_statistics" (20): processed
ParentID 1, ParentSchemaID 29: relation "locations" (21): processed
ParentID 1, ParentSchemaID 29: relation "role_members" (23): processed
ParentID 1, ParentSchemaID 29: relation "comments" (24): processed
ParentID 1, ParentSchemaID 29: relation "replication_constraint_stats" (25): processed
ParentID 1, ParentSchemaID 29: relation "replication_critical_localities" (26): processed
ParentID 1, ParentSchemaID 29: relation "replication_stats" (27): processed
ParentID 1, ParentSchemaID 29: relation "reports_meta" (28): processed
ParentID 1, ParentSchemaID 29: relation "namespace" (30): processed
ParentID 1, ParentSchemaID 29: relation "protected_ts_meta" (31): processed
ParentID 1, ParentSchemaID 29: relation "protected_ts_records" (32): processed
ParentID 1, ParentSchemaID 29: relation "role_options" (33): processed
ParentID 1, ParentSchemaID 29: relation "statement_bundle_chunks" (34): processed
ParentID 1, ParentSchemaID 29: relation "statement_diagnostics_requests" (35): processed
ParentID 1, ParentSchemaID 29: relation "statement_diagnostics" (36): processed
ParentID 1, ParentSchemaID 29: relation "scheduled_jobs" (37): processed
ParentID 1, ParentSchemaID 29: relation "sqlliveness" (39): processed
ParentID 0, ParentSchemaID 0: database "defaultdb" (50): processed
ParentID 0, ParentSchemaID 0: database "postgres" (51): processed
ParentID 52, ParentSchemaID 29: relation "users" (53): referenced database ID 52: referenced descriptor not found
ParentID 52, ParentSchemaID 29: relation "users" (53): processed
ParentID 52, ParentSchemaID 29: relation "vehicles" (54): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users"
ParentID 52, ParentSchemaID 29: relation "vehicles" (54): processed
ParentID 52, ParentSchemaID 29: relation "rides" (55): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users"
ParentID 52, ParentSchemaID 29: relation "rides" (55): constraint id was missing for constraint: FOREIGN KEY with name "fk_vehicle_city_ref_vehicles"
ParentID 52, ParentSchemaID 29: relation "rides" (55): processed
ParentID 52, ParentSchemaID 29: relation "vehicle_location_histories" (56): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_rides"
ParentID 52, ParentSchemaID 29: relation "vehicle_location_histories" (56): processed
ParentID 52, ParentSchemaID 29: relation "promo_codes" (57): referenced database ID 52: referenced descriptor not found
ParentID 52, ParentSchemaID 29: relation "promo_codes" (57): processed
ParentID 52, ParentSchemaID 29: relation "user_promo_codes" (58): constraint id was missing for constraint: FOREIGN KEY with name "fk_city_ref_users"
ParentID 52, ParentSchemaID 29: relation "user_promo_codes" (58): processed
ParentID 0, ParentSchemaID 0: namespace entry "defaultdb" (50): processed
ParentID 0, ParentSchemaID 0: namespace entry "movr" (52): descriptor not found
ParentID 0, ParentSchemaID 0: namespace entry "postgres" (51): processed
ParentID 0, ParentSchemaID 0: namespace entry "system" (1): processed
ParentID 1, ParentSchemaID 0: namespace entry "public" (29): processed
ParentID 1, ParentSchemaID 29: namespace entry "comments" (24): processed
ParentID 1, ParentSchemaID 29: namespace entry "descriptor" (3): processed
ParentID 1, ParentSchemaID 29: namespace entry "eventlog" (12): processed
ParentID 1, ParentSchemaID 29: namespace entry "jobs" (15): processed
ParentID 1, ParentSchemaID 29: namespace entry "lease" (11): processed
ParentID 1, ParentSchemaID 29: namespace entry "locations" (21): processed
ParentID 1, ParentSchemaID 29: namespace entry "namespace" (30): processed
ParentID 1, ParentSchemaID 29: namespace entry "protected_ts_meta" (31): processed
ParentID 1, ParentSchemaID 29: namespace entry "protected_ts_records" (32): processed
ParentID 1, ParentSchemaID 29: namespace entry "rangelog" (13): processed
ParentID 1, ParentSchemaID 29: namespace entry "replication_constraint_stats" (25): processed
ParentID 1, ParentSchemaID 29: namespace entry "replication_critical_localities" (26): processed
ParentID 1, ParentSchemaID 29: namespace entry "replication_stats" (27): processed
ParentID 1, ParentSchemaID 29: namespace entry "reports_meta" (28): processed
ParentID 1, ParentSchemaID 29: namespace entry "role_members" (23): processed
ParentID 1, ParentSchemaID 29: namespace entry "role_options" (33): processed
ParentID 1, ParentSchemaID 29: namespace entry "scheduled_jobs" (37): processed
ParentID 1, ParentSchemaID 29: namespace entry "settings" (6): processed
ParentID 1, ParentSchemaID 29: namespace entry "sqlliveness" (39): processed
ParentID 1, ParentSchemaID 29: namespace entry "statement_bundle_chunks" (34): processed
ParentID 1, ParentSchemaID 29: namespace entry "statement_diagnostics" (36): processed
ParentID 1, ParentSchemaID 29: namespace entry "statement_diagnostics_requests" (35): processed
ParentID 1, ParentSchemaID 29: namespace entry "table_statistics" (20): processed
ParentID 1, ParentSchemaID 29: namespace entry "tenants" (8): processed
ParentID 1, ParentSchemaID 29: namespace entry "ui" (14): processed
ParentID 1, ParentSchemaID 29: namespace entry "users" (4): processed
ParentID 1, ParentSchemaID 29: namespace entry "web_sessions" (19): processed
ParentID 1, ParentSchemaID 29: namespace entry "zones" (5): processed
ParentID 50, ParentSchemaID 0: namespace entry "public" (29): processed
ParentID 51, ParentSchemaID 0: namespace entry "public" (29): processed
ParentID 52, ParentSchemaID 0: namespace entry "public" (29): processed
ParentID 52, ParentSchemaID 29: namespace entry "promo_codes" (57): processed
ParentID 52, ParentSchemaID 29: namespace entry "rides" (55): processed
ParentID 52, ParentSchemaID 29: namespace entry "user_promo_codes" (58): processed
ParentID 52, ParentSchemaID 29: namespace entry "users" (53): processed
ParentID 52, ParentSchemaID 29: namespace entry "vehicle_location_histories" (56): processed
ParentID 52, ParentSchemaID 29: namespace entry "vehicles" (54): processed
Examining 2 jobs...
Processing job 587337426939772929
Processing job 587337426984566785
job 587337426984566785: running schema change GC refers to missing table descriptor(s) [59]; existing descriptors that still need to be dropped []; job safe to delete: true.
ERROR: validation failed
| pkg/cli/testdata/doctor/test_examine_zipdir_verbose | 1 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.00017648008360993117,
0.00017309670511167496,
0.00016898612375371158,
0.00017266999930143356,
0.000002563330099292216
] |
{
"id": 11,
"code_window": [
"\t\t\tfk.ConstraintID = nextConstraintID()\n",
"\t\t}\n",
"\t}\n",
"\tfor i := range desc.OutboundFKs {\n",
"\t\tfk := desc.OutboundFKs[i]\n",
"\t\tif fk.ConstraintID == 0 {\n",
"\t\t\tfk.ConstraintID = nextConstraintID()\n",
"\t\t}\n",
"\t}\n",
"\tfor i := range desc.UniqueWithoutIndexConstraints {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tfk := &desc.OutboundFKs[i]\n"
],
"file_path": "pkg/sql/catalog/tabledesc/table_desc_builder.go",
"type": "replace",
"edit_start_line_idx": 707
} | statement ok
CREATE TABLE a (a INT, b INT, c INT4, PRIMARY KEY (a, b));
INSERT INTO a SELECT g//2, g, g FROM generate_series(0,2000) g(g)
# Mismatched constant type in projection. Not handled yet but should fall back
# gracefully.
query I
SELECT (a + 1.0::DECIMAL)::INT FROM a LIMIT 1
----
1
# Mismatched column types in projection. Not handled yet but should fall back
# gracefully.
statement ok
CREATE TABLE intdecfloat (a INT, b DECIMAL, c INT4, d INT2, e FLOAT8);
INSERT INTO intdecfloat VALUES (1, 2.0, 3, 4, 3.5)
query I
SELECT (a + b)::INT FROM intdecfloat
----
3
# Tests for #39417
statement ok
CREATE TABLE t39417 (x int8);
INSERT INTO t39417 VALUES (10)
query R
select (x/1) from t39417
----
10
# Regression test for CASE operator and flat bytes.
statement ok
CREATE TABLE t44624(c0 STRING, c1 BOOL); INSERT INTO t44624(rowid, c0, c1) VALUES (0, '', true), (1, '', NULL)
query TB rowsort
SELECT * FROM t44624 ORDER BY CASE WHEN c1 IS NULL THEN c0 WHEN true THEN c0 END
----
· true
· NULL
# Test that unsupported post process specs get wrapped in the vectorized engine.
statement ok
CREATE TABLE mixed_type_a (a INT, b TIMESTAMPTZ);
CREATE TABLE mixed_type_b (a INT, b INTERVAL, c TIMESTAMP);
INSERT INTO mixed_type_a VALUES (0, 0::TIMESTAMPTZ);
INSERT INTO mixed_type_b VALUES (0, INTERVAL '0 days', 0::TIMESTAMP)
query B
SELECT b > now() - interval '1 day' FROM mixed_type_a
----
false
# Merge join ON expressions also get wrapped.
query ITITT
SELECT * FROM mixed_type_a AS a INNER MERGE JOIN mixed_type_b AS b ON a.a = b.a AND a.b < (now() - b.b)
----
0 1970-01-01 00:00:00 +0000 UTC 0 00:00:00 1970-01-01 00:00:00 +0000 +0000
# So do hash inner hash join ON expressions.
query ITITT
SELECT * FROM mixed_type_a AS a JOIN mixed_type_b AS b ON a.a = b.a AND a.b < (now() - b.b)
----
0 1970-01-01 00:00:00 +0000 UTC 0 00:00:00 1970-01-01 00:00:00 +0000 +0000
# Regression for flat bytes vector not being reset when it is reused by a
# projecting operator. Interestingly, this query (originally placed in
# 'vectorize' file) contains a subquery which is handled as core.LocalPlanNode
# which we cannot wrap, so it cannot be run with experimental_always setting.
# I'm not sure whether it serves the purpose with which it was added, but we
# might as well keep the query.
query TTT
WITH
with_194015 (col_1548014)
AS (
SELECT
*
FROM
(
VALUES
(('-28 years -2 mons -677 days -11:53:30.528699':::INTERVAL::INTERVAL + '11:55:41.419498':::TIME::TIME)::TIME + '1973-01-24':::DATE::DATE),
('1970-01-11 01:38:09.000155+00:00':::TIMESTAMP),
('1970-01-09 07:04:13.000247+00:00':::TIMESTAMP),
('1970-01-07 14:19:52.000951+00:00':::TIMESTAMP),
(NULL)
)
AS tab_240443 (col_1548014)
),
with_194016 (col_1548015, col_1548016, col_1548017)
AS (
SELECT
*
FROM
(
VALUES
(
'160.182.25.199/22':::INET::INET << 'c2af:30cb:5db8:bb79:4d11:2d0:1de8:bcea/59':::INET::INET,
'09:14:05.761109':::TIME::TIME + '4 years 7 mons 345 days 23:43:13.325036':::INTERVAL::INTERVAL,
B'0101010110101011101001111010100011001111001110001000101100011001101'
),
(false, '14:36:41.282187':::TIME, B'011111111011001100000001101101011111110110010011110100110111100')
)
AS tab_240444 (col_1548015, col_1548016, col_1548017)
),
with_194017 (col_1548018)
AS (SELECT * FROM (VALUES ('43a30bc5-e412-426d-b99a-65783a7ed445':::UUID), (NULL), (crdb_internal.cluster_id()::UUID)) AS tab_240445 (col_1548018))
SELECT
CASE
WHEN false THEN age('1970-01-09 08:48:24.000568+00:00':::TIMESTAMPTZ::TIMESTAMPTZ, '1970-01-07 08:40:45.000483+00:00':::TIMESTAMPTZ::TIMESTAMPTZ)::INTERVAL
ELSE (
(
(-0.02805450661234963150):::DECIMAL::DECIMAL
* array_position(
(gen_random_uuid()::UUID::UUID || (NULL::UUID || NULL::UUID[])::UUID[])::UUID[],
'5f29920d-7db1-4efc-b1cc-d1a7d0bcf145':::UUID::UUID
)::INT8::INT8
)::DECIMAL
* age('1970-01-04 07:17:45.000268+00:00':::TIMESTAMPTZ::TIMESTAMPTZ, NULL::TIMESTAMPTZ)::INTERVAL::INTERVAL
)
END::INTERVAL
+ '-21 years -10 mons -289 days -13:27:05.205069':::INTERVAL::INTERVAL
AS col_1548019,
'1984-01-07':::DATE AS col_1548020,
'f96fd19a-d2a9-4d98-81dd-97e3fc2a45d2':::UUID AS col_1548022
FROM
with_194015
ORDER BY
with_194015.col_1548014 DESC
LIMIT
4:::INT8;
----
NULL 1984-01-07 00:00:00 +0000 +0000 f96fd19a-d2a9-4d98-81dd-97e3fc2a45d2
NULL 1984-01-07 00:00:00 +0000 +0000 f96fd19a-d2a9-4d98-81dd-97e3fc2a45d2
NULL 1984-01-07 00:00:00 +0000 +0000 f96fd19a-d2a9-4d98-81dd-97e3fc2a45d2
NULL 1984-01-07 00:00:00 +0000 +0000 f96fd19a-d2a9-4d98-81dd-97e3fc2a45d2
| pkg/sql/logictest/testdata/logic_test/vectorize_unsupported | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.00017259229207411408,
0.0001669108314672485,
0.00015999564493540674,
0.00016820983728393912,
0.000003836402811430162
] |
{
"id": 11,
"code_window": [
"\t\t\tfk.ConstraintID = nextConstraintID()\n",
"\t\t}\n",
"\t}\n",
"\tfor i := range desc.OutboundFKs {\n",
"\t\tfk := desc.OutboundFKs[i]\n",
"\t\tif fk.ConstraintID == 0 {\n",
"\t\t\tfk.ConstraintID = nextConstraintID()\n",
"\t\t}\n",
"\t}\n",
"\tfor i := range desc.UniqueWithoutIndexConstraints {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tfk := &desc.OutboundFKs[i]\n"
],
"file_path": "pkg/sql/catalog/tabledesc/table_desc_builder.go",
"type": "replace",
"edit_start_line_idx": 707
} | alter_zone_range_stmt ::=
'ALTER' 'RANGE' a_expr 'CONFIGURE' 'ZONE' 'USING' variable '=' 'COPY' 'FROM' 'PARENT' ( ( ',' variable '=' value | ',' variable '=' 'COPY' 'FROM' 'PARENT' ) )*
| 'ALTER' 'RANGE' a_expr 'CONFIGURE' 'ZONE' 'USING' variable '=' value ( ( ',' variable '=' value | ',' variable '=' 'COPY' 'FROM' 'PARENT' ) )*
| 'ALTER' 'RANGE' a_expr 'CONFIGURE' 'ZONE' 'DISCARD'
| docs/generated/sql/bnf/alter_zone_range_stmt.bnf | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.0001668343466008082,
0.0001668343466008082,
0.0001668343466008082,
0.0001668343466008082,
0
] |
{
"id": 11,
"code_window": [
"\t\t\tfk.ConstraintID = nextConstraintID()\n",
"\t\t}\n",
"\t}\n",
"\tfor i := range desc.OutboundFKs {\n",
"\t\tfk := desc.OutboundFKs[i]\n",
"\t\tif fk.ConstraintID == 0 {\n",
"\t\t\tfk.ConstraintID = nextConstraintID()\n",
"\t\t}\n",
"\t}\n",
"\tfor i := range desc.UniqueWithoutIndexConstraints {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tfk := &desc.OutboundFKs[i]\n"
],
"file_path": "pkg/sql/catalog/tabledesc/table_desc_builder.go",
"type": "replace",
"edit_start_line_idx": 707
} | // Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import Long from "long";
import { CollectedStatementStatistics } from ".";
export const statementsWithSameIdButDifferentNodeId: CollectedStatementStatistics[] = [
{
key: {
key_data: {
query:
"UPDATE system.jobs SET claim_session_id = _ WHERE ((claim_session_id != $1) AND (status IN (_, _, __more3__))) AND (NOT crdb_internal.sql_liveness_is_alive(claim_session_id))",
app: "$ internal-expire-sessions",
distSQL: false,
failed: false,
implicit_txn: true,
vec: false,
full_scan: false,
},
node_id: 4,
},
stats: {
count: new Long(25),
first_attempt_count: new Long(25),
max_retries: new Long(0),
legacy_last_err: "",
num_rows: { mean: 0, squared_diffs: 0 },
parse_lat: { mean: 0, squared_diffs: 0 },
plan_lat: { mean: 0.00059666132, squared_diffs: 7.147805595954399e-7 },
run_lat: { mean: 0.00514530948, squared_diffs: 0.0046229060751506665 },
service_lat: {
mean: 0.012356466080000001,
squared_diffs: 0.019981287541202375,
},
overhead_lat: {
mean: 0.00661449528,
squared_diffs: 0.005505204936792645,
},
legacy_last_err_redacted: "",
sensitive_info: {
last_err: "",
most_recent_plan_description: {
name: "update",
attrs: [
{ key: "table", value: "jobs" },
{ key: "set", value: "claim_session_id" },
{ key: "auto commit", value: "" },
],
children: [
{
name: "render",
children: [
{
name: "filter",
attrs: [
{
key: "filter",
value:
"(claim_session_id != _) AND (NOT crdb_internal.sql_liveness_is_alive(claim_session_id))",
},
],
children: [
{
name: "index join",
attrs: [{ key: "table", value: "jobs@primary" }],
children: [
{
name: "scan",
attrs: [
{ key: "missing stats", value: "" },
{
key: "table",
value: "jobs@jobs_status_created_idx",
},
{ key: "spans", value: "5 spans" },
],
},
],
},
],
},
],
},
],
},
most_recent_plan_timestamp: {
seconds: new Long(1614851546),
nanos: 211741417,
},
},
bytes_read: { mean: 0, squared_diffs: 0 },
rows_read: { mean: 0, squared_diffs: 0 },
rows_written: { mean: 1, squared_diffs: 0 },
last_exec_timestamp: {
seconds: Long.fromInt(1599670290),
nanos: 111613000,
},
nodes: [Long.fromInt(1), Long.fromInt(2)],
exec_stats: {
count: new Long(0),
network_bytes: { mean: 0, squared_diffs: 0 },
max_mem_usage: { mean: 0, squared_diffs: 0 },
contention_time: { mean: 0, squared_diffs: 0 },
network_messages: { mean: 0, squared_diffs: 0 },
max_disk_usage: { mean: 0, squared_diffs: 0 },
},
},
id: new Long(8717981371097536892),
},
{
key: {
key_data: {
query:
"UPDATE system.jobs SET claim_session_id = _ WHERE ((claim_session_id != $1) AND (status IN (_, _, __more3__))) AND (NOT crdb_internal.sql_liveness_is_alive(claim_session_id))",
app: "$ internal-expire-sessions",
distSQL: false,
failed: false,
implicit_txn: true,
vec: false,
full_scan: false,
},
node_id: 3,
},
stats: {
count: new Long(25),
first_attempt_count: new Long(25),
max_retries: new Long(0),
legacy_last_err: "",
num_rows: { mean: 0, squared_diffs: 0 },
parse_lat: { mean: 0, squared_diffs: 0 },
plan_lat: { mean: 0.00060528624, squared_diffs: 5.477748852385602e-7 },
run_lat: { mean: 0.0016260668, squared_diffs: 0.000031665565684372014 },
service_lat: {
mean: 0.00436566136,
squared_diffs: 0.00015617540178032176,
},
overhead_lat: {
mean: 0.00213430832,
squared_diffs: 0.00009059206052710744,
},
legacy_last_err_redacted: "",
sensitive_info: {
last_err: "",
most_recent_plan_description: {
name: "update",
attrs: [
{ key: "table", value: "jobs" },
{ key: "set", value: "claim_session_id" },
{ key: "auto commit", value: "" },
],
children: [
{
name: "render",
children: [
{
name: "filter",
attrs: [
{
key: "filter",
value:
"(claim_session_id != _) AND (NOT crdb_internal.sql_liveness_is_alive(claim_session_id))",
},
],
children: [
{
name: "index join",
attrs: [{ key: "table", value: "jobs@primary" }],
children: [
{
name: "scan",
attrs: [
{ key: "missing stats", value: "" },
{
key: "table",
value: "jobs@jobs_status_created_idx",
},
{ key: "spans", value: "5 spans" },
],
},
],
},
],
},
],
},
],
},
most_recent_plan_timestamp: {
seconds: new Long(1614851546),
nanos: 95596942,
},
},
bytes_read: {
mean: 47.24000000000001,
squared_diffs: 1338970.5599999998,
},
rows_read: {
mean: 0.07999999999999999,
squared_diffs: 3.8399999999999994,
},
rows_written: {
mean: 1,
squared_diffs: 0.2,
},
last_exec_timestamp: {
seconds: Long.fromInt(1599670272),
nanos: 111613000,
},
nodes: [Long.fromInt(1), Long.fromInt(2)],
exec_stats: {
count: new Long(0),
network_bytes: { mean: 0, squared_diffs: 0 },
max_mem_usage: { mean: 0, squared_diffs: 0 },
contention_time: { mean: 0, squared_diffs: 0 },
network_messages: { mean: 0, squared_diffs: 0 },
max_disk_usage: { mean: 0, squared_diffs: 0 },
},
},
id: new Long(8717981371097536892),
},
{
key: {
key_data: {
query:
"UPDATE system.jobs SET claim_session_id = _ WHERE ((claim_session_id != $1) AND (status IN (_, _, __more3__))) AND (NOT crdb_internal.sql_liveness_is_alive(claim_session_id))",
app: "$ internal-expire-sessions",
distSQL: false,
failed: false,
implicit_txn: true,
vec: false,
full_scan: false,
},
node_id: 6,
},
stats: {
count: new Long(25),
first_attempt_count: new Long(25),
max_retries: new Long(0),
legacy_last_err: "",
num_rows: { mean: 0, squared_diffs: 0 },
parse_lat: { mean: 0, squared_diffs: 0 },
plan_lat: { mean: 0.00062084732, squared_diffs: 4.894461542734397e-7 },
run_lat: {
mean: 0.0033482228399999993,
squared_diffs: 0.0007254204094330012,
},
service_lat: {
mean: 0.007378451560000001,
squared_diffs: 0.0025513393104186605,
},
overhead_lat: {
mean: 0.0034093813999999997,
squared_diffs: 0.000581731831513146,
},
legacy_last_err_redacted: "",
sensitive_info: {
last_err: "",
most_recent_plan_description: {
name: "update",
attrs: [
{ key: "table", value: "jobs" },
{ key: "set", value: "claim_session_id" },
{ key: "auto commit", value: "" },
],
children: [
{
name: "render",
children: [
{
name: "filter",
attrs: [
{
key: "filter",
value:
"(claim_session_id != _) AND (NOT crdb_internal.sql_liveness_is_alive(claim_session_id))",
},
],
children: [
{
name: "index join",
attrs: [{ key: "table", value: "jobs@primary" }],
children: [
{
name: "scan",
attrs: [
{ key: "missing stats", value: "" },
{
key: "table",
value: "jobs@jobs_status_created_idx",
},
{ key: "spans", value: "5 spans" },
],
},
],
},
],
},
],
},
],
},
most_recent_plan_timestamp: {
seconds: new Long(1614851546),
nanos: 349254376,
},
},
bytes_read: {
mean: 59.35999999999999,
squared_diffs: 2114165.7600000007,
},
rows_read: {
mean: 0.07999999999999999,
squared_diffs: 3.8399999999999994,
},
rows_written: {
mean: 0.01999999999999999,
squared_diffs: 1.899,
},
last_exec_timestamp: {
seconds: Long.fromInt(1599670192),
nanos: 111613000,
},
nodes: [Long.fromInt(1), Long.fromInt(3)],
exec_stats: {
count: new Long(0),
network_bytes: { mean: 0, squared_diffs: 0 },
max_mem_usage: { mean: 0, squared_diffs: 0 },
contention_time: { mean: 0, squared_diffs: 0 },
network_messages: { mean: 0, squared_diffs: 0 },
max_disk_usage: { mean: 0, squared_diffs: 0 },
},
},
id: new Long(8717981371097536892),
},
{
key: {
key_data: {
query:
"UPDATE system.jobs SET claim_session_id = _ WHERE ((claim_session_id != $1) AND (status IN (_, _, __more3__))) AND (NOT crdb_internal.sql_liveness_is_alive(claim_session_id))",
app: "$ internal-expire-sessions",
distSQL: false,
failed: false,
implicit_txn: true,
vec: false,
full_scan: false,
},
node_id: 7,
},
stats: {
count: new Long(25),
first_attempt_count: new Long(25),
max_retries: new Long(0),
legacy_last_err: "",
num_rows: { mean: 0, squared_diffs: 0 },
parse_lat: { mean: 0, squared_diffs: 0 },
plan_lat: { mean: 0.0006217386, squared_diffs: 9.382557539239999e-7 },
run_lat: {
mean: 0.0023451749200000004,
squared_diffs: 0.00019083215761566384,
},
service_lat: {
mean: 0.00556639388,
squared_diffs: 0.0003733923276328406,
},
overhead_lat: {
mean: 0.0025994803599999994,
squared_diffs: 0.00010200473233901374,
},
legacy_last_err_redacted: "",
sensitive_info: {
last_err: "",
most_recent_plan_description: {
name: "update",
attrs: [
{ key: "table", value: "jobs" },
{ key: "set", value: "claim_session_id" },
{ key: "auto commit", value: "" },
],
children: [
{
name: "render",
children: [
{
name: "filter",
attrs: [
{
key: "filter",
value:
"(claim_session_id != _) AND (NOT crdb_internal.sql_liveness_is_alive(claim_session_id))",
},
],
children: [
{
name: "index join",
attrs: [{ key: "table", value: "jobs@primary" }],
children: [
{
name: "scan",
attrs: [
{ key: "missing stats", value: "" },
{
key: "table",
value: "jobs@jobs_status_created_idx",
},
{ key: "spans", value: "5 spans" },
],
},
],
},
],
},
],
},
],
},
most_recent_plan_timestamp: {
seconds: new Long(1614851546),
nanos: 459389689,
},
},
bytes_read: { mean: 0, squared_diffs: 0 },
rows_read: { mean: 0, squared_diffs: 0 },
rows_written: { mean: 1, squared_diffs: 0 },
last_exec_timestamp: {
seconds: Long.fromInt(1599670299),
nanos: 111613000,
},
nodes: [Long.fromInt(1)],
exec_stats: {
count: new Long(0),
network_bytes: { mean: 0, squared_diffs: 0 },
max_mem_usage: { mean: 0, squared_diffs: 0 },
contention_time: { mean: 0, squared_diffs: 0 },
network_messages: { mean: 0, squared_diffs: 0 },
max_disk_usage: { mean: 0, squared_diffs: 0 },
},
},
id: new Long(8717981371097536892),
},
{
key: {
key_data: {
query:
"UPDATE system.jobs SET claim_session_id = _ WHERE ((claim_session_id != $1) AND (status IN (_, _, __more3__))) AND (NOT crdb_internal.sql_liveness_is_alive(claim_session_id))",
app: "$ internal-expire-sessions",
distSQL: false,
failed: false,
implicit_txn: true,
vec: false,
full_scan: false,
},
node_id: 9,
},
stats: {
count: new Long(25),
first_attempt_count: new Long(25),
max_retries: new Long(0),
legacy_last_err: "",
num_rows: { mean: 0, squared_diffs: 0 },
parse_lat: { mean: 0, squared_diffs: 0 },
plan_lat: { mean: 0.00060384184, squared_diffs: 3.7137755547336e-7 },
run_lat: {
mean: 0.0013325950800000001,
squared_diffs: 0.000014319016889955842,
},
service_lat: {
mean: 0.0037200103599999996,
squared_diffs: 0.00003218580528155976,
},
overhead_lat: {
mean: 0.0017835734399999999,
squared_diffs: 0.000003415912313800157,
},
legacy_last_err_redacted: "",
sensitive_info: {
last_err: "",
most_recent_plan_description: {
name: "update",
attrs: [
{ key: "table", value: "jobs" },
{ key: "set", value: "claim_session_id" },
{ key: "auto commit", value: "" },
],
children: [
{
name: "render",
children: [
{
name: "filter",
attrs: [
{
key: "filter",
value:
"(claim_session_id != _) AND (NOT crdb_internal.sql_liveness_is_alive(claim_session_id))",
},
],
children: [
{
name: "index join",
attrs: [{ key: "table", value: "jobs@primary" }],
children: [
{
name: "scan",
attrs: [
{ key: "missing stats", value: "" },
{
key: "table",
value: "jobs@jobs_status_created_idx",
},
{ key: "spans", value: "5 spans" },
],
},
],
},
],
},
],
},
],
},
most_recent_plan_timestamp: {
seconds: new Long(1614851546),
nanos: 651098561,
},
},
bytes_read: { mean: 0, squared_diffs: 0 },
rows_read: { mean: 0, squared_diffs: 0 },
rows_written: { mean: 1, squared_diffs: 0 },
last_exec_timestamp: {
seconds: Long.fromInt(1599670242),
nanos: 111613000,
},
nodes: [Long.fromInt(1), Long.fromInt(2), Long.fromInt(4)],
exec_stats: {
count: new Long(0),
network_bytes: { mean: 0, squared_diffs: 0 },
max_mem_usage: { mean: 0, squared_diffs: 0 },
contention_time: { mean: 0, squared_diffs: 0 },
network_messages: { mean: 0, squared_diffs: 0 },
max_disk_usage: { mean: 0, squared_diffs: 0 },
},
},
id: new Long(8717981371097536892),
},
{
key: {
key_data: {
query:
"UPDATE system.jobs SET claim_session_id = _ WHERE ((claim_session_id != $1) AND (status IN (_, _, __more3__))) AND (NOT crdb_internal.sql_liveness_is_alive(claim_session_id))",
app: "$ internal-expire-sessions",
distSQL: false,
failed: false,
implicit_txn: true,
vec: false,
full_scan: false,
},
node_id: 8,
},
stats: {
count: new Long(25),
first_attempt_count: new Long(25),
max_retries: new Long(0),
legacy_last_err: "",
num_rows: { mean: 0, squared_diffs: 0 },
parse_lat: { mean: 0, squared_diffs: 0 },
plan_lat: { mean: 0.00060733856, squared_diffs: 9.9016117651016e-7 },
run_lat: {
mean: 0.0016457390799999995,
squared_diffs: 0.00004348354674075585,
},
service_lat: {
mean: 0.00508726124,
squared_diffs: 0.0006775265878511066,
},
overhead_lat: {
mean: 0.0028341836000000003,
squared_diffs: 0.0004969353409856141,
},
legacy_last_err_redacted: "",
sensitive_info: {
last_err: "",
most_recent_plan_description: {
name: "update",
attrs: [
{ key: "table", value: "jobs" },
{ key: "set", value: "claim_session_id" },
{ key: "auto commit", value: "" },
],
children: [
{
name: "render",
children: [
{
name: "filter",
attrs: [
{
key: "filter",
value:
"(claim_session_id != _) AND (NOT crdb_internal.sql_liveness_is_alive(claim_session_id))",
},
],
children: [
{
name: "index join",
attrs: [{ key: "table", value: "jobs@primary" }],
children: [
{
name: "scan",
attrs: [
{ key: "missing stats", value: "" },
{
key: "table",
value: "jobs@jobs_status_created_idx",
},
{ key: "spans", value: "5 spans" },
],
},
],
},
],
},
],
},
],
},
most_recent_plan_timestamp: {
seconds: new Long(1614851546),
nanos: 541858988,
},
},
bytes_read: {
mean: 75.36000000000003,
squared_diffs: 3407477.7600000002,
},
rows_read: {
mean: 0.07999999999999999,
squared_diffs: 3.8399999999999994,
},
rows_written: {
mean: 0.19999,
squared_diffs: 0,
},
last_exec_timestamp: {
seconds: Long.fromInt(1599650292),
nanos: 111613000,
},
nodes: [Long.fromInt(1), Long.fromInt(2)],
exec_stats: {
count: new Long(0),
network_bytes: { mean: 0, squared_diffs: 0 },
max_mem_usage: { mean: 0, squared_diffs: 0 },
contention_time: { mean: 0, squared_diffs: 0 },
network_messages: { mean: 0, squared_diffs: 0 },
max_disk_usage: { mean: 0, squared_diffs: 0 },
},
},
id: new Long(8717981371097536892),
},
{
key: {
key_data: {
query:
"UPDATE system.jobs SET claim_session_id = _ WHERE ((claim_session_id != $1) AND (status IN (_, _, __more3__))) AND (NOT crdb_internal.sql_liveness_is_alive(claim_session_id))",
app: "$ internal-expire-sessions",
distSQL: false,
failed: false,
implicit_txn: true,
vec: false,
full_scan: false,
},
node_id: 1,
},
stats: {
count: new Long(25),
first_attempt_count: new Long(25),
max_retries: new Long(0),
legacy_last_err: "",
num_rows: { mean: 0, squared_diffs: 0 },
parse_lat: { mean: 0, squared_diffs: 0 },
plan_lat: {
mean: 0.0005529296400000001,
squared_diffs: 1.2621076480776003e-7,
},
run_lat: {
mean: 0.0010534599600000001,
squared_diffs: 0.00000299611852526496,
},
service_lat: {
mean: 0.0033479916,
squared_diffs: 0.000013804212527590004,
},
overhead_lat: {
mean: 0.001741602,
squared_diffs: 0.000009894811044980005,
},
legacy_last_err_redacted: "",
sensitive_info: {
last_err: "",
most_recent_plan_description: {
name: "update",
attrs: [
{ key: "table", value: "jobs" },
{ key: "set", value: "claim_session_id" },
{ key: "auto commit", value: "" },
],
children: [
{
name: "render",
children: [
{
name: "filter",
attrs: [
{
key: "filter",
value:
"(claim_session_id != _) AND (NOT crdb_internal.sql_liveness_is_alive(claim_session_id))",
},
],
children: [
{
name: "index join",
attrs: [{ key: "table", value: "jobs@primary" }],
children: [
{
name: "scan",
attrs: [
{ key: "missing stats", value: "" },
{
key: "table",
value: "jobs@jobs_status_created_idx",
},
{ key: "spans", value: "5 spans" },
],
},
],
},
],
},
],
},
],
},
most_recent_plan_timestamp: {
seconds: new Long(1614851545),
nanos: 886049756,
},
},
bytes_read: { mean: 46.48, squared_diffs: 1296234.24 },
rows_read: {
mean: 0.07999999999999999,
squared_diffs: 3.8399999999999994,
},
rows_written: {
mean: 1,
squared_diffs: 0,
},
last_exec_timestamp: {
seconds: Long.fromInt(1599670282),
nanos: 111613000,
},
nodes: [Long.fromInt(1), Long.fromInt(2)],
exec_stats: {
count: new Long(0),
network_bytes: { mean: 0, squared_diffs: 0 },
max_mem_usage: { mean: 0, squared_diffs: 0 },
contention_time: { mean: 0, squared_diffs: 0 },
network_messages: { mean: 0, squared_diffs: 0 },
max_disk_usage: { mean: 0, squared_diffs: 0 },
},
},
id: new Long(8717981371097536892),
},
{
key: {
key_data: {
query:
"UPDATE system.jobs SET claim_session_id = _ WHERE ((claim_session_id != $1) AND (status IN (_, _, __more3__))) AND (NOT crdb_internal.sql_liveness_is_alive(claim_session_id))",
app: "$ internal-expire-sessions",
distSQL: false,
failed: false,
implicit_txn: true,
vec: false,
full_scan: false,
},
node_id: 5,
},
stats: {
count: new Long(25),
first_attempt_count: new Long(25),
max_retries: new Long(0),
legacy_last_err: "",
num_rows: { mean: 0, squared_diffs: 0 },
parse_lat: { mean: 0, squared_diffs: 0 },
plan_lat: {
mean: 0.0006839353200000001,
squared_diffs: 0.0000027050684666694405,
},
run_lat: {
mean: 0.004587737999999999,
squared_diffs: 0.002054554101549576,
},
service_lat: {
mean: 0.006800420800000001,
squared_diffs: 0.0022942135874811503,
},
overhead_lat: {
mean: 0.00152874748,
squared_diffs: 0.000020610108769158232,
},
legacy_last_err_redacted: "",
sensitive_info: {
last_err: "",
most_recent_plan_description: {
name: "update",
attrs: [
{ key: "table", value: "jobs" },
{ key: "set", value: "claim_session_id" },
{ key: "auto commit", value: "" },
],
children: [
{
name: "render",
children: [
{
name: "filter",
attrs: [
{
key: "filter",
value:
"(claim_session_id != _) AND (NOT crdb_internal.sql_liveness_is_alive(claim_session_id))",
},
],
children: [
{
name: "index join",
attrs: [{ key: "table", value: "jobs@primary" }],
children: [
{
name: "scan",
attrs: [
{ key: "missing stats", value: "" },
{
key: "table",
value: "jobs@jobs_status_created_idx",
},
{ key: "spans", value: "5 spans" },
],
},
],
},
],
},
],
},
],
},
most_recent_plan_timestamp: {
seconds: new Long(1614851546),
nanos: 307939903,
},
},
bytes_read: {
mean: 59.35999999999999,
squared_diffs: 2114165.7600000007,
},
rows_read: {
mean: 0.07999999999999999,
squared_diffs: 3.8399999999999994,
},
rows_written: {
mean: 1,
squared_diffs: 0,
},
last_exec_timestamp: {
seconds: Long.fromInt(1599670257),
nanos: 111613000,
},
nodes: [Long.fromInt(1), Long.fromInt(2)],
exec_stats: {
count: new Long(0),
network_bytes: { mean: 0, squared_diffs: 0 },
max_mem_usage: { mean: 0, squared_diffs: 0 },
contention_time: { mean: 0, squared_diffs: 0 },
network_messages: { mean: 0, squared_diffs: 0 },
max_disk_usage: { mean: 0, squared_diffs: 0 },
},
},
id: new Long(8717981371097536892),
},
{
key: {
key_data: {
query:
"UPDATE system.jobs SET claim_session_id = _ WHERE ((claim_session_id != $1) AND (status IN (_, _, __more3__))) AND (NOT crdb_internal.sql_liveness_is_alive(claim_session_id))",
app: "$ internal-expire-sessions",
distSQL: false,
failed: false,
implicit_txn: true,
vec: false,
full_scan: false,
},
node_id: 2,
},
stats: {
count: new Long(25),
first_attempt_count: new Long(25),
max_retries: new Long(0),
legacy_last_err: "",
num_rows: { mean: 0, squared_diffs: 0 },
parse_lat: { mean: 0, squared_diffs: 0 },
plan_lat: {
mean: 0.0013118371600000002,
squared_diffs: 0.0003047812983599774,
},
run_lat: { mean: 0.00097797752, squared_diffs: 0.000015702406008938238 },
service_lat: {
mean: 0.004671932679999999,
squared_diffs: 0.0013375429385049276,
},
overhead_lat: {
mean: 0.0023821180000000008,
squared_diffs: 0.0003571512515438199,
},
legacy_last_err_redacted: "",
sensitive_info: {
last_err: "",
most_recent_plan_description: {
name: "update",
attrs: [
{ key: "table", value: "jobs" },
{ key: "set", value: "claim_session_id" },
{ key: "auto commit", value: "" },
],
children: [
{
name: "render",
children: [
{
name: "filter",
attrs: [
{
key: "filter",
value:
"(claim_session_id != _) AND (NOT crdb_internal.sql_liveness_is_alive(claim_session_id))",
},
],
children: [
{
name: "index join",
attrs: [{ key: "table", value: "jobs@primary" }],
children: [
{
name: "scan",
attrs: [
{ key: "missing stats", value: "" },
{
key: "table",
value: "jobs@jobs_status_created_idx",
},
{ key: "spans", value: "5 spans" },
],
},
],
},
],
},
],
},
],
},
most_recent_plan_timestamp: {
seconds: new Long(1614851546),
nanos: 889864,
},
},
bytes_read: { mean: 47.07999999999999, squared_diffs: 1329915.84 },
rows_read: {
mean: 0.07999999999999999,
squared_diffs: 3.8399999999999994,
},
rows_written: {
mean: 1,
squared_diffs: 0,
},
last_exec_timestamp: {
seconds: Long.fromInt(1599670279),
nanos: 111613000,
},
nodes: [Long.fromInt(1), Long.fromInt(2)],
exec_stats: {
count: new Long(0),
network_bytes: { mean: 0, squared_diffs: 0 },
max_mem_usage: { mean: 0, squared_diffs: 0 },
contention_time: { mean: 0, squared_diffs: 0 },
network_messages: { mean: 0, squared_diffs: 0 },
max_disk_usage: { mean: 0, squared_diffs: 0 },
},
},
id: new Long(8717981371097536892),
},
];
| pkg/ui/workspaces/cluster-ui/src/util/appStats/appStats.fixture.ts | 0 | https://github.com/cockroachdb/cockroach/commit/0d8e15c112246a11db9d4e370b5e9bcfeac82aa8 | [
0.00017921942344401032,
0.0001732335367705673,
0.0001664242590777576,
0.0001732627279125154,
0.0000022139795419207076
] |
{
"id": 0,
"code_window": [
"import (\n",
"\t_ \"net/http/pprof\"\n",
"\t\"time\"\n",
"\n",
"\t\"k8s.io/kubernetes/pkg/api\"\n",
"\t\"k8s.io/kubernetes/pkg/api/unversioned\"\n",
"\t\"k8s.io/kubernetes/pkg/apis/componentconfig\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-proxy/app/options/options.go",
"type": "replace",
"edit_start_line_idx": 24
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package options contains flags for initializing a proxy.
package options
import (
_ "net/http/pprof"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/util"
"github.com/spf13/pflag"
)
const (
ExperimentalProxyModeAnnotation = "net.experimental.kubernetes.io/proxy-mode"
)
// ProxyServerConfig configures and runs a Kubernetes proxy server
type ProxyServerConfig struct {
componentconfig.KubeProxyConfiguration
ResourceContainer string
KubeAPIQPS float32
KubeAPIBurst int
ConfigSyncPeriod time.Duration
CleanupAndExit bool
NodeRef *api.ObjectReference
Master string
Kubeconfig string
}
func NewProxyConfig() *ProxyServerConfig {
return &ProxyServerConfig{
KubeProxyConfiguration: componentconfig.KubeProxyConfiguration{
BindAddress: "0.0.0.0",
HealthzPort: 10249,
HealthzBindAddress: "127.0.0.1",
OOMScoreAdj: util.IntPtr(qos.KubeProxyOOMScoreAdj),
ResourceContainer: "/kube-proxy",
IPTablesSyncPeriod: unversioned.Duration{30 * time.Second},
UDPIdleTimeout: unversioned.Duration{250 * time.Millisecond},
ConntrackMax: 256 * 1024, // 4x default (64k)
ConntrackTCPEstablishedTimeout: unversioned.Duration{Duration: 24 * time.Hour}, // 1 day (1/5 default)
},
KubeAPIQPS: 5.0,
KubeAPIBurst: 10,
ConfigSyncPeriod: 15 * time.Minute,
}
}
// AddFlags adds flags for a specific ProxyServer to the specified FlagSet
func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) {
fs.Var(componentconfig.IPVar{&s.BindAddress}, "bind-address", "The IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)")
fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
fs.IntVar(&s.HealthzPort, "healthz-port", s.HealthzPort, "The port to bind the health check server. Use 0 to disable.")
fs.Var(componentconfig.IPVar{&s.HealthzBindAddress}, "healthz-bind-address", "The IP address for the health check server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)")
fs.IntVar(s.OOMScoreAdj, "oom-score-adj", util.IntPtrDerefOr(s.OOMScoreAdj, qos.KubeProxyOOMScoreAdj), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]")
fs.StringVar(&s.ResourceContainer, "resource-container", s.ResourceContainer, "Absolute name of the resource-only container to create and run the Kube-proxy in (Default: /kube-proxy).")
fs.MarkDeprecated("resource-container", "This feature will be removed in a later release.")
fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization information (the master location is set by the master flag).")
fs.Var(componentconfig.PortRangeVar{&s.PortRange}, "proxy-port-range", "Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.")
fs.StringVar(&s.HostnameOverride, "hostname-override", s.HostnameOverride, "If non-empty, will use this string as identification instead of the actual hostname.")
fs.Var(&s.Mode, "proxy-mode", "Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the '"+ExperimentalProxyModeAnnotation+"' annotation if provided. Otherwise use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.")
fs.DurationVar(&s.IPTablesSyncPeriod.Duration, "iptables-sync-period", s.IPTablesSyncPeriod.Duration, "How often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.")
fs.DurationVar(&s.ConfigSyncPeriod, "config-sync-period", s.ConfigSyncPeriod, "How often configuration from the apiserver is refreshed. Must be greater than 0.")
fs.BoolVar(&s.MasqueradeAll, "masquerade-all", false, "If using the pure iptables proxy, SNAT everything")
fs.BoolVar(&s.CleanupAndExit, "cleanup-iptables", false, "If true cleanup iptables rules and exit.")
fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
fs.DurationVar(&s.UDPIdleTimeout.Duration, "udp-timeout", s.UDPIdleTimeout.Duration, "How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace")
fs.IntVar(&s.ConntrackMax, "conntrack-max", s.ConntrackMax, "Maximum number of NAT connections to track (0 to leave as-is)")
fs.DurationVar(&s.ConntrackTCPEstablishedTimeout.Duration, "conntrack-tcp-timeout-established", s.ConntrackTCPEstablishedTimeout.Duration, "Idle timeout for established TCP connections (0 to leave as-is)")
}
| cmd/kube-proxy/app/options/options.go | 1 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.10657811909914017,
0.0123298866674304,
0.00017101816774811596,
0.0003109227982349694,
0.03166518732905388
] |
{
"id": 0,
"code_window": [
"import (\n",
"\t_ \"net/http/pprof\"\n",
"\t\"time\"\n",
"\n",
"\t\"k8s.io/kubernetes/pkg/api\"\n",
"\t\"k8s.io/kubernetes/pkg/api/unversioned\"\n",
"\t\"k8s.io/kubernetes/pkg/apis/componentconfig\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-proxy/app/options/options.go",
"type": "replace",
"edit_start_line_idx": 24
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package credentialprovider
import (
"encoding/json"
"net"
"net/url"
"path/filepath"
"sort"
"strings"
docker "github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util/sets"
)
// DockerKeyring tracks a set of docker registry credentials, maintaining a
// reverse index across the registry endpoints. A registry endpoint is made
// up of a host (e.g. registry.example.com), but it may also contain a path
// (e.g. registry.example.com/foo) This index is important for two reasons:
// - registry endpoints may overlap, and when this happens we must find the
// most specific match for a given image
// - iterating a map does not yield predictable results
type DockerKeyring interface {
Lookup(image string) ([]docker.AuthConfiguration, bool)
}
// BasicDockerKeyring is a trivial map-backed implementation of DockerKeyring
type BasicDockerKeyring struct {
index []string
creds map[string][]docker.AuthConfiguration
}
// lazyDockerKeyring is an implementation of DockerKeyring that lazily
// materializes its dockercfg based on a set of dockerConfigProviders.
type lazyDockerKeyring struct {
Providers []DockerConfigProvider
}
func (dk *BasicDockerKeyring) Add(cfg DockerConfig) {
if dk.index == nil {
dk.index = make([]string, 0)
dk.creds = make(map[string][]docker.AuthConfiguration)
}
for loc, ident := range cfg {
creds := docker.AuthConfiguration{
Username: ident.Username,
Password: ident.Password,
Email: ident.Email,
}
parsed, err := url.Parse(loc)
if err != nil {
glog.Errorf("Entry %q in dockercfg invalid (%v), ignoring", loc, err)
continue
}
// The docker client allows exact matches:
// foo.bar.com/namespace
// Or hostname matches:
// foo.bar.com
// See ResolveAuthConfig in docker/registry/auth.go.
if parsed.Host != "" {
// NOTE: foo.bar.com comes through as Path.
dk.creds[parsed.Host] = append(dk.creds[parsed.Host], creds)
dk.index = append(dk.index, parsed.Host)
}
if (len(parsed.Path) > 0) && (parsed.Path != "/") {
key := parsed.Host + parsed.Path
dk.creds[key] = append(dk.creds[key], creds)
dk.index = append(dk.index, key)
}
}
eliminateDupes := sets.NewString(dk.index...)
dk.index = eliminateDupes.List()
// Update the index used to identify which credentials to use for a given
// image. The index is reverse-sorted so more specific paths are matched
// first. For example, if for the given image "quay.io/coreos/etcd",
// credentials for "quay.io/coreos" should match before "quay.io".
sort.Sort(sort.Reverse(sort.StringSlice(dk.index)))
}
const defaultRegistryHost = "index.docker.io/v1/"
// isDefaultRegistryMatch determines whether the given image will
// pull from the default registry (DockerHub) based on the
// characteristics of its name.
func isDefaultRegistryMatch(image string) bool {
parts := strings.SplitN(image, "/", 2)
if len(parts[0]) == 0 {
return false
}
if len(parts) == 1 {
// e.g. library/ubuntu
return true
}
if parts[0] == "docker.io" || parts[0] == "index.docker.io" {
// resolve docker.io/image and index.docker.io/image as default registry
return true
}
// From: http://blog.docker.com/2013/07/how-to-use-your-own-registry/
// Docker looks for either a “.” (domain separator) or “:” (port separator)
// to learn that the first part of the repository name is a location and not
// a user name.
return !strings.ContainsAny(parts[0], ".:")
}
// url.Parse requires a scheme, but ours don't have schemes. Adding a
// scheme to make url.Parse happy, then clear out the resulting scheme.
func parseSchemelessUrl(schemelessUrl string) (*url.URL, error) {
parsed, err := url.Parse("https://" + schemelessUrl)
if err != nil {
return nil, err
}
// clear out the resulting scheme
parsed.Scheme = ""
return parsed, nil
}
// split the host name into parts, as well as the port
func splitUrl(url *url.URL) (parts []string, port string) {
host, port, err := net.SplitHostPort(url.Host)
if err != nil {
// could not parse port
host, port = url.Host, ""
}
return strings.Split(host, "."), port
}
// overloaded version of urlsMatch, operating on strings instead of URLs.
func urlsMatchStr(glob string, target string) (bool, error) {
globUrl, err := parseSchemelessUrl(glob)
if err != nil {
return false, err
}
targetUrl, err := parseSchemelessUrl(target)
if err != nil {
return false, err
}
return urlsMatch(globUrl, targetUrl)
}
// check whether the given target url matches the glob url, which may have
// glob wild cards in the host name.
//
// Examples:
// globUrl=*.docker.io, targetUrl=blah.docker.io => match
// globUrl=*.docker.io, targetUrl=not.right.io => no match
//
// Note that we don't support wildcards in ports and paths yet.
func urlsMatch(globUrl *url.URL, targetUrl *url.URL) (bool, error) {
globUrlParts, globPort := splitUrl(globUrl)
targetUrlParts, targetPort := splitUrl(targetUrl)
if globPort != targetPort {
// port doesn't match
return false, nil
}
if len(globUrlParts) != len(targetUrlParts) {
// host name does not have the same number of parts
return false, nil
}
if !strings.HasPrefix(targetUrl.Path, globUrl.Path) {
// the path of the credential must be a prefix
return false, nil
}
for k, globUrlPart := range globUrlParts {
targetUrlPart := targetUrlParts[k]
matched, err := filepath.Match(globUrlPart, targetUrlPart)
if err != nil {
return false, err
}
if !matched {
// glob mismatch for some part
return false, nil
}
}
// everything matches
return true, nil
}
// Lookup implements the DockerKeyring method for fetching credentials based on image name.
// Multiple credentials may be returned if there are multiple potentially valid credentials
// available. This allows for rotation.
func (dk *BasicDockerKeyring) Lookup(image string) ([]docker.AuthConfiguration, bool) {
// range over the index as iterating over a map does not provide a predictable ordering
ret := []docker.AuthConfiguration{}
for _, k := range dk.index {
// both k and image are schemeless URLs because even though schemes are allowed
// in the credential configurations, we remove them in Add.
if matched, _ := urlsMatchStr(k, image); !matched {
continue
}
ret = append(ret, dk.creds[k]...)
}
if len(ret) > 0 {
return ret, true
}
// Use credentials for the default registry if provided, and appropriate
if auth, ok := dk.creds[defaultRegistryHost]; ok && isDefaultRegistryMatch(image) {
return auth, true
}
return []docker.AuthConfiguration{}, false
}
// Lookup implements the DockerKeyring method for fetching credentials
// based on image name.
func (dk *lazyDockerKeyring) Lookup(image string) ([]docker.AuthConfiguration, bool) {
keyring := &BasicDockerKeyring{}
for _, p := range dk.Providers {
keyring.Add(p.Provide())
}
return keyring.Lookup(image)
}
type FakeKeyring struct {
auth []docker.AuthConfiguration
ok bool
}
func (f *FakeKeyring) Lookup(image string) ([]docker.AuthConfiguration, bool) {
return f.auth, f.ok
}
// unionDockerKeyring delegates to a set of keyrings.
type unionDockerKeyring struct {
keyrings []DockerKeyring
}
func (k *unionDockerKeyring) Lookup(image string) ([]docker.AuthConfiguration, bool) {
authConfigs := []docker.AuthConfiguration{}
for _, subKeyring := range k.keyrings {
if subKeyring == nil {
continue
}
currAuthResults, _ := subKeyring.Lookup(image)
authConfigs = append(authConfigs, currAuthResults...)
}
return authConfigs, (len(authConfigs) > 0)
}
// MakeDockerKeyring inspects the passedSecrets to see if they contain any DockerConfig secrets. If they do,
// then a DockerKeyring is built based on every hit and unioned with the defaultKeyring.
// If they do not, then the default keyring is returned
func MakeDockerKeyring(passedSecrets []api.Secret, defaultKeyring DockerKeyring) (DockerKeyring, error) {
passedCredentials := []DockerConfig{}
for _, passedSecret := range passedSecrets {
if dockerConfigJsonBytes, dockerConfigJsonExists := passedSecret.Data[api.DockerConfigJsonKey]; (passedSecret.Type == api.SecretTypeDockerConfigJson) && dockerConfigJsonExists && (len(dockerConfigJsonBytes) > 0) {
dockerConfigJson := DockerConfigJson{}
if err := json.Unmarshal(dockerConfigJsonBytes, &dockerConfigJson); err != nil {
return nil, err
}
passedCredentials = append(passedCredentials, dockerConfigJson.Auths)
} else if dockercfgBytes, dockercfgExists := passedSecret.Data[api.DockerConfigKey]; (passedSecret.Type == api.SecretTypeDockercfg) && dockercfgExists && (len(dockercfgBytes) > 0) {
dockercfg := DockerConfig{}
if err := json.Unmarshal(dockercfgBytes, &dockercfg); err != nil {
return nil, err
}
passedCredentials = append(passedCredentials, dockercfg)
}
}
if len(passedCredentials) > 0 {
basicKeyring := &BasicDockerKeyring{}
for _, currCredentials := range passedCredentials {
basicKeyring.Add(currCredentials)
}
return &unionDockerKeyring{[]DockerKeyring{basicKeyring, defaultKeyring}}, nil
}
return defaultKeyring, nil
}
| pkg/credentialprovider/keyring.go | 0 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.024052297696471214,
0.000998848001472652,
0.000162828917382285,
0.0001692171790637076,
0.00421124417334795
] |
{
"id": 0,
"code_window": [
"import (\n",
"\t_ \"net/http/pprof\"\n",
"\t\"time\"\n",
"\n",
"\t\"k8s.io/kubernetes/pkg/api\"\n",
"\t\"k8s.io/kubernetes/pkg/api/unversioned\"\n",
"\t\"k8s.io/kubernetes/pkg/apis/componentconfig\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-proxy/app/options/options.go",
"type": "replace",
"edit_start_line_idx": 24
} | package netlink
import (
"bytes"
"encoding/binary"
"fmt"
"net"
"syscall"
"github.com/vishvananda/netlink/nl"
)
var native = nl.NativeEndian()
var lookupByDump = false
var macvlanModes = [...]uint32{
0,
nl.MACVLAN_MODE_PRIVATE,
nl.MACVLAN_MODE_VEPA,
nl.MACVLAN_MODE_BRIDGE,
nl.MACVLAN_MODE_PASSTHRU,
nl.MACVLAN_MODE_SOURCE,
}
func ensureIndex(link *LinkAttrs) {
if link != nil && link.Index == 0 {
newlink, _ := LinkByName(link.Name)
if newlink != nil {
link.Index = newlink.Attrs().Index
}
}
}
// LinkSetUp enables the link device.
// Equivalent to: `ip link set $link up`
func LinkSetUp(link Link) error {
base := link.Attrs()
ensureIndex(base)
req := nl.NewNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Change = syscall.IFF_UP
msg.Flags = syscall.IFF_UP
msg.Index = int32(base.Index)
req.AddData(msg)
_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
return err
}
// LinkSetDown disables link device.
// Equivalent to: `ip link set $link down`
func LinkSetDown(link Link) error {
base := link.Attrs()
ensureIndex(base)
req := nl.NewNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Change = syscall.IFF_UP
msg.Flags = 0 & ^syscall.IFF_UP
msg.Index = int32(base.Index)
req.AddData(msg)
_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
return err
}
// LinkSetMTU sets the mtu of the link device.
// Equivalent to: `ip link set $link mtu $mtu`
func LinkSetMTU(link Link, mtu int) error {
base := link.Attrs()
ensureIndex(base)
req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Index = int32(base.Index)
req.AddData(msg)
b := make([]byte, 4)
native.PutUint32(b, uint32(mtu))
data := nl.NewRtAttr(syscall.IFLA_MTU, b)
req.AddData(data)
_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
return err
}
// LinkSetName sets the name of the link device.
// Equivalent to: `ip link set $link name $name`
func LinkSetName(link Link, name string) error {
base := link.Attrs()
ensureIndex(base)
req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Index = int32(base.Index)
req.AddData(msg)
data := nl.NewRtAttr(syscall.IFLA_IFNAME, []byte(name))
req.AddData(data)
_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
return err
}
// LinkSetHardwareAddr sets the hardware address of the link device.
// Equivalent to: `ip link set $link address $hwaddr`
func LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error {
base := link.Attrs()
ensureIndex(base)
req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Index = int32(base.Index)
req.AddData(msg)
data := nl.NewRtAttr(syscall.IFLA_ADDRESS, []byte(hwaddr))
req.AddData(data)
_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
return err
}
// LinkSetMaster sets the master of the link device.
// Equivalent to: `ip link set $link master $master`
func LinkSetMaster(link Link, master *Bridge) error {
index := 0
if master != nil {
masterBase := master.Attrs()
ensureIndex(masterBase)
index = masterBase.Index
}
return LinkSetMasterByIndex(link, index)
}
// LinkSetMasterByIndex sets the master of the link device.
// Equivalent to: `ip link set $link master $master`
func LinkSetMasterByIndex(link Link, masterIndex int) error {
base := link.Attrs()
ensureIndex(base)
req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Index = int32(base.Index)
req.AddData(msg)
b := make([]byte, 4)
native.PutUint32(b, uint32(masterIndex))
data := nl.NewRtAttr(syscall.IFLA_MASTER, b)
req.AddData(data)
_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
return err
}
// LinkSetNsPid puts the device into a new network namespace. The
// pid must be a pid of a running process.
// Equivalent to: `ip link set $link netns $pid`
func LinkSetNsPid(link Link, nspid int) error {
base := link.Attrs()
ensureIndex(base)
req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Index = int32(base.Index)
req.AddData(msg)
b := make([]byte, 4)
native.PutUint32(b, uint32(nspid))
data := nl.NewRtAttr(syscall.IFLA_NET_NS_PID, b)
req.AddData(data)
_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
return err
}
// LinkSetNsFd puts the device into a new network namespace. The
// fd must be an open file descriptor to a network namespace.
// Similar to: `ip link set $link netns $ns`
func LinkSetNsFd(link Link, fd int) error {
base := link.Attrs()
ensureIndex(base)
req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Index = int32(base.Index)
req.AddData(msg)
b := make([]byte, 4)
native.PutUint32(b, uint32(fd))
data := nl.NewRtAttr(nl.IFLA_NET_NS_FD, b)
req.AddData(data)
_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
return err
}
func boolAttr(val bool) []byte {
var v uint8
if val {
v = 1
}
return nl.Uint8Attr(v)
}
type vxlanPortRange struct {
Lo, Hi uint16
}
func addVxlanAttrs(vxlan *Vxlan, linkInfo *nl.RtAttr) {
data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_ID, nl.Uint32Attr(uint32(vxlan.VxlanId)))
if vxlan.VtepDevIndex != 0 {
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LINK, nl.Uint32Attr(uint32(vxlan.VtepDevIndex)))
}
if vxlan.SrcAddr != nil {
ip := vxlan.SrcAddr.To4()
if ip != nil {
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LOCAL, []byte(ip))
} else {
ip = vxlan.SrcAddr.To16()
if ip != nil {
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LOCAL6, []byte(ip))
}
}
}
if vxlan.Group != nil {
group := vxlan.Group.To4()
if group != nil {
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GROUP, []byte(group))
} else {
group = vxlan.Group.To16()
if group != nil {
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GROUP6, []byte(group))
}
}
}
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_TTL, nl.Uint8Attr(uint8(vxlan.TTL)))
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_TOS, nl.Uint8Attr(uint8(vxlan.TOS)))
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LEARNING, boolAttr(vxlan.Learning))
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PROXY, boolAttr(vxlan.Proxy))
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_RSC, boolAttr(vxlan.RSC))
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L2MISS, boolAttr(vxlan.L2miss))
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L3MISS, boolAttr(vxlan.L3miss))
if vxlan.GBP {
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GBP, boolAttr(vxlan.GBP))
}
if vxlan.NoAge {
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(0))
} else if vxlan.Age > 0 {
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(uint32(vxlan.Age)))
}
if vxlan.Limit > 0 {
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LIMIT, nl.Uint32Attr(uint32(vxlan.Limit)))
}
if vxlan.Port > 0 {
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PORT, nl.Uint16Attr(uint16(vxlan.Port)))
}
if vxlan.PortLow > 0 || vxlan.PortHigh > 0 {
pr := vxlanPortRange{uint16(vxlan.PortLow), uint16(vxlan.PortHigh)}
buf := new(bytes.Buffer)
binary.Write(buf, binary.BigEndian, &pr)
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PORT_RANGE, buf.Bytes())
}
}
// LinkAdd adds a new link device. The type and features of the device
// are taken fromt the parameters in the link object.
// Equivalent to: `ip link add $link`
func LinkAdd(link Link) error {
// TODO: set mtu and hardware address
// TODO: support extra data for macvlan
base := link.Attrs()
if base.Name == "" {
return fmt.Errorf("LinkAttrs.Name cannot be empty!")
}
req := nl.NewNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
req.AddData(msg)
if base.ParentIndex != 0 {
b := make([]byte, 4)
native.PutUint32(b, uint32(base.ParentIndex))
data := nl.NewRtAttr(syscall.IFLA_LINK, b)
req.AddData(data)
} else if link.Type() == "ipvlan" {
return fmt.Errorf("Can't create ipvlan link without ParentIndex")
}
nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(base.Name))
req.AddData(nameData)
if base.MTU > 0 {
mtu := nl.NewRtAttr(syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU)))
req.AddData(mtu)
}
if base.TxQLen >= 0 {
qlen := nl.NewRtAttr(syscall.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen)))
req.AddData(qlen)
}
if base.Namespace != nil {
var attr *nl.RtAttr
switch base.Namespace.(type) {
case NsPid:
val := nl.Uint32Attr(uint32(base.Namespace.(NsPid)))
attr = nl.NewRtAttr(syscall.IFLA_NET_NS_PID, val)
case NsFd:
val := nl.Uint32Attr(uint32(base.Namespace.(NsFd)))
attr = nl.NewRtAttr(nl.IFLA_NET_NS_FD, val)
}
req.AddData(attr)
}
linkInfo := nl.NewRtAttr(syscall.IFLA_LINKINFO, nil)
nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type()))
if vlan, ok := link.(*Vlan); ok {
b := make([]byte, 2)
native.PutUint16(b, uint16(vlan.VlanId))
data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
nl.NewRtAttrChild(data, nl.IFLA_VLAN_ID, b)
} else if veth, ok := link.(*Veth); ok {
data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
peer := nl.NewRtAttrChild(data, nl.VETH_INFO_PEER, nil)
nl.NewIfInfomsgChild(peer, syscall.AF_UNSPEC)
nl.NewRtAttrChild(peer, syscall.IFLA_IFNAME, nl.ZeroTerminated(veth.PeerName))
if base.TxQLen >= 0 {
nl.NewRtAttrChild(peer, syscall.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen)))
}
if base.MTU > 0 {
nl.NewRtAttrChild(peer, syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU)))
}
} else if vxlan, ok := link.(*Vxlan); ok {
addVxlanAttrs(vxlan, linkInfo)
} else if ipv, ok := link.(*IPVlan); ok {
data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
nl.NewRtAttrChild(data, nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(ipv.Mode)))
} else if macv, ok := link.(*Macvlan); ok {
if macv.Mode != MACVLAN_MODE_DEFAULT {
data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[macv.Mode]))
}
}
req.AddData(linkInfo)
_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
if err != nil {
return err
}
ensureIndex(base)
// can't set master during create, so set it afterwards
if base.MasterIndex != 0 {
// TODO: verify MasterIndex is actually a bridge?
return LinkSetMasterByIndex(link, base.MasterIndex)
}
return nil
}
// LinkDel deletes link device. Either Index or Name must be set in
// the link object for it to be deleted. The other values are ignored.
// Equivalent to: `ip link del $link`
func LinkDel(link Link) error {
base := link.Attrs()
ensureIndex(base)
req := nl.NewNetlinkRequest(syscall.RTM_DELLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Index = int32(base.Index)
req.AddData(msg)
_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
return err
}
func linkByNameDump(name string) (Link, error) {
links, err := LinkList()
if err != nil {
return nil, err
}
for _, link := range links {
if link.Attrs().Name == name {
return link, nil
}
}
return nil, fmt.Errorf("Link %s not found", name)
}
// LinkByName finds a link by name and returns a pointer to the object.
func LinkByName(name string) (Link, error) {
if lookupByDump {
return linkByNameDump(name)
}
req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
req.AddData(msg)
nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(name))
req.AddData(nameData)
link, err := execGetLink(req)
if err == syscall.EINVAL {
// older kernels don't support looking up via IFLA_IFNAME
// so fall back to dumping all links
lookupByDump = true
return linkByNameDump(name)
}
return link, err
}
// LinkByIndex finds a link by index and returns a pointer to the object.
func LinkByIndex(index int) (Link, error) {
req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
msg.Index = int32(index)
req.AddData(msg)
return execGetLink(req)
}
func execGetLink(req *nl.NetlinkRequest) (Link, error) {
msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0)
if err != nil {
if errno, ok := err.(syscall.Errno); ok {
if errno == syscall.ENODEV {
return nil, fmt.Errorf("Link not found")
}
}
return nil, err
}
switch {
case len(msgs) == 0:
return nil, fmt.Errorf("Link not found")
case len(msgs) == 1:
return linkDeserialize(msgs[0])
default:
return nil, fmt.Errorf("More than one link found")
}
}
// linkDeserialize deserializes a raw message received from netlink into
// a link object.
func linkDeserialize(m []byte) (Link, error) {
msg := nl.DeserializeIfInfomsg(m)
attrs, err := nl.ParseRouteAttr(m[msg.Len():])
if err != nil {
return nil, err
}
base := LinkAttrs{Index: int(msg.Index), Flags: linkFlags(msg.Flags)}
var link Link
linkType := ""
for _, attr := range attrs {
switch attr.Attr.Type {
case syscall.IFLA_LINKINFO:
infos, err := nl.ParseRouteAttr(attr.Value)
if err != nil {
return nil, err
}
for _, info := range infos {
switch info.Attr.Type {
case nl.IFLA_INFO_KIND:
linkType = string(info.Value[:len(info.Value)-1])
switch linkType {
case "dummy":
link = &Dummy{}
case "ifb":
link = &Ifb{}
case "bridge":
link = &Bridge{}
case "vlan":
link = &Vlan{}
case "veth":
link = &Veth{}
case "vxlan":
link = &Vxlan{}
case "ipvlan":
link = &IPVlan{}
case "macvlan":
link = &Macvlan{}
case "macvtap":
link = &Macvtap{}
default:
link = &GenericLink{LinkType: linkType}
}
case nl.IFLA_INFO_DATA:
data, err := nl.ParseRouteAttr(info.Value)
if err != nil {
return nil, err
}
switch linkType {
case "vlan":
parseVlanData(link, data)
case "vxlan":
parseVxlanData(link, data)
case "ipvlan":
parseIPVlanData(link, data)
case "macvlan":
parseMacvlanData(link, data)
case "macvtap":
parseMacvtapData(link, data)
}
}
}
case syscall.IFLA_ADDRESS:
var nonzero bool
for _, b := range attr.Value {
if b != 0 {
nonzero = true
}
}
if nonzero {
base.HardwareAddr = attr.Value[:]
}
case syscall.IFLA_IFNAME:
base.Name = string(attr.Value[:len(attr.Value)-1])
case syscall.IFLA_MTU:
base.MTU = int(native.Uint32(attr.Value[0:4]))
case syscall.IFLA_LINK:
base.ParentIndex = int(native.Uint32(attr.Value[0:4]))
case syscall.IFLA_MASTER:
base.MasterIndex = int(native.Uint32(attr.Value[0:4]))
case syscall.IFLA_TXQLEN:
base.TxQLen = int(native.Uint32(attr.Value[0:4]))
}
}
// Links that don't have IFLA_INFO_KIND are hardware devices
if link == nil {
link = &Device{}
}
*link.Attrs() = base
return link, nil
}
// LinkList gets a list of link devices.
// Equivalent to: `ip link show`
func LinkList() ([]Link, error) {
// NOTE(vish): This duplicates functionality in net/iface_linux.go, but we need
// to get the message ourselves to parse link type.
req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP)
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
req.AddData(msg)
msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK)
if err != nil {
return nil, err
}
var res []Link
for _, m := range msgs {
link, err := linkDeserialize(m)
if err != nil {
return nil, err
}
res = append(res, link)
}
return res, nil
}
func LinkSetHairpin(link Link, mode bool) error {
return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_MODE)
}
func LinkSetGuard(link Link, mode bool) error {
return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_GUARD)
}
func LinkSetFastLeave(link Link, mode bool) error {
return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_FAST_LEAVE)
}
func LinkSetLearning(link Link, mode bool) error {
return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_LEARNING)
}
func LinkSetRootBlock(link Link, mode bool) error {
return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_PROTECT)
}
func LinkSetFlood(link Link, mode bool) error {
return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_UNICAST_FLOOD)
}
func setProtinfoAttr(link Link, mode bool, attr int) error {
base := link.Attrs()
ensureIndex(base)
req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
msg := nl.NewIfInfomsg(syscall.AF_BRIDGE)
msg.Index = int32(base.Index)
req.AddData(msg)
br := nl.NewRtAttr(syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED, nil)
nl.NewRtAttrChild(br, attr, boolToByte(mode))
req.AddData(br)
_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
if err != nil {
return err
}
return nil
}
func parseVlanData(link Link, data []syscall.NetlinkRouteAttr) {
vlan := link.(*Vlan)
for _, datum := range data {
switch datum.Attr.Type {
case nl.IFLA_VLAN_ID:
vlan.VlanId = int(native.Uint16(datum.Value[0:2]))
}
}
}
func parseVxlanData(link Link, data []syscall.NetlinkRouteAttr) {
vxlan := link.(*Vxlan)
for _, datum := range data {
switch datum.Attr.Type {
case nl.IFLA_VXLAN_ID:
vxlan.VxlanId = int(native.Uint32(datum.Value[0:4]))
case nl.IFLA_VXLAN_LINK:
vxlan.VtepDevIndex = int(native.Uint32(datum.Value[0:4]))
case nl.IFLA_VXLAN_LOCAL:
vxlan.SrcAddr = net.IP(datum.Value[0:4])
case nl.IFLA_VXLAN_LOCAL6:
vxlan.SrcAddr = net.IP(datum.Value[0:16])
case nl.IFLA_VXLAN_GROUP:
vxlan.Group = net.IP(datum.Value[0:4])
case nl.IFLA_VXLAN_GROUP6:
vxlan.Group = net.IP(datum.Value[0:16])
case nl.IFLA_VXLAN_TTL:
vxlan.TTL = int(datum.Value[0])
case nl.IFLA_VXLAN_TOS:
vxlan.TOS = int(datum.Value[0])
case nl.IFLA_VXLAN_LEARNING:
vxlan.Learning = int8(datum.Value[0]) != 0
case nl.IFLA_VXLAN_PROXY:
vxlan.Proxy = int8(datum.Value[0]) != 0
case nl.IFLA_VXLAN_RSC:
vxlan.RSC = int8(datum.Value[0]) != 0
case nl.IFLA_VXLAN_L2MISS:
vxlan.L2miss = int8(datum.Value[0]) != 0
case nl.IFLA_VXLAN_L3MISS:
vxlan.L3miss = int8(datum.Value[0]) != 0
case nl.IFLA_VXLAN_GBP:
vxlan.GBP = int8(datum.Value[0]) != 0
case nl.IFLA_VXLAN_AGEING:
vxlan.Age = int(native.Uint32(datum.Value[0:4]))
vxlan.NoAge = vxlan.Age == 0
case nl.IFLA_VXLAN_LIMIT:
vxlan.Limit = int(native.Uint32(datum.Value[0:4]))
case nl.IFLA_VXLAN_PORT:
vxlan.Port = int(native.Uint16(datum.Value[0:2]))
case nl.IFLA_VXLAN_PORT_RANGE:
buf := bytes.NewBuffer(datum.Value[0:4])
var pr vxlanPortRange
			if binary.Read(buf, binary.BigEndian, &pr) == nil {
vxlan.PortLow = int(pr.Lo)
vxlan.PortHigh = int(pr.Hi)
}
}
}
}
func parseIPVlanData(link Link, data []syscall.NetlinkRouteAttr) {
ipv := link.(*IPVlan)
for _, datum := range data {
if datum.Attr.Type == nl.IFLA_IPVLAN_MODE {
ipv.Mode = IPVlanMode(native.Uint32(datum.Value[0:4]))
return
}
}
}
func parseMacvtapData(link Link, data []syscall.NetlinkRouteAttr) {
macv := link.(*Macvtap)
parseMacvlanData(&macv.Macvlan, data)
}
func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) {
macv := link.(*Macvlan)
for _, datum := range data {
if datum.Attr.Type == nl.IFLA_MACVLAN_MODE {
switch native.Uint32(datum.Value[0:4]) {
case nl.MACVLAN_MODE_PRIVATE:
macv.Mode = MACVLAN_MODE_PRIVATE
case nl.MACVLAN_MODE_VEPA:
macv.Mode = MACVLAN_MODE_VEPA
case nl.MACVLAN_MODE_BRIDGE:
macv.Mode = MACVLAN_MODE_BRIDGE
case nl.MACVLAN_MODE_PASSTHRU:
macv.Mode = MACVLAN_MODE_PASSTHRU
case nl.MACVLAN_MODE_SOURCE:
macv.Mode = MACVLAN_MODE_SOURCE
}
return
}
}
}
// copied from pkg/net_linux.go
func linkFlags(rawFlags uint32) net.Flags {
var f net.Flags
if rawFlags&syscall.IFF_UP != 0 {
f |= net.FlagUp
}
if rawFlags&syscall.IFF_BROADCAST != 0 {
f |= net.FlagBroadcast
}
if rawFlags&syscall.IFF_LOOPBACK != 0 {
f |= net.FlagLoopback
}
if rawFlags&syscall.IFF_POINTOPOINT != 0 {
f |= net.FlagPointToPoint
}
if rawFlags&syscall.IFF_MULTICAST != 0 {
f |= net.FlagMulticast
}
return f
}
| Godeps/_workspace/src/github.com/vishvananda/netlink/link_linux.go | 0 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.0029848047997802496,
0.0002363675885135308,
0.00016237555246334523,
0.00017004874825943261,
0.0003338016977068037
] |
{
"id": 0,
"code_window": [
"import (\n",
"\t_ \"net/http/pprof\"\n",
"\t\"time\"\n",
"\n",
"\t\"k8s.io/kubernetes/pkg/api\"\n",
"\t\"k8s.io/kubernetes/pkg/api/unversioned\"\n",
"\t\"k8s.io/kubernetes/pkg/apis/componentconfig\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [],
"file_path": "cmd/kube-proxy/app/options/options.go",
"type": "replace",
"edit_start_line_idx": 24
} | /*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
api "k8s.io/kubernetes/pkg/api"
core "k8s.io/kubernetes/pkg/client/testing/core"
labels "k8s.io/kubernetes/pkg/labels"
watch "k8s.io/kubernetes/pkg/watch"
)
// FakePodTemplates implements PodTemplateInterface
type FakePodTemplates struct {
Fake *FakeLegacy
ns string
}
func (c *FakePodTemplates) Create(podTemplate *api.PodTemplate) (result *api.PodTemplate, err error) {
obj, err := c.Fake.
Invokes(core.NewCreateAction("podtemplates", c.ns, podTemplate), &api.PodTemplate{})
if obj == nil {
return nil, err
}
return obj.(*api.PodTemplate), err
}
func (c *FakePodTemplates) Update(podTemplate *api.PodTemplate) (result *api.PodTemplate, err error) {
obj, err := c.Fake.
Invokes(core.NewUpdateAction("podtemplates", c.ns, podTemplate), &api.PodTemplate{})
if obj == nil {
return nil, err
}
return obj.(*api.PodTemplate), err
}
func (c *FakePodTemplates) Delete(name string, options *api.DeleteOptions) error {
_, err := c.Fake.
Invokes(core.NewDeleteAction("podtemplates", c.ns, name), &api.PodTemplate{})
return err
}
func (c *FakePodTemplates) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error {
	action := core.NewDeleteCollectionAction("podtemplates", c.ns, listOptions)
_, err := c.Fake.Invokes(action, &api.PodTemplateList{})
return err
}
func (c *FakePodTemplates) Get(name string) (result *api.PodTemplate, err error) {
obj, err := c.Fake.
Invokes(core.NewGetAction("podtemplates", c.ns, name), &api.PodTemplate{})
if obj == nil {
return nil, err
}
return obj.(*api.PodTemplate), err
}
func (c *FakePodTemplates) List(opts api.ListOptions) (result *api.PodTemplateList, err error) {
obj, err := c.Fake.
Invokes(core.NewListAction("podtemplates", c.ns, opts), &api.PodTemplateList{})
if obj == nil {
return nil, err
}
label := opts.LabelSelector
if label == nil {
label = labels.Everything()
}
list := &api.PodTemplateList{}
for _, item := range obj.(*api.PodTemplateList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested podTemplates.
func (c *FakePodTemplates) Watch(opts api.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(core.NewWatchAction("podtemplates", c.ns, opts))
}
| pkg/client/typed/generated/legacy/unversioned/fake/fake_podtemplate.go | 0 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.0016783798346295953,
0.00034574707387946546,
0.00016350179794244468,
0.00016849667008500546,
0.0004403612401802093
] |
{
"id": 1,
"code_window": [
"\t\"k8s.io/kubernetes/pkg/apis/componentconfig\"\n",
"\t\"k8s.io/kubernetes/pkg/kubelet/qos\"\n",
"\t\"k8s.io/kubernetes/pkg/util\"\n",
"\n"
],
"labels": [
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1\"\n"
],
"file_path": "cmd/kube-proxy/app/options/options.go",
"type": "add",
"edit_start_line_idx": 26
} | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"time"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) {
scheme.AddDefaultingFuncs(
func(obj *KubeProxyConfiguration) {
if obj.BindAddress == "" {
obj.BindAddress = "0.0.0.0"
}
if obj.HealthzPort == 0 {
obj.HealthzPort = 10249
}
if obj.HealthzBindAddress == "" {
obj.HealthzBindAddress = "127.0.0.1"
}
if obj.OOMScoreAdj == nil {
temp := int32(qos.KubeProxyOOMScoreAdj)
obj.OOMScoreAdj = &temp
}
if obj.IPTablesSyncPeriod.Duration == 0 {
obj.IPTablesSyncPeriod = unversioned.Duration{5 * time.Second}
}
},
)
}
| pkg/apis/componentconfig/v1alpha1/defaults.go | 1 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.03852083161473274,
0.0078424708917737,
0.0001692107180133462,
0.0001746473863022402,
0.015339180827140808
] |
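The record above pairs an `options.go` edit (adding the `componentconfig/v1alpha1` import) with the versioned defaulting code in `defaults.go`. As a rough, self-contained illustration of that defaulting pattern — each default is applied only when the field is still at its zero value, so explicit settings survive — here is a small Go sketch; the struct, its field set, and the OOM score constant are simplified stand-ins, not the real kube-proxy types.
```go
package main

import (
	"fmt"
	"time"
)

// kubeProxyConfig is a simplified stand-in for the versioned
// KubeProxyConfiguration used in the snippets above.
type kubeProxyConfig struct {
	BindAddress        string
	HealthzPort        int32
	HealthzBindAddress string
	OOMScoreAdj        *int32
	IPTablesSyncPeriod time.Duration
}

// setDefaults mirrors the addDefaultingFuncs pattern: it only fills fields
// that are still at their zero value, so explicit settings are preserved.
func setDefaults(obj *kubeProxyConfig) {
	if obj.BindAddress == "" {
		obj.BindAddress = "0.0.0.0"
	}
	if obj.HealthzPort == 0 {
		obj.HealthzPort = 10249
	}
	if obj.HealthzBindAddress == "" {
		obj.HealthzBindAddress = "127.0.0.1"
	}
	if obj.OOMScoreAdj == nil {
		temp := int32(-999) // stand-in for qos.KubeProxyOOMScoreAdj
		obj.OOMScoreAdj = &temp
	}
	if obj.IPTablesSyncPeriod == 0 {
		obj.IPTablesSyncPeriod = 5 * time.Second
	}
}

func main() {
	cfg := kubeProxyConfig{HealthzPort: 12345} // one field set explicitly
	setDefaults(&cfg)
	fmt.Printf("%+v\n", cfg) // HealthzPort stays 12345, the rest are defaulted
}
```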
{
"id": 1,
"code_window": [
"\t\"k8s.io/kubernetes/pkg/apis/componentconfig\"\n",
"\t\"k8s.io/kubernetes/pkg/kubelet/qos\"\n",
"\t\"k8s.io/kubernetes/pkg/util\"\n",
"\n"
],
"labels": [
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1\"\n"
],
"file_path": "cmd/kube-proxy/app/options/options.go",
"type": "add",
"edit_start_line_idx": 26
} | node_modules/
| docs/getting-started-guides/coreos/azure/.gitignore | 0 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.0001716286496957764,
0.0001716286496957764,
0.0001716286496957764,
0.0001716286496957764,
0
] |
{
"id": 1,
"code_window": [
"\t\"k8s.io/kubernetes/pkg/apis/componentconfig\"\n",
"\t\"k8s.io/kubernetes/pkg/kubelet/qos\"\n",
"\t\"k8s.io/kubernetes/pkg/util\"\n",
"\n"
],
"labels": [
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1\"\n"
],
"file_path": "cmd/kube-proxy/app/options/options.go",
"type": "add",
"edit_start_line_idx": 26
} | // THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
// Package ec2iface provides an interface for the Amazon Elastic Compute Cloud.
package ec2iface
import (
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/ec2"
)
// EC2API is the interface type for ec2.EC2.
type EC2API interface {
AcceptVpcPeeringConnectionRequest(*ec2.AcceptVpcPeeringConnectionInput) (*request.Request, *ec2.AcceptVpcPeeringConnectionOutput)
AcceptVpcPeeringConnection(*ec2.AcceptVpcPeeringConnectionInput) (*ec2.AcceptVpcPeeringConnectionOutput, error)
AllocateAddressRequest(*ec2.AllocateAddressInput) (*request.Request, *ec2.AllocateAddressOutput)
AllocateAddress(*ec2.AllocateAddressInput) (*ec2.AllocateAddressOutput, error)
AllocateHostsRequest(*ec2.AllocateHostsInput) (*request.Request, *ec2.AllocateHostsOutput)
AllocateHosts(*ec2.AllocateHostsInput) (*ec2.AllocateHostsOutput, error)
AssignPrivateIpAddressesRequest(*ec2.AssignPrivateIpAddressesInput) (*request.Request, *ec2.AssignPrivateIpAddressesOutput)
AssignPrivateIpAddresses(*ec2.AssignPrivateIpAddressesInput) (*ec2.AssignPrivateIpAddressesOutput, error)
AssociateAddressRequest(*ec2.AssociateAddressInput) (*request.Request, *ec2.AssociateAddressOutput)
AssociateAddress(*ec2.AssociateAddressInput) (*ec2.AssociateAddressOutput, error)
AssociateDhcpOptionsRequest(*ec2.AssociateDhcpOptionsInput) (*request.Request, *ec2.AssociateDhcpOptionsOutput)
AssociateDhcpOptions(*ec2.AssociateDhcpOptionsInput) (*ec2.AssociateDhcpOptionsOutput, error)
AssociateRouteTableRequest(*ec2.AssociateRouteTableInput) (*request.Request, *ec2.AssociateRouteTableOutput)
AssociateRouteTable(*ec2.AssociateRouteTableInput) (*ec2.AssociateRouteTableOutput, error)
AttachClassicLinkVpcRequest(*ec2.AttachClassicLinkVpcInput) (*request.Request, *ec2.AttachClassicLinkVpcOutput)
AttachClassicLinkVpc(*ec2.AttachClassicLinkVpcInput) (*ec2.AttachClassicLinkVpcOutput, error)
AttachInternetGatewayRequest(*ec2.AttachInternetGatewayInput) (*request.Request, *ec2.AttachInternetGatewayOutput)
AttachInternetGateway(*ec2.AttachInternetGatewayInput) (*ec2.AttachInternetGatewayOutput, error)
AttachNetworkInterfaceRequest(*ec2.AttachNetworkInterfaceInput) (*request.Request, *ec2.AttachNetworkInterfaceOutput)
AttachNetworkInterface(*ec2.AttachNetworkInterfaceInput) (*ec2.AttachNetworkInterfaceOutput, error)
AttachVolumeRequest(*ec2.AttachVolumeInput) (*request.Request, *ec2.VolumeAttachment)
AttachVolume(*ec2.AttachVolumeInput) (*ec2.VolumeAttachment, error)
AttachVpnGatewayRequest(*ec2.AttachVpnGatewayInput) (*request.Request, *ec2.AttachVpnGatewayOutput)
AttachVpnGateway(*ec2.AttachVpnGatewayInput) (*ec2.AttachVpnGatewayOutput, error)
AuthorizeSecurityGroupEgressRequest(*ec2.AuthorizeSecurityGroupEgressInput) (*request.Request, *ec2.AuthorizeSecurityGroupEgressOutput)
AuthorizeSecurityGroupEgress(*ec2.AuthorizeSecurityGroupEgressInput) (*ec2.AuthorizeSecurityGroupEgressOutput, error)
AuthorizeSecurityGroupIngressRequest(*ec2.AuthorizeSecurityGroupIngressInput) (*request.Request, *ec2.AuthorizeSecurityGroupIngressOutput)
AuthorizeSecurityGroupIngress(*ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error)
BundleInstanceRequest(*ec2.BundleInstanceInput) (*request.Request, *ec2.BundleInstanceOutput)
BundleInstance(*ec2.BundleInstanceInput) (*ec2.BundleInstanceOutput, error)
CancelBundleTaskRequest(*ec2.CancelBundleTaskInput) (*request.Request, *ec2.CancelBundleTaskOutput)
CancelBundleTask(*ec2.CancelBundleTaskInput) (*ec2.CancelBundleTaskOutput, error)
CancelConversionTaskRequest(*ec2.CancelConversionTaskInput) (*request.Request, *ec2.CancelConversionTaskOutput)
CancelConversionTask(*ec2.CancelConversionTaskInput) (*ec2.CancelConversionTaskOutput, error)
CancelExportTaskRequest(*ec2.CancelExportTaskInput) (*request.Request, *ec2.CancelExportTaskOutput)
CancelExportTask(*ec2.CancelExportTaskInput) (*ec2.CancelExportTaskOutput, error)
CancelImportTaskRequest(*ec2.CancelImportTaskInput) (*request.Request, *ec2.CancelImportTaskOutput)
CancelImportTask(*ec2.CancelImportTaskInput) (*ec2.CancelImportTaskOutput, error)
CancelReservedInstancesListingRequest(*ec2.CancelReservedInstancesListingInput) (*request.Request, *ec2.CancelReservedInstancesListingOutput)
CancelReservedInstancesListing(*ec2.CancelReservedInstancesListingInput) (*ec2.CancelReservedInstancesListingOutput, error)
CancelSpotFleetRequestsRequest(*ec2.CancelSpotFleetRequestsInput) (*request.Request, *ec2.CancelSpotFleetRequestsOutput)
CancelSpotFleetRequests(*ec2.CancelSpotFleetRequestsInput) (*ec2.CancelSpotFleetRequestsOutput, error)
CancelSpotInstanceRequestsRequest(*ec2.CancelSpotInstanceRequestsInput) (*request.Request, *ec2.CancelSpotInstanceRequestsOutput)
CancelSpotInstanceRequests(*ec2.CancelSpotInstanceRequestsInput) (*ec2.CancelSpotInstanceRequestsOutput, error)
ConfirmProductInstanceRequest(*ec2.ConfirmProductInstanceInput) (*request.Request, *ec2.ConfirmProductInstanceOutput)
ConfirmProductInstance(*ec2.ConfirmProductInstanceInput) (*ec2.ConfirmProductInstanceOutput, error)
CopyImageRequest(*ec2.CopyImageInput) (*request.Request, *ec2.CopyImageOutput)
CopyImage(*ec2.CopyImageInput) (*ec2.CopyImageOutput, error)
CopySnapshotRequest(*ec2.CopySnapshotInput) (*request.Request, *ec2.CopySnapshotOutput)
CopySnapshot(*ec2.CopySnapshotInput) (*ec2.CopySnapshotOutput, error)
CreateCustomerGatewayRequest(*ec2.CreateCustomerGatewayInput) (*request.Request, *ec2.CreateCustomerGatewayOutput)
CreateCustomerGateway(*ec2.CreateCustomerGatewayInput) (*ec2.CreateCustomerGatewayOutput, error)
CreateDhcpOptionsRequest(*ec2.CreateDhcpOptionsInput) (*request.Request, *ec2.CreateDhcpOptionsOutput)
CreateDhcpOptions(*ec2.CreateDhcpOptionsInput) (*ec2.CreateDhcpOptionsOutput, error)
CreateFlowLogsRequest(*ec2.CreateFlowLogsInput) (*request.Request, *ec2.CreateFlowLogsOutput)
CreateFlowLogs(*ec2.CreateFlowLogsInput) (*ec2.CreateFlowLogsOutput, error)
CreateImageRequest(*ec2.CreateImageInput) (*request.Request, *ec2.CreateImageOutput)
CreateImage(*ec2.CreateImageInput) (*ec2.CreateImageOutput, error)
CreateInstanceExportTaskRequest(*ec2.CreateInstanceExportTaskInput) (*request.Request, *ec2.CreateInstanceExportTaskOutput)
CreateInstanceExportTask(*ec2.CreateInstanceExportTaskInput) (*ec2.CreateInstanceExportTaskOutput, error)
CreateInternetGatewayRequest(*ec2.CreateInternetGatewayInput) (*request.Request, *ec2.CreateInternetGatewayOutput)
CreateInternetGateway(*ec2.CreateInternetGatewayInput) (*ec2.CreateInternetGatewayOutput, error)
CreateKeyPairRequest(*ec2.CreateKeyPairInput) (*request.Request, *ec2.CreateKeyPairOutput)
CreateKeyPair(*ec2.CreateKeyPairInput) (*ec2.CreateKeyPairOutput, error)
CreateNatGatewayRequest(*ec2.CreateNatGatewayInput) (*request.Request, *ec2.CreateNatGatewayOutput)
CreateNatGateway(*ec2.CreateNatGatewayInput) (*ec2.CreateNatGatewayOutput, error)
CreateNetworkAclRequest(*ec2.CreateNetworkAclInput) (*request.Request, *ec2.CreateNetworkAclOutput)
CreateNetworkAcl(*ec2.CreateNetworkAclInput) (*ec2.CreateNetworkAclOutput, error)
CreateNetworkAclEntryRequest(*ec2.CreateNetworkAclEntryInput) (*request.Request, *ec2.CreateNetworkAclEntryOutput)
CreateNetworkAclEntry(*ec2.CreateNetworkAclEntryInput) (*ec2.CreateNetworkAclEntryOutput, error)
CreateNetworkInterfaceRequest(*ec2.CreateNetworkInterfaceInput) (*request.Request, *ec2.CreateNetworkInterfaceOutput)
CreateNetworkInterface(*ec2.CreateNetworkInterfaceInput) (*ec2.CreateNetworkInterfaceOutput, error)
CreatePlacementGroupRequest(*ec2.CreatePlacementGroupInput) (*request.Request, *ec2.CreatePlacementGroupOutput)
CreatePlacementGroup(*ec2.CreatePlacementGroupInput) (*ec2.CreatePlacementGroupOutput, error)
CreateReservedInstancesListingRequest(*ec2.CreateReservedInstancesListingInput) (*request.Request, *ec2.CreateReservedInstancesListingOutput)
CreateReservedInstancesListing(*ec2.CreateReservedInstancesListingInput) (*ec2.CreateReservedInstancesListingOutput, error)
CreateRouteRequest(*ec2.CreateRouteInput) (*request.Request, *ec2.CreateRouteOutput)
CreateRoute(*ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error)
CreateRouteTableRequest(*ec2.CreateRouteTableInput) (*request.Request, *ec2.CreateRouteTableOutput)
CreateRouteTable(*ec2.CreateRouteTableInput) (*ec2.CreateRouteTableOutput, error)
CreateSecurityGroupRequest(*ec2.CreateSecurityGroupInput) (*request.Request, *ec2.CreateSecurityGroupOutput)
CreateSecurityGroup(*ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error)
CreateSnapshotRequest(*ec2.CreateSnapshotInput) (*request.Request, *ec2.Snapshot)
CreateSnapshot(*ec2.CreateSnapshotInput) (*ec2.Snapshot, error)
CreateSpotDatafeedSubscriptionRequest(*ec2.CreateSpotDatafeedSubscriptionInput) (*request.Request, *ec2.CreateSpotDatafeedSubscriptionOutput)
CreateSpotDatafeedSubscription(*ec2.CreateSpotDatafeedSubscriptionInput) (*ec2.CreateSpotDatafeedSubscriptionOutput, error)
CreateSubnetRequest(*ec2.CreateSubnetInput) (*request.Request, *ec2.CreateSubnetOutput)
CreateSubnet(*ec2.CreateSubnetInput) (*ec2.CreateSubnetOutput, error)
CreateTagsRequest(*ec2.CreateTagsInput) (*request.Request, *ec2.CreateTagsOutput)
CreateTags(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error)
CreateVolumeRequest(*ec2.CreateVolumeInput) (*request.Request, *ec2.Volume)
CreateVolume(*ec2.CreateVolumeInput) (*ec2.Volume, error)
CreateVpcRequest(*ec2.CreateVpcInput) (*request.Request, *ec2.CreateVpcOutput)
CreateVpc(*ec2.CreateVpcInput) (*ec2.CreateVpcOutput, error)
CreateVpcEndpointRequest(*ec2.CreateVpcEndpointInput) (*request.Request, *ec2.CreateVpcEndpointOutput)
CreateVpcEndpoint(*ec2.CreateVpcEndpointInput) (*ec2.CreateVpcEndpointOutput, error)
CreateVpcPeeringConnectionRequest(*ec2.CreateVpcPeeringConnectionInput) (*request.Request, *ec2.CreateVpcPeeringConnectionOutput)
CreateVpcPeeringConnection(*ec2.CreateVpcPeeringConnectionInput) (*ec2.CreateVpcPeeringConnectionOutput, error)
CreateVpnConnectionRequest(*ec2.CreateVpnConnectionInput) (*request.Request, *ec2.CreateVpnConnectionOutput)
CreateVpnConnection(*ec2.CreateVpnConnectionInput) (*ec2.CreateVpnConnectionOutput, error)
CreateVpnConnectionRouteRequest(*ec2.CreateVpnConnectionRouteInput) (*request.Request, *ec2.CreateVpnConnectionRouteOutput)
CreateVpnConnectionRoute(*ec2.CreateVpnConnectionRouteInput) (*ec2.CreateVpnConnectionRouteOutput, error)
CreateVpnGatewayRequest(*ec2.CreateVpnGatewayInput) (*request.Request, *ec2.CreateVpnGatewayOutput)
CreateVpnGateway(*ec2.CreateVpnGatewayInput) (*ec2.CreateVpnGatewayOutput, error)
DeleteCustomerGatewayRequest(*ec2.DeleteCustomerGatewayInput) (*request.Request, *ec2.DeleteCustomerGatewayOutput)
DeleteCustomerGateway(*ec2.DeleteCustomerGatewayInput) (*ec2.DeleteCustomerGatewayOutput, error)
DeleteDhcpOptionsRequest(*ec2.DeleteDhcpOptionsInput) (*request.Request, *ec2.DeleteDhcpOptionsOutput)
DeleteDhcpOptions(*ec2.DeleteDhcpOptionsInput) (*ec2.DeleteDhcpOptionsOutput, error)
DeleteFlowLogsRequest(*ec2.DeleteFlowLogsInput) (*request.Request, *ec2.DeleteFlowLogsOutput)
DeleteFlowLogs(*ec2.DeleteFlowLogsInput) (*ec2.DeleteFlowLogsOutput, error)
DeleteInternetGatewayRequest(*ec2.DeleteInternetGatewayInput) (*request.Request, *ec2.DeleteInternetGatewayOutput)
DeleteInternetGateway(*ec2.DeleteInternetGatewayInput) (*ec2.DeleteInternetGatewayOutput, error)
DeleteKeyPairRequest(*ec2.DeleteKeyPairInput) (*request.Request, *ec2.DeleteKeyPairOutput)
DeleteKeyPair(*ec2.DeleteKeyPairInput) (*ec2.DeleteKeyPairOutput, error)
DeleteNatGatewayRequest(*ec2.DeleteNatGatewayInput) (*request.Request, *ec2.DeleteNatGatewayOutput)
DeleteNatGateway(*ec2.DeleteNatGatewayInput) (*ec2.DeleteNatGatewayOutput, error)
DeleteNetworkAclRequest(*ec2.DeleteNetworkAclInput) (*request.Request, *ec2.DeleteNetworkAclOutput)
DeleteNetworkAcl(*ec2.DeleteNetworkAclInput) (*ec2.DeleteNetworkAclOutput, error)
DeleteNetworkAclEntryRequest(*ec2.DeleteNetworkAclEntryInput) (*request.Request, *ec2.DeleteNetworkAclEntryOutput)
DeleteNetworkAclEntry(*ec2.DeleteNetworkAclEntryInput) (*ec2.DeleteNetworkAclEntryOutput, error)
DeleteNetworkInterfaceRequest(*ec2.DeleteNetworkInterfaceInput) (*request.Request, *ec2.DeleteNetworkInterfaceOutput)
DeleteNetworkInterface(*ec2.DeleteNetworkInterfaceInput) (*ec2.DeleteNetworkInterfaceOutput, error)
DeletePlacementGroupRequest(*ec2.DeletePlacementGroupInput) (*request.Request, *ec2.DeletePlacementGroupOutput)
DeletePlacementGroup(*ec2.DeletePlacementGroupInput) (*ec2.DeletePlacementGroupOutput, error)
DeleteRouteRequest(*ec2.DeleteRouteInput) (*request.Request, *ec2.DeleteRouteOutput)
DeleteRoute(*ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error)
DeleteRouteTableRequest(*ec2.DeleteRouteTableInput) (*request.Request, *ec2.DeleteRouteTableOutput)
DeleteRouteTable(*ec2.DeleteRouteTableInput) (*ec2.DeleteRouteTableOutput, error)
DeleteSecurityGroupRequest(*ec2.DeleteSecurityGroupInput) (*request.Request, *ec2.DeleteSecurityGroupOutput)
DeleteSecurityGroup(*ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error)
DeleteSnapshotRequest(*ec2.DeleteSnapshotInput) (*request.Request, *ec2.DeleteSnapshotOutput)
DeleteSnapshot(*ec2.DeleteSnapshotInput) (*ec2.DeleteSnapshotOutput, error)
DeleteSpotDatafeedSubscriptionRequest(*ec2.DeleteSpotDatafeedSubscriptionInput) (*request.Request, *ec2.DeleteSpotDatafeedSubscriptionOutput)
DeleteSpotDatafeedSubscription(*ec2.DeleteSpotDatafeedSubscriptionInput) (*ec2.DeleteSpotDatafeedSubscriptionOutput, error)
DeleteSubnetRequest(*ec2.DeleteSubnetInput) (*request.Request, *ec2.DeleteSubnetOutput)
DeleteSubnet(*ec2.DeleteSubnetInput) (*ec2.DeleteSubnetOutput, error)
DeleteTagsRequest(*ec2.DeleteTagsInput) (*request.Request, *ec2.DeleteTagsOutput)
DeleteTags(*ec2.DeleteTagsInput) (*ec2.DeleteTagsOutput, error)
DeleteVolumeRequest(*ec2.DeleteVolumeInput) (*request.Request, *ec2.DeleteVolumeOutput)
DeleteVolume(*ec2.DeleteVolumeInput) (*ec2.DeleteVolumeOutput, error)
DeleteVpcRequest(*ec2.DeleteVpcInput) (*request.Request, *ec2.DeleteVpcOutput)
DeleteVpc(*ec2.DeleteVpcInput) (*ec2.DeleteVpcOutput, error)
DeleteVpcEndpointsRequest(*ec2.DeleteVpcEndpointsInput) (*request.Request, *ec2.DeleteVpcEndpointsOutput)
DeleteVpcEndpoints(*ec2.DeleteVpcEndpointsInput) (*ec2.DeleteVpcEndpointsOutput, error)
DeleteVpcPeeringConnectionRequest(*ec2.DeleteVpcPeeringConnectionInput) (*request.Request, *ec2.DeleteVpcPeeringConnectionOutput)
DeleteVpcPeeringConnection(*ec2.DeleteVpcPeeringConnectionInput) (*ec2.DeleteVpcPeeringConnectionOutput, error)
DeleteVpnConnectionRequest(*ec2.DeleteVpnConnectionInput) (*request.Request, *ec2.DeleteVpnConnectionOutput)
DeleteVpnConnection(*ec2.DeleteVpnConnectionInput) (*ec2.DeleteVpnConnectionOutput, error)
DeleteVpnConnectionRouteRequest(*ec2.DeleteVpnConnectionRouteInput) (*request.Request, *ec2.DeleteVpnConnectionRouteOutput)
DeleteVpnConnectionRoute(*ec2.DeleteVpnConnectionRouteInput) (*ec2.DeleteVpnConnectionRouteOutput, error)
DeleteVpnGatewayRequest(*ec2.DeleteVpnGatewayInput) (*request.Request, *ec2.DeleteVpnGatewayOutput)
DeleteVpnGateway(*ec2.DeleteVpnGatewayInput) (*ec2.DeleteVpnGatewayOutput, error)
DeregisterImageRequest(*ec2.DeregisterImageInput) (*request.Request, *ec2.DeregisterImageOutput)
DeregisterImage(*ec2.DeregisterImageInput) (*ec2.DeregisterImageOutput, error)
DescribeAccountAttributesRequest(*ec2.DescribeAccountAttributesInput) (*request.Request, *ec2.DescribeAccountAttributesOutput)
DescribeAccountAttributes(*ec2.DescribeAccountAttributesInput) (*ec2.DescribeAccountAttributesOutput, error)
DescribeAddressesRequest(*ec2.DescribeAddressesInput) (*request.Request, *ec2.DescribeAddressesOutput)
DescribeAddresses(*ec2.DescribeAddressesInput) (*ec2.DescribeAddressesOutput, error)
DescribeAvailabilityZonesRequest(*ec2.DescribeAvailabilityZonesInput) (*request.Request, *ec2.DescribeAvailabilityZonesOutput)
DescribeAvailabilityZones(*ec2.DescribeAvailabilityZonesInput) (*ec2.DescribeAvailabilityZonesOutput, error)
DescribeBundleTasksRequest(*ec2.DescribeBundleTasksInput) (*request.Request, *ec2.DescribeBundleTasksOutput)
DescribeBundleTasks(*ec2.DescribeBundleTasksInput) (*ec2.DescribeBundleTasksOutput, error)
DescribeClassicLinkInstancesRequest(*ec2.DescribeClassicLinkInstancesInput) (*request.Request, *ec2.DescribeClassicLinkInstancesOutput)
DescribeClassicLinkInstances(*ec2.DescribeClassicLinkInstancesInput) (*ec2.DescribeClassicLinkInstancesOutput, error)
DescribeConversionTasksRequest(*ec2.DescribeConversionTasksInput) (*request.Request, *ec2.DescribeConversionTasksOutput)
DescribeConversionTasks(*ec2.DescribeConversionTasksInput) (*ec2.DescribeConversionTasksOutput, error)
DescribeCustomerGatewaysRequest(*ec2.DescribeCustomerGatewaysInput) (*request.Request, *ec2.DescribeCustomerGatewaysOutput)
DescribeCustomerGateways(*ec2.DescribeCustomerGatewaysInput) (*ec2.DescribeCustomerGatewaysOutput, error)
DescribeDhcpOptionsRequest(*ec2.DescribeDhcpOptionsInput) (*request.Request, *ec2.DescribeDhcpOptionsOutput)
DescribeDhcpOptions(*ec2.DescribeDhcpOptionsInput) (*ec2.DescribeDhcpOptionsOutput, error)
DescribeExportTasksRequest(*ec2.DescribeExportTasksInput) (*request.Request, *ec2.DescribeExportTasksOutput)
DescribeExportTasks(*ec2.DescribeExportTasksInput) (*ec2.DescribeExportTasksOutput, error)
DescribeFlowLogsRequest(*ec2.DescribeFlowLogsInput) (*request.Request, *ec2.DescribeFlowLogsOutput)
DescribeFlowLogs(*ec2.DescribeFlowLogsInput) (*ec2.DescribeFlowLogsOutput, error)
DescribeHostsRequest(*ec2.DescribeHostsInput) (*request.Request, *ec2.DescribeHostsOutput)
DescribeHosts(*ec2.DescribeHostsInput) (*ec2.DescribeHostsOutput, error)
DescribeIdFormatRequest(*ec2.DescribeIdFormatInput) (*request.Request, *ec2.DescribeIdFormatOutput)
DescribeIdFormat(*ec2.DescribeIdFormatInput) (*ec2.DescribeIdFormatOutput, error)
DescribeImageAttributeRequest(*ec2.DescribeImageAttributeInput) (*request.Request, *ec2.DescribeImageAttributeOutput)
DescribeImageAttribute(*ec2.DescribeImageAttributeInput) (*ec2.DescribeImageAttributeOutput, error)
DescribeImagesRequest(*ec2.DescribeImagesInput) (*request.Request, *ec2.DescribeImagesOutput)
DescribeImages(*ec2.DescribeImagesInput) (*ec2.DescribeImagesOutput, error)
DescribeImportImageTasksRequest(*ec2.DescribeImportImageTasksInput) (*request.Request, *ec2.DescribeImportImageTasksOutput)
DescribeImportImageTasks(*ec2.DescribeImportImageTasksInput) (*ec2.DescribeImportImageTasksOutput, error)
DescribeImportSnapshotTasksRequest(*ec2.DescribeImportSnapshotTasksInput) (*request.Request, *ec2.DescribeImportSnapshotTasksOutput)
DescribeImportSnapshotTasks(*ec2.DescribeImportSnapshotTasksInput) (*ec2.DescribeImportSnapshotTasksOutput, error)
DescribeInstanceAttributeRequest(*ec2.DescribeInstanceAttributeInput) (*request.Request, *ec2.DescribeInstanceAttributeOutput)
DescribeInstanceAttribute(*ec2.DescribeInstanceAttributeInput) (*ec2.DescribeInstanceAttributeOutput, error)
DescribeInstanceStatusRequest(*ec2.DescribeInstanceStatusInput) (*request.Request, *ec2.DescribeInstanceStatusOutput)
DescribeInstanceStatus(*ec2.DescribeInstanceStatusInput) (*ec2.DescribeInstanceStatusOutput, error)
DescribeInstanceStatusPages(*ec2.DescribeInstanceStatusInput, func(*ec2.DescribeInstanceStatusOutput, bool) bool) error
DescribeInstancesRequest(*ec2.DescribeInstancesInput) (*request.Request, *ec2.DescribeInstancesOutput)
DescribeInstances(*ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error)
DescribeInstancesPages(*ec2.DescribeInstancesInput, func(*ec2.DescribeInstancesOutput, bool) bool) error
DescribeInternetGatewaysRequest(*ec2.DescribeInternetGatewaysInput) (*request.Request, *ec2.DescribeInternetGatewaysOutput)
DescribeInternetGateways(*ec2.DescribeInternetGatewaysInput) (*ec2.DescribeInternetGatewaysOutput, error)
DescribeKeyPairsRequest(*ec2.DescribeKeyPairsInput) (*request.Request, *ec2.DescribeKeyPairsOutput)
DescribeKeyPairs(*ec2.DescribeKeyPairsInput) (*ec2.DescribeKeyPairsOutput, error)
DescribeMovingAddressesRequest(*ec2.DescribeMovingAddressesInput) (*request.Request, *ec2.DescribeMovingAddressesOutput)
DescribeMovingAddresses(*ec2.DescribeMovingAddressesInput) (*ec2.DescribeMovingAddressesOutput, error)
DescribeNatGatewaysRequest(*ec2.DescribeNatGatewaysInput) (*request.Request, *ec2.DescribeNatGatewaysOutput)
DescribeNatGateways(*ec2.DescribeNatGatewaysInput) (*ec2.DescribeNatGatewaysOutput, error)
DescribeNetworkAclsRequest(*ec2.DescribeNetworkAclsInput) (*request.Request, *ec2.DescribeNetworkAclsOutput)
DescribeNetworkAcls(*ec2.DescribeNetworkAclsInput) (*ec2.DescribeNetworkAclsOutput, error)
DescribeNetworkInterfaceAttributeRequest(*ec2.DescribeNetworkInterfaceAttributeInput) (*request.Request, *ec2.DescribeNetworkInterfaceAttributeOutput)
DescribeNetworkInterfaceAttribute(*ec2.DescribeNetworkInterfaceAttributeInput) (*ec2.DescribeNetworkInterfaceAttributeOutput, error)
DescribeNetworkInterfacesRequest(*ec2.DescribeNetworkInterfacesInput) (*request.Request, *ec2.DescribeNetworkInterfacesOutput)
DescribeNetworkInterfaces(*ec2.DescribeNetworkInterfacesInput) (*ec2.DescribeNetworkInterfacesOutput, error)
DescribePlacementGroupsRequest(*ec2.DescribePlacementGroupsInput) (*request.Request, *ec2.DescribePlacementGroupsOutput)
DescribePlacementGroups(*ec2.DescribePlacementGroupsInput) (*ec2.DescribePlacementGroupsOutput, error)
DescribePrefixListsRequest(*ec2.DescribePrefixListsInput) (*request.Request, *ec2.DescribePrefixListsOutput)
DescribePrefixLists(*ec2.DescribePrefixListsInput) (*ec2.DescribePrefixListsOutput, error)
DescribeRegionsRequest(*ec2.DescribeRegionsInput) (*request.Request, *ec2.DescribeRegionsOutput)
DescribeRegions(*ec2.DescribeRegionsInput) (*ec2.DescribeRegionsOutput, error)
DescribeReservedInstancesRequest(*ec2.DescribeReservedInstancesInput) (*request.Request, *ec2.DescribeReservedInstancesOutput)
DescribeReservedInstances(*ec2.DescribeReservedInstancesInput) (*ec2.DescribeReservedInstancesOutput, error)
DescribeReservedInstancesListingsRequest(*ec2.DescribeReservedInstancesListingsInput) (*request.Request, *ec2.DescribeReservedInstancesListingsOutput)
DescribeReservedInstancesListings(*ec2.DescribeReservedInstancesListingsInput) (*ec2.DescribeReservedInstancesListingsOutput, error)
DescribeReservedInstancesModificationsRequest(*ec2.DescribeReservedInstancesModificationsInput) (*request.Request, *ec2.DescribeReservedInstancesModificationsOutput)
DescribeReservedInstancesModifications(*ec2.DescribeReservedInstancesModificationsInput) (*ec2.DescribeReservedInstancesModificationsOutput, error)
DescribeReservedInstancesModificationsPages(*ec2.DescribeReservedInstancesModificationsInput, func(*ec2.DescribeReservedInstancesModificationsOutput, bool) bool) error
DescribeReservedInstancesOfferingsRequest(*ec2.DescribeReservedInstancesOfferingsInput) (*request.Request, *ec2.DescribeReservedInstancesOfferingsOutput)
DescribeReservedInstancesOfferings(*ec2.DescribeReservedInstancesOfferingsInput) (*ec2.DescribeReservedInstancesOfferingsOutput, error)
DescribeReservedInstancesOfferingsPages(*ec2.DescribeReservedInstancesOfferingsInput, func(*ec2.DescribeReservedInstancesOfferingsOutput, bool) bool) error
DescribeRouteTablesRequest(*ec2.DescribeRouteTablesInput) (*request.Request, *ec2.DescribeRouteTablesOutput)
DescribeRouteTables(*ec2.DescribeRouteTablesInput) (*ec2.DescribeRouteTablesOutput, error)
DescribeSecurityGroupsRequest(*ec2.DescribeSecurityGroupsInput) (*request.Request, *ec2.DescribeSecurityGroupsOutput)
DescribeSecurityGroups(*ec2.DescribeSecurityGroupsInput) (*ec2.DescribeSecurityGroupsOutput, error)
DescribeSnapshotAttributeRequest(*ec2.DescribeSnapshotAttributeInput) (*request.Request, *ec2.DescribeSnapshotAttributeOutput)
DescribeSnapshotAttribute(*ec2.DescribeSnapshotAttributeInput) (*ec2.DescribeSnapshotAttributeOutput, error)
DescribeSnapshotsRequest(*ec2.DescribeSnapshotsInput) (*request.Request, *ec2.DescribeSnapshotsOutput)
DescribeSnapshots(*ec2.DescribeSnapshotsInput) (*ec2.DescribeSnapshotsOutput, error)
DescribeSnapshotsPages(*ec2.DescribeSnapshotsInput, func(*ec2.DescribeSnapshotsOutput, bool) bool) error
DescribeSpotDatafeedSubscriptionRequest(*ec2.DescribeSpotDatafeedSubscriptionInput) (*request.Request, *ec2.DescribeSpotDatafeedSubscriptionOutput)
DescribeSpotDatafeedSubscription(*ec2.DescribeSpotDatafeedSubscriptionInput) (*ec2.DescribeSpotDatafeedSubscriptionOutput, error)
DescribeSpotFleetInstancesRequest(*ec2.DescribeSpotFleetInstancesInput) (*request.Request, *ec2.DescribeSpotFleetInstancesOutput)
DescribeSpotFleetInstances(*ec2.DescribeSpotFleetInstancesInput) (*ec2.DescribeSpotFleetInstancesOutput, error)
DescribeSpotFleetRequestHistoryRequest(*ec2.DescribeSpotFleetRequestHistoryInput) (*request.Request, *ec2.DescribeSpotFleetRequestHistoryOutput)
DescribeSpotFleetRequestHistory(*ec2.DescribeSpotFleetRequestHistoryInput) (*ec2.DescribeSpotFleetRequestHistoryOutput, error)
DescribeSpotFleetRequestsRequest(*ec2.DescribeSpotFleetRequestsInput) (*request.Request, *ec2.DescribeSpotFleetRequestsOutput)
DescribeSpotFleetRequests(*ec2.DescribeSpotFleetRequestsInput) (*ec2.DescribeSpotFleetRequestsOutput, error)
DescribeSpotInstanceRequestsRequest(*ec2.DescribeSpotInstanceRequestsInput) (*request.Request, *ec2.DescribeSpotInstanceRequestsOutput)
DescribeSpotInstanceRequests(*ec2.DescribeSpotInstanceRequestsInput) (*ec2.DescribeSpotInstanceRequestsOutput, error)
DescribeSpotPriceHistoryRequest(*ec2.DescribeSpotPriceHistoryInput) (*request.Request, *ec2.DescribeSpotPriceHistoryOutput)
DescribeSpotPriceHistory(*ec2.DescribeSpotPriceHistoryInput) (*ec2.DescribeSpotPriceHistoryOutput, error)
DescribeSpotPriceHistoryPages(*ec2.DescribeSpotPriceHistoryInput, func(*ec2.DescribeSpotPriceHistoryOutput, bool) bool) error
DescribeSubnetsRequest(*ec2.DescribeSubnetsInput) (*request.Request, *ec2.DescribeSubnetsOutput)
DescribeSubnets(*ec2.DescribeSubnetsInput) (*ec2.DescribeSubnetsOutput, error)
DescribeTagsRequest(*ec2.DescribeTagsInput) (*request.Request, *ec2.DescribeTagsOutput)
DescribeTags(*ec2.DescribeTagsInput) (*ec2.DescribeTagsOutput, error)
DescribeTagsPages(*ec2.DescribeTagsInput, func(*ec2.DescribeTagsOutput, bool) bool) error
DescribeVolumeAttributeRequest(*ec2.DescribeVolumeAttributeInput) (*request.Request, *ec2.DescribeVolumeAttributeOutput)
DescribeVolumeAttribute(*ec2.DescribeVolumeAttributeInput) (*ec2.DescribeVolumeAttributeOutput, error)
DescribeVolumeStatusRequest(*ec2.DescribeVolumeStatusInput) (*request.Request, *ec2.DescribeVolumeStatusOutput)
DescribeVolumeStatus(*ec2.DescribeVolumeStatusInput) (*ec2.DescribeVolumeStatusOutput, error)
DescribeVolumeStatusPages(*ec2.DescribeVolumeStatusInput, func(*ec2.DescribeVolumeStatusOutput, bool) bool) error
DescribeVolumesRequest(*ec2.DescribeVolumesInput) (*request.Request, *ec2.DescribeVolumesOutput)
DescribeVolumes(*ec2.DescribeVolumesInput) (*ec2.DescribeVolumesOutput, error)
DescribeVolumesPages(*ec2.DescribeVolumesInput, func(*ec2.DescribeVolumesOutput, bool) bool) error
DescribeVpcAttributeRequest(*ec2.DescribeVpcAttributeInput) (*request.Request, *ec2.DescribeVpcAttributeOutput)
DescribeVpcAttribute(*ec2.DescribeVpcAttributeInput) (*ec2.DescribeVpcAttributeOutput, error)
DescribeVpcClassicLinkRequest(*ec2.DescribeVpcClassicLinkInput) (*request.Request, *ec2.DescribeVpcClassicLinkOutput)
DescribeVpcClassicLink(*ec2.DescribeVpcClassicLinkInput) (*ec2.DescribeVpcClassicLinkOutput, error)
DescribeVpcEndpointServicesRequest(*ec2.DescribeVpcEndpointServicesInput) (*request.Request, *ec2.DescribeVpcEndpointServicesOutput)
DescribeVpcEndpointServices(*ec2.DescribeVpcEndpointServicesInput) (*ec2.DescribeVpcEndpointServicesOutput, error)
DescribeVpcEndpointsRequest(*ec2.DescribeVpcEndpointsInput) (*request.Request, *ec2.DescribeVpcEndpointsOutput)
DescribeVpcEndpoints(*ec2.DescribeVpcEndpointsInput) (*ec2.DescribeVpcEndpointsOutput, error)
DescribeVpcPeeringConnectionsRequest(*ec2.DescribeVpcPeeringConnectionsInput) (*request.Request, *ec2.DescribeVpcPeeringConnectionsOutput)
DescribeVpcPeeringConnections(*ec2.DescribeVpcPeeringConnectionsInput) (*ec2.DescribeVpcPeeringConnectionsOutput, error)
DescribeVpcsRequest(*ec2.DescribeVpcsInput) (*request.Request, *ec2.DescribeVpcsOutput)
DescribeVpcs(*ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error)
DescribeVpnConnectionsRequest(*ec2.DescribeVpnConnectionsInput) (*request.Request, *ec2.DescribeVpnConnectionsOutput)
DescribeVpnConnections(*ec2.DescribeVpnConnectionsInput) (*ec2.DescribeVpnConnectionsOutput, error)
DescribeVpnGatewaysRequest(*ec2.DescribeVpnGatewaysInput) (*request.Request, *ec2.DescribeVpnGatewaysOutput)
DescribeVpnGateways(*ec2.DescribeVpnGatewaysInput) (*ec2.DescribeVpnGatewaysOutput, error)
DetachClassicLinkVpcRequest(*ec2.DetachClassicLinkVpcInput) (*request.Request, *ec2.DetachClassicLinkVpcOutput)
DetachClassicLinkVpc(*ec2.DetachClassicLinkVpcInput) (*ec2.DetachClassicLinkVpcOutput, error)
DetachInternetGatewayRequest(*ec2.DetachInternetGatewayInput) (*request.Request, *ec2.DetachInternetGatewayOutput)
DetachInternetGateway(*ec2.DetachInternetGatewayInput) (*ec2.DetachInternetGatewayOutput, error)
DetachNetworkInterfaceRequest(*ec2.DetachNetworkInterfaceInput) (*request.Request, *ec2.DetachNetworkInterfaceOutput)
DetachNetworkInterface(*ec2.DetachNetworkInterfaceInput) (*ec2.DetachNetworkInterfaceOutput, error)
DetachVolumeRequest(*ec2.DetachVolumeInput) (*request.Request, *ec2.VolumeAttachment)
DetachVolume(*ec2.DetachVolumeInput) (*ec2.VolumeAttachment, error)
DetachVpnGatewayRequest(*ec2.DetachVpnGatewayInput) (*request.Request, *ec2.DetachVpnGatewayOutput)
DetachVpnGateway(*ec2.DetachVpnGatewayInput) (*ec2.DetachVpnGatewayOutput, error)
DisableVgwRoutePropagationRequest(*ec2.DisableVgwRoutePropagationInput) (*request.Request, *ec2.DisableVgwRoutePropagationOutput)
DisableVgwRoutePropagation(*ec2.DisableVgwRoutePropagationInput) (*ec2.DisableVgwRoutePropagationOutput, error)
DisableVpcClassicLinkRequest(*ec2.DisableVpcClassicLinkInput) (*request.Request, *ec2.DisableVpcClassicLinkOutput)
DisableVpcClassicLink(*ec2.DisableVpcClassicLinkInput) (*ec2.DisableVpcClassicLinkOutput, error)
DisassociateAddressRequest(*ec2.DisassociateAddressInput) (*request.Request, *ec2.DisassociateAddressOutput)
DisassociateAddress(*ec2.DisassociateAddressInput) (*ec2.DisassociateAddressOutput, error)
DisassociateRouteTableRequest(*ec2.DisassociateRouteTableInput) (*request.Request, *ec2.DisassociateRouteTableOutput)
DisassociateRouteTable(*ec2.DisassociateRouteTableInput) (*ec2.DisassociateRouteTableOutput, error)
EnableVgwRoutePropagationRequest(*ec2.EnableVgwRoutePropagationInput) (*request.Request, *ec2.EnableVgwRoutePropagationOutput)
EnableVgwRoutePropagation(*ec2.EnableVgwRoutePropagationInput) (*ec2.EnableVgwRoutePropagationOutput, error)
EnableVolumeIORequest(*ec2.EnableVolumeIOInput) (*request.Request, *ec2.EnableVolumeIOOutput)
EnableVolumeIO(*ec2.EnableVolumeIOInput) (*ec2.EnableVolumeIOOutput, error)
EnableVpcClassicLinkRequest(*ec2.EnableVpcClassicLinkInput) (*request.Request, *ec2.EnableVpcClassicLinkOutput)
EnableVpcClassicLink(*ec2.EnableVpcClassicLinkInput) (*ec2.EnableVpcClassicLinkOutput, error)
GetConsoleOutputRequest(*ec2.GetConsoleOutputInput) (*request.Request, *ec2.GetConsoleOutputOutput)
GetConsoleOutput(*ec2.GetConsoleOutputInput) (*ec2.GetConsoleOutputOutput, error)
GetPasswordDataRequest(*ec2.GetPasswordDataInput) (*request.Request, *ec2.GetPasswordDataOutput)
GetPasswordData(*ec2.GetPasswordDataInput) (*ec2.GetPasswordDataOutput, error)
ImportImageRequest(*ec2.ImportImageInput) (*request.Request, *ec2.ImportImageOutput)
ImportImage(*ec2.ImportImageInput) (*ec2.ImportImageOutput, error)
ImportInstanceRequest(*ec2.ImportInstanceInput) (*request.Request, *ec2.ImportInstanceOutput)
ImportInstance(*ec2.ImportInstanceInput) (*ec2.ImportInstanceOutput, error)
ImportKeyPairRequest(*ec2.ImportKeyPairInput) (*request.Request, *ec2.ImportKeyPairOutput)
ImportKeyPair(*ec2.ImportKeyPairInput) (*ec2.ImportKeyPairOutput, error)
ImportSnapshotRequest(*ec2.ImportSnapshotInput) (*request.Request, *ec2.ImportSnapshotOutput)
ImportSnapshot(*ec2.ImportSnapshotInput) (*ec2.ImportSnapshotOutput, error)
ImportVolumeRequest(*ec2.ImportVolumeInput) (*request.Request, *ec2.ImportVolumeOutput)
ImportVolume(*ec2.ImportVolumeInput) (*ec2.ImportVolumeOutput, error)
ModifyHostsRequest(*ec2.ModifyHostsInput) (*request.Request, *ec2.ModifyHostsOutput)
ModifyHosts(*ec2.ModifyHostsInput) (*ec2.ModifyHostsOutput, error)
ModifyIdFormatRequest(*ec2.ModifyIdFormatInput) (*request.Request, *ec2.ModifyIdFormatOutput)
ModifyIdFormat(*ec2.ModifyIdFormatInput) (*ec2.ModifyIdFormatOutput, error)
ModifyImageAttributeRequest(*ec2.ModifyImageAttributeInput) (*request.Request, *ec2.ModifyImageAttributeOutput)
ModifyImageAttribute(*ec2.ModifyImageAttributeInput) (*ec2.ModifyImageAttributeOutput, error)
ModifyInstanceAttributeRequest(*ec2.ModifyInstanceAttributeInput) (*request.Request, *ec2.ModifyInstanceAttributeOutput)
ModifyInstanceAttribute(*ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error)
ModifyInstancePlacementRequest(*ec2.ModifyInstancePlacementInput) (*request.Request, *ec2.ModifyInstancePlacementOutput)
ModifyInstancePlacement(*ec2.ModifyInstancePlacementInput) (*ec2.ModifyInstancePlacementOutput, error)
ModifyNetworkInterfaceAttributeRequest(*ec2.ModifyNetworkInterfaceAttributeInput) (*request.Request, *ec2.ModifyNetworkInterfaceAttributeOutput)
ModifyNetworkInterfaceAttribute(*ec2.ModifyNetworkInterfaceAttributeInput) (*ec2.ModifyNetworkInterfaceAttributeOutput, error)
ModifyReservedInstancesRequest(*ec2.ModifyReservedInstancesInput) (*request.Request, *ec2.ModifyReservedInstancesOutput)
ModifyReservedInstances(*ec2.ModifyReservedInstancesInput) (*ec2.ModifyReservedInstancesOutput, error)
ModifySnapshotAttributeRequest(*ec2.ModifySnapshotAttributeInput) (*request.Request, *ec2.ModifySnapshotAttributeOutput)
ModifySnapshotAttribute(*ec2.ModifySnapshotAttributeInput) (*ec2.ModifySnapshotAttributeOutput, error)
ModifySpotFleetRequestRequest(*ec2.ModifySpotFleetRequestInput) (*request.Request, *ec2.ModifySpotFleetRequestOutput)
ModifySpotFleetRequest(*ec2.ModifySpotFleetRequestInput) (*ec2.ModifySpotFleetRequestOutput, error)
ModifySubnetAttributeRequest(*ec2.ModifySubnetAttributeInput) (*request.Request, *ec2.ModifySubnetAttributeOutput)
ModifySubnetAttribute(*ec2.ModifySubnetAttributeInput) (*ec2.ModifySubnetAttributeOutput, error)
ModifyVolumeAttributeRequest(*ec2.ModifyVolumeAttributeInput) (*request.Request, *ec2.ModifyVolumeAttributeOutput)
ModifyVolumeAttribute(*ec2.ModifyVolumeAttributeInput) (*ec2.ModifyVolumeAttributeOutput, error)
ModifyVpcAttributeRequest(*ec2.ModifyVpcAttributeInput) (*request.Request, *ec2.ModifyVpcAttributeOutput)
ModifyVpcAttribute(*ec2.ModifyVpcAttributeInput) (*ec2.ModifyVpcAttributeOutput, error)
ModifyVpcEndpointRequest(*ec2.ModifyVpcEndpointInput) (*request.Request, *ec2.ModifyVpcEndpointOutput)
ModifyVpcEndpoint(*ec2.ModifyVpcEndpointInput) (*ec2.ModifyVpcEndpointOutput, error)
MonitorInstancesRequest(*ec2.MonitorInstancesInput) (*request.Request, *ec2.MonitorInstancesOutput)
MonitorInstances(*ec2.MonitorInstancesInput) (*ec2.MonitorInstancesOutput, error)
MoveAddressToVpcRequest(*ec2.MoveAddressToVpcInput) (*request.Request, *ec2.MoveAddressToVpcOutput)
MoveAddressToVpc(*ec2.MoveAddressToVpcInput) (*ec2.MoveAddressToVpcOutput, error)
PurchaseReservedInstancesOfferingRequest(*ec2.PurchaseReservedInstancesOfferingInput) (*request.Request, *ec2.PurchaseReservedInstancesOfferingOutput)
PurchaseReservedInstancesOffering(*ec2.PurchaseReservedInstancesOfferingInput) (*ec2.PurchaseReservedInstancesOfferingOutput, error)
RebootInstancesRequest(*ec2.RebootInstancesInput) (*request.Request, *ec2.RebootInstancesOutput)
RebootInstances(*ec2.RebootInstancesInput) (*ec2.RebootInstancesOutput, error)
RegisterImageRequest(*ec2.RegisterImageInput) (*request.Request, *ec2.RegisterImageOutput)
RegisterImage(*ec2.RegisterImageInput) (*ec2.RegisterImageOutput, error)
RejectVpcPeeringConnectionRequest(*ec2.RejectVpcPeeringConnectionInput) (*request.Request, *ec2.RejectVpcPeeringConnectionOutput)
RejectVpcPeeringConnection(*ec2.RejectVpcPeeringConnectionInput) (*ec2.RejectVpcPeeringConnectionOutput, error)
ReleaseAddressRequest(*ec2.ReleaseAddressInput) (*request.Request, *ec2.ReleaseAddressOutput)
ReleaseAddress(*ec2.ReleaseAddressInput) (*ec2.ReleaseAddressOutput, error)
ReleaseHostsRequest(*ec2.ReleaseHostsInput) (*request.Request, *ec2.ReleaseHostsOutput)
ReleaseHosts(*ec2.ReleaseHostsInput) (*ec2.ReleaseHostsOutput, error)
ReplaceNetworkAclAssociationRequest(*ec2.ReplaceNetworkAclAssociationInput) (*request.Request, *ec2.ReplaceNetworkAclAssociationOutput)
ReplaceNetworkAclAssociation(*ec2.ReplaceNetworkAclAssociationInput) (*ec2.ReplaceNetworkAclAssociationOutput, error)
ReplaceNetworkAclEntryRequest(*ec2.ReplaceNetworkAclEntryInput) (*request.Request, *ec2.ReplaceNetworkAclEntryOutput)
ReplaceNetworkAclEntry(*ec2.ReplaceNetworkAclEntryInput) (*ec2.ReplaceNetworkAclEntryOutput, error)
ReplaceRouteRequest(*ec2.ReplaceRouteInput) (*request.Request, *ec2.ReplaceRouteOutput)
ReplaceRoute(*ec2.ReplaceRouteInput) (*ec2.ReplaceRouteOutput, error)
ReplaceRouteTableAssociationRequest(*ec2.ReplaceRouteTableAssociationInput) (*request.Request, *ec2.ReplaceRouteTableAssociationOutput)
ReplaceRouteTableAssociation(*ec2.ReplaceRouteTableAssociationInput) (*ec2.ReplaceRouteTableAssociationOutput, error)
ReportInstanceStatusRequest(*ec2.ReportInstanceStatusInput) (*request.Request, *ec2.ReportInstanceStatusOutput)
ReportInstanceStatus(*ec2.ReportInstanceStatusInput) (*ec2.ReportInstanceStatusOutput, error)
RequestSpotFleetRequest(*ec2.RequestSpotFleetInput) (*request.Request, *ec2.RequestSpotFleetOutput)
RequestSpotFleet(*ec2.RequestSpotFleetInput) (*ec2.RequestSpotFleetOutput, error)
RequestSpotInstancesRequest(*ec2.RequestSpotInstancesInput) (*request.Request, *ec2.RequestSpotInstancesOutput)
RequestSpotInstances(*ec2.RequestSpotInstancesInput) (*ec2.RequestSpotInstancesOutput, error)
ResetImageAttributeRequest(*ec2.ResetImageAttributeInput) (*request.Request, *ec2.ResetImageAttributeOutput)
ResetImageAttribute(*ec2.ResetImageAttributeInput) (*ec2.ResetImageAttributeOutput, error)
ResetInstanceAttributeRequest(*ec2.ResetInstanceAttributeInput) (*request.Request, *ec2.ResetInstanceAttributeOutput)
ResetInstanceAttribute(*ec2.ResetInstanceAttributeInput) (*ec2.ResetInstanceAttributeOutput, error)
ResetNetworkInterfaceAttributeRequest(*ec2.ResetNetworkInterfaceAttributeInput) (*request.Request, *ec2.ResetNetworkInterfaceAttributeOutput)
ResetNetworkInterfaceAttribute(*ec2.ResetNetworkInterfaceAttributeInput) (*ec2.ResetNetworkInterfaceAttributeOutput, error)
ResetSnapshotAttributeRequest(*ec2.ResetSnapshotAttributeInput) (*request.Request, *ec2.ResetSnapshotAttributeOutput)
ResetSnapshotAttribute(*ec2.ResetSnapshotAttributeInput) (*ec2.ResetSnapshotAttributeOutput, error)
RestoreAddressToClassicRequest(*ec2.RestoreAddressToClassicInput) (*request.Request, *ec2.RestoreAddressToClassicOutput)
RestoreAddressToClassic(*ec2.RestoreAddressToClassicInput) (*ec2.RestoreAddressToClassicOutput, error)
RevokeSecurityGroupEgressRequest(*ec2.RevokeSecurityGroupEgressInput) (*request.Request, *ec2.RevokeSecurityGroupEgressOutput)
RevokeSecurityGroupEgress(*ec2.RevokeSecurityGroupEgressInput) (*ec2.RevokeSecurityGroupEgressOutput, error)
RevokeSecurityGroupIngressRequest(*ec2.RevokeSecurityGroupIngressInput) (*request.Request, *ec2.RevokeSecurityGroupIngressOutput)
RevokeSecurityGroupIngress(*ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error)
RunInstancesRequest(*ec2.RunInstancesInput) (*request.Request, *ec2.Reservation)
RunInstances(*ec2.RunInstancesInput) (*ec2.Reservation, error)
StartInstancesRequest(*ec2.StartInstancesInput) (*request.Request, *ec2.StartInstancesOutput)
StartInstances(*ec2.StartInstancesInput) (*ec2.StartInstancesOutput, error)
StopInstancesRequest(*ec2.StopInstancesInput) (*request.Request, *ec2.StopInstancesOutput)
StopInstances(*ec2.StopInstancesInput) (*ec2.StopInstancesOutput, error)
TerminateInstancesRequest(*ec2.TerminateInstancesInput) (*request.Request, *ec2.TerminateInstancesOutput)
TerminateInstances(*ec2.TerminateInstancesInput) (*ec2.TerminateInstancesOutput, error)
UnassignPrivateIpAddressesRequest(*ec2.UnassignPrivateIpAddressesInput) (*request.Request, *ec2.UnassignPrivateIpAddressesOutput)
UnassignPrivateIpAddresses(*ec2.UnassignPrivateIpAddressesInput) (*ec2.UnassignPrivateIpAddressesOutput, error)
UnmonitorInstancesRequest(*ec2.UnmonitorInstancesInput) (*request.Request, *ec2.UnmonitorInstancesOutput)
UnmonitorInstances(*ec2.UnmonitorInstancesInput) (*ec2.UnmonitorInstancesOutput, error)
}
var _ EC2API = (*ec2.EC2)(nil)
| Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go | 0 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.000947029679082334,
0.0002607067581266165,
0.00016415900608990341,
0.00017916336946655065,
0.00017369749548379332
] |
{
"id": 1,
"code_window": [
"\t\"k8s.io/kubernetes/pkg/apis/componentconfig\"\n",
"\t\"k8s.io/kubernetes/pkg/kubelet/qos\"\n",
"\t\"k8s.io/kubernetes/pkg/util\"\n",
"\n"
],
"labels": [
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1\"\n"
],
"file_path": "cmd/kube-proxy/app/options/options.go",
"type": "add",
"edit_start_line_idx": 26
} | <!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
<!-- BEGIN STRIP_FOR_RELEASE -->
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
If you are using a released version of Kubernetes, you should
refer to the docs that go with that version.
Documentation for other releases can be found at
[releases.k8s.io](http://releases.k8s.io).
</strong>
--
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# Table of Contents
<!-- BEGIN MUNGE: GENERATED_TOC -->
- [Table of Contents](#table-of-contents)
- [Overview](#overview)
- [Adding a new docs collection for a release](#adding-a-new-docs-collection-for-a-release)
- [Updating docs in an existing collection](#updating-docs-in-an-existing-collection)
- [Updating docs on HEAD](#updating-docs-on-head)
- [Updating docs in release branch](#updating-docs-in-release-branch)
- [Updating docs in gh-pages branch](#updating-docs-in-gh-pages-branch)
<!-- END MUNGE: GENERATED_TOC -->
# Overview
This document explains how to update kubernetes release docs hosted at http://kubernetes.io/docs/.
http://kubernetes.io is served using the [gh-pages
branch](https://github.com/kubernetes/kubernetes/tree/gh-pages) of the kubernetes repo on GitHub.
Updating docs in that branch will update http://kubernetes.io
There are 2 scenarios which require updating docs:
* Adding a new docs collection for a release.
* Updating docs in an existing collection.
# Adding a new docs collection for a release
Whenever a new release series (`release-X.Y`) is cut from `master`, we push the
corresponding set of docs to `http://kubernetes.io/vX.Y/docs`. The steps are as follows:
* Create a `_vX.Y` folder in `gh-pages` branch.
* Add `vX.Y` as a valid collection in [_config.yml](https://github.com/kubernetes/kubernetes/blob/gh-pages/_config.yml)
* Create a new `_includes/nav_vX.Y.html` file with the navigation menu. This can
be a copy of `_includes/nav_vX.Y-1.html` with links to new docs added and links
  to deleted docs removed. Update [_layouts/docwithnav.html](https://github.com/kubernetes/kubernetes/blob/gh-pages/_layouts/docwithnav.html)
to include this new navigation html file. Example PR: [#16143](https://github.com/kubernetes/kubernetes/pull/16143).
* [Pull docs from release branch](#updating-docs-in-gh-pages-branch) in `_vX.Y`
folder.
Once these changes have been submitted, you should be able to reach the docs at
`http://kubernetes.io/vX.Y/docs/` where you can test them.
To make `X.Y` the default version of docs:
* Update [_config.yml](https://github.com/kubernetes/kubernetes/blob/gh-pages/_config.yml)
and [/kubernetes/kubernetes/blob/gh-pages/_docs/index.md](https://github.com/kubernetes/kubernetes/blob/gh-pages/_docs/index.md)
to point to the new version. Example PR: [#16416](https://github.com/kubernetes/kubernetes/pull/16416).
* Update [_includes/docversionselector.html](https://github.com/kubernetes/kubernetes/blob/gh-pages/_includes/docversionselector.html)
to make `vX.Y` the default version.
* Add "Disallow: /vX.Y-1/" to existing [robots.txt](https://github.com/kubernetes/kubernetes/blob/gh-pages/robots.txt)
file to hide old content from web crawlers and focus SEO on new docs. Example PR:
[#16388](https://github.com/kubernetes/kubernetes/pull/16388).
* Regenerate [sitemaps.xml](https://github.com/kubernetes/kubernetes/blob/gh-pages/sitemap.xml)
so that it now contains `vX.Y` links. Sitemap can be regenerated using
https://www.xml-sitemaps.com. Example PR: [#17126](https://github.com/kubernetes/kubernetes/pull/17126).
* Resubmit the updated sitemaps file to [Google
  webmasters](https://www.google.com/webmasters/tools/sitemap-list?siteUrl=http://kubernetes.io/) for Google to index the new links.
* Update [_layouts/docwithnav.html](https://github.com/kubernetes/kubernetes/blob/gh-pages/_layouts/docwithnav.html)
to include [_includes/archivedocnotice.html](https://github.com/kubernetes/kubernetes/blob/gh-pages/_includes/archivedocnotice.html)
for `vX.Y-1` docs which need to be archived.
* Ping @thockin to update docs.k8s.io to redirect to `http://kubernetes.io/vX.Y/`. [#18788](https://github.com/kubernetes/kubernetes/issues/18788).
http://kubernetes.io/docs/ should now be redirecting to `http://kubernetes.io/vX.Y/`.
# Updating docs in an existing collection
The high level steps to update docs in an existing collection are:
1. Update docs on `HEAD` (master branch)
2. Cherrypick the change into the relevant release branch.
3. Update docs on `gh-pages`.
## Updating docs on HEAD
[Development guide](development.md) provides general instructions on how to contribute to the kubernetes GitHub repo.
[Docs how to guide](how-to-doc.md) provides conventions to follow while writing docs.
## Updating docs in release branch
Once docs have been updated in the master branch, the changes need to be
cherrypicked into the latest release branch.
[Cherrypick guide](cherry-picks.md) has more details on how to cherrypick your change.
## Updating docs in gh-pages branch
Once the release branch has all the relevant changes, we can pull the latest docs
into the `gh-pages` branch.
Run the following command in the `gh-pages` branch to update docs for release `X.Y`:
```
_tools/import_docs vX.Y _vX.Y release-X.Y release-X.Y
```
For example, to pull in docs for release 1.1, run:
```
_tools/import_docs v1.1 _v1.1 release-1.1 release-1.1
```
Apart from copying over the docs, `_tools/import_docs` also does some post-processing
(like updating doc links to point to http://kubernetes.io/docs/ instead of the GitHub repo).
Note that we always pull in the docs from the release branch and not from master (pulling docs
from master requires some extra processing, like versioning the links and removing unversioned warnings).
We delete all existing docs before pulling in new ones to ensure that deleted
docs go away.
If the change added or deleted a doc, then update the corresponding `_includes/nav_vX.Y.html` file as well.
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/devel/update-release-docs.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
| docs/devel/update-release-docs.md | 0 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.0028790761716663837,
0.0006815690430812538,
0.00016242993297055364,
0.0003536221047397703,
0.0007667795871384442
] |
{
"id": 2,
"code_window": [
"\tMaster string\n",
"\tKubeconfig string\n",
"}\n",
"\n",
"func NewProxyConfig() *ProxyServerConfig {\n",
"\treturn &ProxyServerConfig{\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\tconfig := componentconfig.KubeProxyConfiguration{}\n",
"\tapi.Scheme.Convert(&v1alpha1.KubeProxyConfiguration{}, &config)\n"
],
"file_path": "cmd/kube-proxy/app/options/options.go",
"type": "add",
"edit_start_line_idx": 50
} | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"time"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) {
scheme.AddDefaultingFuncs(
func(obj *KubeProxyConfiguration) {
if obj.BindAddress == "" {
obj.BindAddress = "0.0.0.0"
}
if obj.HealthzPort == 0 {
obj.HealthzPort = 10249
}
if obj.HealthzBindAddress == "" {
obj.HealthzBindAddress = "127.0.0.1"
}
if obj.OOMScoreAdj == nil {
temp := int32(qos.KubeProxyOOMScoreAdj)
obj.OOMScoreAdj = &temp
}
if obj.IPTablesSyncPeriod.Duration == 0 {
obj.IPTablesSyncPeriod = unversioned.Duration{5 * time.Second}
}
},
)
}
| pkg/apis/componentconfig/v1alpha1/defaults.go | 1 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.9715912342071533,
0.19449612498283386,
0.00017157367256004363,
0.0001772026007529348,
0.38854753971099854
] |
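The `after_edit` lines in the record above construct the internal `KubeProxyConfiguration` by converting from an empty versioned `v1alpha1` object with `api.Scheme.Convert`; `defaults.go` registers the defaulting funcs for that versioned type. The sketch below walks a comparable versioned-default-then-convert flow by hand, with the scheme machinery left out; the two struct types, their fields, and the default values are illustrative assumptions rather than the actual Kubernetes API types.
```go
package main

import "fmt"

// v1alpha1Config and internalConfig are simplified stand-ins for the
// versioned and internal KubeProxyConfiguration types referenced above.
type v1alpha1Config struct {
	BindAddress string
	HealthzPort int32
}

type internalConfig struct {
	BindAddress string
	HealthzPort int32
}

// defaultConfig fills zero-valued fields, like the defaulting funcs above.
func defaultConfig(obj *v1alpha1Config) {
	if obj.BindAddress == "" {
		obj.BindAddress = "0.0.0.0"
	}
	if obj.HealthzPort == 0 {
		obj.HealthzPort = 10249
	}
}

// newProxyConfig sketches the flow suggested by the after_edit snippet: start
// from an empty versioned object, apply its defaults, and copy the result into
// the internal representation. The real code hands the conversion to
// api.Scheme.Convert; here both steps are written out by hand.
func newProxyConfig() internalConfig {
	versioned := v1alpha1Config{}
	defaultConfig(&versioned)
	return internalConfig{
		BindAddress: versioned.BindAddress,
		HealthzPort: versioned.HealthzPort,
	}
}

func main() {
	fmt.Printf("%+v\n", newProxyConfig())
}
```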
{
"id": 2,
"code_window": [
"\tMaster string\n",
"\tKubeconfig string\n",
"}\n",
"\n",
"func NewProxyConfig() *ProxyServerConfig {\n",
"\treturn &ProxyServerConfig{\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\tconfig := componentconfig.KubeProxyConfiguration{}\n",
"\tapi.Scheme.Convert(&v1alpha1.KubeProxyConfiguration{}, &config)\n"
],
"file_path": "cmd/kube-proxy/app/options/options.go",
"type": "add",
"edit_start_line_idx": 50
} | package main
import (
"github.com/elazarl/goproxy"
"github.com/elazarl/goproxy/ext/image"
"image"
"log"
"net/http"
)
func main() {
proxy := goproxy.NewProxyHttpServer()
proxy.OnResponse().Do(goproxy_image.HandleImage(func(img image.Image, ctx *goproxy.ProxyCtx) image.Image {
dx, dy := img.Bounds().Dx(), img.Bounds().Dy()
nimg := image.NewRGBA(img.Bounds())
for i := 0; i < dx; i++ {
			for j := 0; j < dy; j++ {
nimg.Set(i, j, img.At(i, dy-j-1))
}
}
return nimg
}))
proxy.Verbose = true
log.Fatal(http.ListenAndServe(":8080", proxy))
}
| Godeps/_workspace/src/github.com/elazarl/goproxy/examples/goproxy-upside-down-ternet/main.go | 0 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.02357693389058113,
0.007972556166350842,
0.00016874121502041817,
0.00017199495050590485,
0.011033961549401283
] |
{
"id": 2,
"code_window": [
"\tMaster string\n",
"\tKubeconfig string\n",
"}\n",
"\n",
"func NewProxyConfig() *ProxyServerConfig {\n",
"\treturn &ProxyServerConfig{\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\tconfig := componentconfig.KubeProxyConfiguration{}\n",
"\tapi.Scheme.Convert(&v1alpha1.KubeProxyConfiguration{}, &config)\n"
],
"file_path": "cmd/kube-proxy/app/options/options.go",
"type": "add",
"edit_start_line_idx": 50
} | // Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package walpb
import "errors"
var (
ErrCRCMismatch = errors.New("walpb: crc mismatch")
)
func (rec *Record) Validate(crc uint32) error {
if rec.Crc == crc {
return nil
}
rec.Reset()
return ErrCRCMismatch
}
| Godeps/_workspace/src/github.com/coreos/etcd/wal/walpb/record.go | 0 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.000188481601071544,
0.00018006545724347234,
0.0001738603023113683,
0.0001778544974513352,
0.00000617046089246287
] |
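For the `walpb` record shown above, `Validate` compares a stored CRC against one the reader recomputed. A minimal, stand-alone sketch of that check follows; it uses the standard library's `crc32.ChecksumIEEE`, whereas etcd's WAL uses its own CRC wrapper, so the checksum function here is only an assumption.
```go
package main

import (
	"errors"
	"fmt"
	"hash/crc32"
)

var errCRCMismatch = errors.New("walpb: crc mismatch")

// record is a simplified stand-in for the generated walpb.Record type.
type record struct {
	Crc  uint32
	Data []byte
}

// validate mirrors Record.Validate above: accept the record only when the
// stored CRC matches the one computed while reading the data back.
func (r *record) validate(crc uint32) error {
	if r.Crc == crc {
		return nil
	}
	return errCRCMismatch
}

func main() {
	data := []byte("hello wal")
	rec := record{Crc: crc32.ChecksumIEEE(data), Data: data}

	// The reader recomputes the checksum over the bytes it just read.
	if err := rec.validate(crc32.ChecksumIEEE(rec.Data)); err != nil {
		fmt.Println("corrupt record:", err)
		return
	}
	fmt.Println("record ok")
}
```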
{
"id": 2,
"code_window": [
"\tMaster string\n",
"\tKubeconfig string\n",
"}\n",
"\n",
"func NewProxyConfig() *ProxyServerConfig {\n",
"\treturn &ProxyServerConfig{\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\tconfig := componentconfig.KubeProxyConfiguration{}\n",
"\tapi.Scheme.Convert(&v1alpha1.KubeProxyConfiguration{}, &config)\n"
],
"file_path": "cmd/kube-proxy/app/options/options.go",
"type": "add",
"edit_start_line_idx": 50
} | // Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements typechecking of expressions.
package types
import (
"fmt"
"k8s.io/kubernetes/third_party/golang/go/ast"
"k8s.io/kubernetes/third_party/golang/go/constant"
"k8s.io/kubernetes/third_party/golang/go/token"
"math"
)
/*
Basic algorithm:
Expressions are checked recursively, top down. Expression checker functions
are generally of the form:
func f(x *operand, e *ast.Expr, ...)
where e is the expression to be checked, and x is the result of the check.
The check performed by f may fail in which case x.mode == invalid, and
related error messages will have been issued by f.
If a hint argument is present, it is the composite literal element type
of an outer composite literal; it is used to type-check composite literal
elements that have no explicit type specification in the source
(e.g.: []T{{...}, {...}}, the hint is the type T in this case).
All expressions are checked via rawExpr, which dispatches according
to expression kind. Upon returning, rawExpr is recording the types and
constant values for all expressions that have an untyped type (those types
may change on the way up in the expression tree). Usually these are constants,
but the results of comparisons or non-constant shifts of untyped constants
may also be untyped, but not constant.
Untyped expressions may eventually become fully typed (i.e., not untyped),
typically when the value is assigned to a variable, or is used otherwise.
The updateExprType method is used to record this final type and update
the recorded types: the type-checked expression tree is again traversed down,
and the new type is propagated as needed. Untyped constant expression values
that become fully typed must now be representable by the full type (constant
sub-expression trees are left alone except for their roots). This mechanism
ensures that a client sees the actual (run-time) type an untyped value would
have. It also permits type-checking of lhs shift operands "as if the shift
were not present": when updateExprType visits an untyped lhs shift operand
and assigns it it's final type, that type must be an integer type, and a
constant lhs must be representable as an integer.
When an expression gets its final type, either on the way out from rawExpr,
on the way down in updateExprType, or at the end of the type checker run,
the type (and constant value, if any) is recorded via Info.Types, if present.
*/
type opPredicates map[token.Token]func(Type) bool
var unaryOpPredicates = opPredicates{
token.ADD: isNumeric,
token.SUB: isNumeric,
token.XOR: isInteger,
token.NOT: isBoolean,
}
func (check *Checker) op(m opPredicates, x *operand, op token.Token) bool {
if pred := m[op]; pred != nil {
if !pred(x.typ) {
check.invalidOp(x.pos(), "operator %s not defined for %s", op, x)
return false
}
} else {
check.invalidAST(x.pos(), "unknown operator %s", op)
return false
}
return true
}
// The unary expression e may be nil. It's passed in for better error messages only.
func (check *Checker) unary(x *operand, e *ast.UnaryExpr, op token.Token) {
switch op {
case token.AND:
// spec: "As an exception to the addressability
// requirement x may also be a composite literal."
if _, ok := unparen(x.expr).(*ast.CompositeLit); !ok && x.mode != variable {
check.invalidOp(x.pos(), "cannot take address of %s", x)
x.mode = invalid
return
}
x.mode = value
x.typ = &Pointer{base: x.typ}
return
case token.ARROW:
typ, ok := x.typ.Underlying().(*Chan)
if !ok {
check.invalidOp(x.pos(), "cannot receive from non-channel %s", x)
x.mode = invalid
return
}
if typ.dir == SendOnly {
check.invalidOp(x.pos(), "cannot receive from send-only channel %s", x)
x.mode = invalid
return
}
x.mode = commaok
x.typ = typ.elem
check.hasCallOrRecv = true
return
}
if !check.op(unaryOpPredicates, x, op) {
x.mode = invalid
return
}
if x.mode == constant_ {
typ := x.typ.Underlying().(*Basic)
var prec uint
if isUnsigned(typ) {
prec = uint(check.conf.sizeof(typ) * 8)
}
x.val = constant.UnaryOp(op, x.val, prec)
// Typed constants must be representable in
// their type after each constant operation.
if isTyped(typ) {
if e != nil {
x.expr = e // for better error message
}
check.representable(x, typ)
}
return
}
x.mode = value
// x.typ remains unchanged
}
func isShift(op token.Token) bool {
return op == token.SHL || op == token.SHR
}
func isComparison(op token.Token) bool {
// Note: tokens are not ordered well to make this much easier
switch op {
case token.EQL, token.NEQ, token.LSS, token.LEQ, token.GTR, token.GEQ:
return true
}
return false
}
func fitsFloat32(x constant.Value) bool {
f32, _ := constant.Float32Val(x)
f := float64(f32)
return !math.IsInf(f, 0)
}
func roundFloat32(x constant.Value) constant.Value {
f32, _ := constant.Float32Val(x)
f := float64(f32)
if !math.IsInf(f, 0) {
return constant.MakeFloat64(f)
}
return nil
}
func fitsFloat64(x constant.Value) bool {
f, _ := constant.Float64Val(x)
return !math.IsInf(f, 0)
}
func roundFloat64(x constant.Value) constant.Value {
f, _ := constant.Float64Val(x)
if !math.IsInf(f, 0) {
return constant.MakeFloat64(f)
}
return nil
}
// representableConst reports whether x can be represented as
// value of the given basic type kind and for the configuration
// provided (only needed for int/uint sizes).
//
// If rounded != nil, *rounded is set to the rounded value of x for
// representable floating-point values; it is left alone otherwise.
// It is ok to provide the address of the first argument for rounded.
func representableConst(x constant.Value, conf *Config, as BasicKind, rounded *constant.Value) bool {
switch x.Kind() {
case constant.Unknown:
return true
case constant.Bool:
return as == Bool || as == UntypedBool
case constant.Int:
if x, ok := constant.Int64Val(x); ok {
switch as {
case Int:
var s = uint(conf.sizeof(Typ[as])) * 8
return int64(-1)<<(s-1) <= x && x <= int64(1)<<(s-1)-1
case Int8:
const s = 8
return -1<<(s-1) <= x && x <= 1<<(s-1)-1
case Int16:
const s = 16
return -1<<(s-1) <= x && x <= 1<<(s-1)-1
case Int32:
const s = 32
return -1<<(s-1) <= x && x <= 1<<(s-1)-1
case Int64:
return true
case Uint, Uintptr:
if s := uint(conf.sizeof(Typ[as])) * 8; s < 64 {
return 0 <= x && x <= int64(1)<<s-1
}
return 0 <= x
case Uint8:
const s = 8
return 0 <= x && x <= 1<<s-1
case Uint16:
const s = 16
return 0 <= x && x <= 1<<s-1
case Uint32:
const s = 32
return 0 <= x && x <= 1<<s-1
case Uint64:
return 0 <= x
case Float32, Float64, Complex64, Complex128,
UntypedInt, UntypedFloat, UntypedComplex:
return true
}
}
n := constant.BitLen(x)
switch as {
case Uint, Uintptr:
var s = uint(conf.sizeof(Typ[as])) * 8
return constant.Sign(x) >= 0 && n <= int(s)
case Uint64:
return constant.Sign(x) >= 0 && n <= 64
case Float32, Complex64:
if rounded == nil {
return fitsFloat32(x)
}
r := roundFloat32(x)
if r != nil {
*rounded = r
return true
}
case Float64, Complex128:
if rounded == nil {
return fitsFloat64(x)
}
r := roundFloat64(x)
if r != nil {
*rounded = r
return true
}
case UntypedInt, UntypedFloat, UntypedComplex:
return true
}
case constant.Float:
switch as {
case Float32, Complex64:
if rounded == nil {
return fitsFloat32(x)
}
r := roundFloat32(x)
if r != nil {
*rounded = r
return true
}
case Float64, Complex128:
if rounded == nil {
return fitsFloat64(x)
}
r := roundFloat64(x)
if r != nil {
*rounded = r
return true
}
case UntypedFloat, UntypedComplex:
return true
}
case constant.Complex:
switch as {
case Complex64:
if rounded == nil {
return fitsFloat32(constant.Real(x)) && fitsFloat32(constant.Imag(x))
}
re := roundFloat32(constant.Real(x))
im := roundFloat32(constant.Imag(x))
if re != nil && im != nil {
*rounded = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
return true
}
case Complex128:
if rounded == nil {
return fitsFloat64(constant.Real(x)) && fitsFloat64(constant.Imag(x))
}
re := roundFloat64(constant.Real(x))
im := roundFloat64(constant.Imag(x))
if re != nil && im != nil {
*rounded = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
return true
}
case UntypedComplex:
return true
}
case constant.String:
return as == String || as == UntypedString
default:
unreachable()
}
return false
}
// representable checks that a constant operand is representable in the given basic type.
func (check *Checker) representable(x *operand, typ *Basic) {
assert(x.mode == constant_)
if !representableConst(x.val, check.conf, typ.kind, &x.val) {
var msg string
if isNumeric(x.typ) && isNumeric(typ) {
// numeric conversion : error msg
//
// integer -> integer : overflows
// integer -> float : overflows (actually not possible)
// float -> integer : truncated
// float -> float : overflows
//
if !isInteger(x.typ) && isInteger(typ) {
msg = "%s truncated to %s"
} else {
msg = "%s overflows %s"
}
} else {
msg = "cannot convert %s to %s"
}
check.errorf(x.pos(), msg, x, typ)
x.mode = invalid
}
}
// updateExprType updates the type of x to typ and invokes itself
// recursively for the operands of x, depending on expression kind.
// If typ is still an untyped and not the final type, updateExprType
// only updates the recorded untyped type for x and possibly its
// operands. Otherwise (i.e., typ is not an untyped type anymore,
// or it is the final type for x), the type and value are recorded.
// Also, if x is a constant, it must be representable as a value of typ,
// and if x is the (formerly untyped) lhs operand of a non-constant
// shift, it must be an integer value.
//
func (check *Checker) updateExprType(x ast.Expr, typ Type, final bool) {
old, found := check.untyped[x]
if !found {
return // nothing to do
}
// update operands of x if necessary
switch x := x.(type) {
case *ast.BadExpr,
*ast.FuncLit,
*ast.CompositeLit,
*ast.IndexExpr,
*ast.SliceExpr,
*ast.TypeAssertExpr,
*ast.StarExpr,
*ast.KeyValueExpr,
*ast.ArrayType,
*ast.StructType,
*ast.FuncType,
*ast.InterfaceType,
*ast.MapType,
*ast.ChanType:
		// These expressions are never untyped - nothing to do.
// The respective sub-expressions got their final types
// upon assignment or use.
if debug {
check.dump("%s: found old type(%s): %s (new: %s)", x.Pos(), x, old.typ, typ)
unreachable()
}
return
case *ast.CallExpr:
// Resulting in an untyped constant (e.g., built-in complex).
// The respective calls take care of calling updateExprType
// for the arguments if necessary.
case *ast.Ident, *ast.BasicLit, *ast.SelectorExpr:
// An identifier denoting a constant, a constant literal,
// or a qualified identifier (imported untyped constant).
// No operands to take care of.
case *ast.ParenExpr:
check.updateExprType(x.X, typ, final)
case *ast.UnaryExpr:
// If x is a constant, the operands were constants.
// They don't need to be updated since they never
// get "materialized" into a typed value; and they
// will be processed at the end of the type check.
if old.val != nil {
break
}
check.updateExprType(x.X, typ, final)
case *ast.BinaryExpr:
if old.val != nil {
break // see comment for unary expressions
}
if isComparison(x.Op) {
// The result type is independent of operand types
// and the operand types must have final types.
} else if isShift(x.Op) {
// The result type depends only on lhs operand.
// The rhs type was updated when checking the shift.
check.updateExprType(x.X, typ, final)
} else {
// The operand types match the result type.
check.updateExprType(x.X, typ, final)
check.updateExprType(x.Y, typ, final)
}
default:
unreachable()
}
// If the new type is not final and still untyped, just
// update the recorded type.
if !final && isUntyped(typ) {
old.typ = typ.Underlying().(*Basic)
check.untyped[x] = old
return
}
// Otherwise we have the final (typed or untyped type).
// Remove it from the map of yet untyped expressions.
delete(check.untyped, x)
// If x is the lhs of a shift, its final type must be integer.
// We already know from the shift check that it is representable
// as an integer if it is a constant.
if old.isLhs && !isInteger(typ) {
check.invalidOp(x.Pos(), "shifted operand %s (type %s) must be integer", x, typ)
return
}
// Everything's fine, record final type and value for x.
check.recordTypeAndValue(x, old.mode, typ, old.val)
}
// updateExprVal updates the value of x to val.
func (check *Checker) updateExprVal(x ast.Expr, val constant.Value) {
if info, ok := check.untyped[x]; ok {
info.val = val
check.untyped[x] = info
}
}
// convertUntyped attempts to set the type of an untyped value to the target type.
func (check *Checker) convertUntyped(x *operand, target Type) {
if x.mode == invalid || isTyped(x.typ) || target == Typ[Invalid] {
return
}
// TODO(gri) Sloppy code - clean up. This function is central
// to assignment and expression checking.
if isUntyped(target) {
// both x and target are untyped
xkind := x.typ.(*Basic).kind
tkind := target.(*Basic).kind
if isNumeric(x.typ) && isNumeric(target) {
if xkind < tkind {
x.typ = target
check.updateExprType(x.expr, target, false)
}
} else if xkind != tkind {
goto Error
}
return
}
// typed target
switch t := target.Underlying().(type) {
case *Basic:
if x.mode == constant_ {
check.representable(x, t)
if x.mode == invalid {
return
}
// expression value may have been rounded - update if needed
// TODO(gri) A floating-point value may silently underflow to
// zero. If it was negative, the sign is lost. See issue 6898.
check.updateExprVal(x.expr, x.val)
} else {
// Non-constant untyped values may appear as the
// result of comparisons (untyped bool), intermediate
// (delayed-checked) rhs operands of shifts, and as
// the value nil.
switch x.typ.(*Basic).kind {
case UntypedBool:
if !isBoolean(target) {
goto Error
}
case UntypedInt, UntypedRune, UntypedFloat, UntypedComplex:
if !isNumeric(target) {
goto Error
}
case UntypedString:
// Non-constant untyped string values are not
// permitted by the spec and should not occur.
unreachable()
case UntypedNil:
// Unsafe.Pointer is a basic type that includes nil.
if !hasNil(target) {
goto Error
}
default:
goto Error
}
}
case *Interface:
if !x.isNil() && !t.Empty() /* empty interfaces are ok */ {
goto Error
}
		// Update operand types to the default type rather than
// the target (interface) type: values must have concrete
// dynamic types. If the value is nil, keep it untyped
// (this is important for tools such as go vet which need
// the dynamic type for argument checking of say, print
// functions)
if x.isNil() {
target = Typ[UntypedNil]
} else {
// cannot assign untyped values to non-empty interfaces
if !t.Empty() {
goto Error
}
target = defaultType(x.typ)
}
case *Pointer, *Signature, *Slice, *Map, *Chan:
if !x.isNil() {
goto Error
}
// keep nil untyped - see comment for interfaces, above
target = Typ[UntypedNil]
default:
goto Error
}
x.typ = target
check.updateExprType(x.expr, target, true) // UntypedNils are final
return
Error:
check.errorf(x.pos(), "cannot convert %s to %s", x, target)
x.mode = invalid
}
func (check *Checker) comparison(x, y *operand, op token.Token) {
// spec: "In any comparison, the first operand must be assignable
// to the type of the second operand, or vice versa."
err := ""
if x.assignableTo(check.conf, y.typ) || y.assignableTo(check.conf, x.typ) {
defined := false
switch op {
case token.EQL, token.NEQ:
// spec: "The equality operators == and != apply to operands that are comparable."
defined = Comparable(x.typ) || x.isNil() && hasNil(y.typ) || y.isNil() && hasNil(x.typ)
case token.LSS, token.LEQ, token.GTR, token.GEQ:
// spec: The ordering operators <, <=, >, and >= apply to operands that are ordered."
defined = isOrdered(x.typ)
default:
unreachable()
}
if !defined {
typ := x.typ
if x.isNil() {
typ = y.typ
}
err = check.sprintf("operator %s not defined for %s", op, typ)
}
} else {
err = check.sprintf("mismatched types %s and %s", x.typ, y.typ)
}
if err != "" {
check.errorf(x.pos(), "cannot compare %s %s %s (%s)", x.expr, op, y.expr, err)
x.mode = invalid
return
}
if x.mode == constant_ && y.mode == constant_ {
x.val = constant.MakeBool(constant.Compare(x.val, op, y.val))
// The operands are never materialized; no need to update
// their types.
} else {
x.mode = value
		// The operands now have their final types, which at run-
// time will be materialized. Update the expression trees.
// If the current types are untyped, the materialized type
// is the respective default type.
check.updateExprType(x.expr, defaultType(x.typ), true)
check.updateExprType(y.expr, defaultType(y.typ), true)
}
// spec: "Comparison operators compare two operands and yield
// an untyped boolean value."
x.typ = Typ[UntypedBool]
}
func (check *Checker) shift(x, y *operand, op token.Token) {
untypedx := isUntyped(x.typ)
// The lhs must be of integer type or be representable
// as an integer; otherwise the shift has no chance.
if !x.isInteger() {
check.invalidOp(x.pos(), "shifted operand %s must be integer", x)
x.mode = invalid
return
}
// spec: "The right operand in a shift expression must have unsigned
// integer type or be an untyped constant that can be converted to
// unsigned integer type."
switch {
case isInteger(y.typ) && isUnsigned(y.typ):
// nothing to do
case isUntyped(y.typ):
check.convertUntyped(y, Typ[UntypedInt])
if y.mode == invalid {
x.mode = invalid
return
}
default:
check.invalidOp(y.pos(), "shift count %s must be unsigned integer", y)
x.mode = invalid
return
}
if x.mode == constant_ {
if y.mode == constant_ {
// rhs must be an integer value
if !y.isInteger() {
check.invalidOp(y.pos(), "shift count %s must be unsigned integer", y)
x.mode = invalid
return
}
// rhs must be within reasonable bounds
const stupidShift = 1023 - 1 + 52 // so we can express smallestFloat64
s, ok := constant.Uint64Val(y.val)
if !ok || s > stupidShift {
check.invalidOp(y.pos(), "stupid shift count %s", y)
x.mode = invalid
return
}
// The lhs is representable as an integer but may not be an integer
// (e.g., 2.0, an untyped float) - this can only happen for untyped
// non-integer numeric constants. Correct the type so that the shift
// result is of integer type.
if !isInteger(x.typ) {
x.typ = Typ[UntypedInt]
}
x.val = constant.Shift(x.val, op, uint(s))
return
}
// non-constant shift with constant lhs
if untypedx {
// spec: "If the left operand of a non-constant shift
// expression is an untyped constant, the type of the
// constant is what it would be if the shift expression
// were replaced by its left operand alone.".
//
// Delay operand checking until we know the final type:
// The lhs expression must be in the untyped map, mark
// the entry as lhs shift operand.
info, found := check.untyped[x.expr]
assert(found)
info.isLhs = true
check.untyped[x.expr] = info
// keep x's type
x.mode = value
return
}
}
// constant rhs must be >= 0
if y.mode == constant_ && constant.Sign(y.val) < 0 {
check.invalidOp(y.pos(), "shift count %s must not be negative", y)
}
// non-constant shift - lhs must be an integer
if !isInteger(x.typ) {
check.invalidOp(x.pos(), "shifted operand %s must be integer", x)
x.mode = invalid
return
}
x.mode = value
}
var binaryOpPredicates = opPredicates{
token.ADD: func(typ Type) bool { return isNumeric(typ) || isString(typ) },
token.SUB: isNumeric,
token.MUL: isNumeric,
token.QUO: isNumeric,
token.REM: isInteger,
token.AND: isInteger,
token.OR: isInteger,
token.XOR: isInteger,
token.AND_NOT: isInteger,
token.LAND: isBoolean,
token.LOR: isBoolean,
}
// The binary expression e may be nil. It's passed in for better error messages only.
func (check *Checker) binary(x *operand, e *ast.BinaryExpr, lhs, rhs ast.Expr, op token.Token) {
var y operand
check.expr(x, lhs)
check.expr(&y, rhs)
if x.mode == invalid {
return
}
if y.mode == invalid {
x.mode = invalid
x.expr = y.expr
return
}
if isShift(op) {
check.shift(x, &y, op)
return
}
check.convertUntyped(x, y.typ)
if x.mode == invalid {
return
}
check.convertUntyped(&y, x.typ)
if y.mode == invalid {
x.mode = invalid
return
}
if isComparison(op) {
check.comparison(x, &y, op)
return
}
if !Identical(x.typ, y.typ) {
// only report an error if we have valid types
// (otherwise we had an error reported elsewhere already)
if x.typ != Typ[Invalid] && y.typ != Typ[Invalid] {
check.invalidOp(x.pos(), "mismatched types %s and %s", x.typ, y.typ)
}
x.mode = invalid
return
}
if !check.op(binaryOpPredicates, x, op) {
x.mode = invalid
return
}
if (op == token.QUO || op == token.REM) && (x.mode == constant_ || isInteger(x.typ)) && y.mode == constant_ && constant.Sign(y.val) == 0 {
check.invalidOp(y.pos(), "division by zero")
x.mode = invalid
return
}
if x.mode == constant_ && y.mode == constant_ {
typ := x.typ.Underlying().(*Basic)
// force integer division of integer operands
if op == token.QUO && isInteger(typ) {
op = token.QUO_ASSIGN
}
x.val = constant.BinaryOp(x.val, op, y.val)
// Typed constants must be representable in
// their type after each constant operation.
if isTyped(typ) {
if e != nil {
x.expr = e // for better error message
}
check.representable(x, typ)
}
return
}
x.mode = value
// x.typ is unchanged
}
// index checks an index expression for validity.
// If max >= 0, it is the upper bound for index.
// If index is valid and the result i >= 0, then i is the constant value of index.
func (check *Checker) index(index ast.Expr, max int64) (i int64, valid bool) {
var x operand
check.expr(&x, index)
if x.mode == invalid {
return
}
// an untyped constant must be representable as Int
check.convertUntyped(&x, Typ[Int])
if x.mode == invalid {
return
}
// the index must be of integer type
if !isInteger(x.typ) {
check.invalidArg(x.pos(), "index %s must be integer", &x)
return
}
// a constant index i must be in bounds
if x.mode == constant_ {
if constant.Sign(x.val) < 0 {
check.invalidArg(x.pos(), "index %s must not be negative", &x)
return
}
i, valid = constant.Int64Val(x.val)
if !valid || max >= 0 && i >= max {
check.errorf(x.pos(), "index %s is out of bounds", &x)
return i, false
}
// 0 <= i [ && i < max ]
return i, true
}
return -1, true
}
// indexElts checks the elements (elts) of an array or slice composite literal
// against the literal's element type (typ), and the element indices against
// the literal length if known (length >= 0). It returns the length of the
// literal (maximum index value + 1).
//
func (check *Checker) indexedElts(elts []ast.Expr, typ Type, length int64) int64 {
visited := make(map[int64]bool, len(elts))
var index, max int64
for _, e := range elts {
// determine and check index
validIndex := false
eval := e
if kv, _ := e.(*ast.KeyValueExpr); kv != nil {
if i, ok := check.index(kv.Key, length); ok {
if i >= 0 {
index = i
validIndex = true
} else {
check.errorf(e.Pos(), "index %s must be integer constant", kv.Key)
}
}
eval = kv.Value
} else if length >= 0 && index >= length {
check.errorf(e.Pos(), "index %d is out of bounds (>= %d)", index, length)
} else {
validIndex = true
}
// if we have a valid index, check for duplicate entries
if validIndex {
if visited[index] {
check.errorf(e.Pos(), "duplicate index %d in array or slice literal", index)
}
visited[index] = true
}
index++
if index > max {
max = index
}
// check element against composite literal element type
var x operand
check.exprWithHint(&x, eval, typ)
if !check.assignment(&x, typ) && x.mode != invalid {
check.errorf(x.pos(), "cannot use %s as %s value in array or slice literal", &x, typ)
}
}
return max
}
// exprKind describes the kind of an expression; the kind
// determines if an expression is valid in 'statement context'.
type exprKind int
const (
conversion exprKind = iota
expression
statement
)
// rawExpr typechecks expression e and initializes x with the expression
// value or type. If an error occurred, x.mode is set to invalid.
// If hint != nil, it is the type of a composite literal element.
//
func (check *Checker) rawExpr(x *operand, e ast.Expr, hint Type) exprKind {
if trace {
check.trace(e.Pos(), "%s", e)
check.indent++
defer func() {
check.indent--
check.trace(e.Pos(), "=> %s", x)
}()
}
kind := check.exprInternal(x, e, hint)
// convert x into a user-friendly set of values
// TODO(gri) this code can be simplified
var typ Type
var val constant.Value
switch x.mode {
case invalid:
typ = Typ[Invalid]
case novalue:
typ = (*Tuple)(nil)
case constant_:
typ = x.typ
val = x.val
default:
typ = x.typ
}
assert(x.expr != nil && typ != nil)
if isUntyped(typ) {
// delay type and value recording until we know the type
// or until the end of type checking
check.rememberUntyped(x.expr, false, x.mode, typ.(*Basic), val)
} else {
check.recordTypeAndValue(e, x.mode, typ, val)
}
return kind
}
// exprInternal contains the core of type checking of expressions.
// Must only be called by rawExpr.
//
func (check *Checker) exprInternal(x *operand, e ast.Expr, hint Type) exprKind {
// make sure x has a valid state in case of bailout
// (was issue 5770)
x.mode = invalid
x.typ = Typ[Invalid]
switch e := e.(type) {
case *ast.BadExpr:
goto Error // error was reported before
case *ast.Ident:
check.ident(x, e, nil, nil)
case *ast.Ellipsis:
// ellipses are handled explicitly where they are legal
// (array composite literals and parameter lists)
check.error(e.Pos(), "invalid use of '...'")
goto Error
case *ast.BasicLit:
x.setConst(e.Kind, e.Value)
if x.mode == invalid {
check.invalidAST(e.Pos(), "invalid literal %v", e.Value)
goto Error
}
case *ast.FuncLit:
if sig, ok := check.typ(e.Type).(*Signature); ok {
// Anonymous functions are considered part of the
// init expression/func declaration which contains
// them: use existing package-level declaration info.
check.funcBody(check.decl, "", sig, e.Body)
x.mode = value
x.typ = sig
} else {
check.invalidAST(e.Pos(), "invalid function literal %s", e)
goto Error
}
case *ast.CompositeLit:
typ := hint
openArray := false
if e.Type != nil {
// [...]T array types may only appear with composite literals.
// Check for them here so we don't have to handle ... in general.
typ = nil
if atyp, _ := e.Type.(*ast.ArrayType); atyp != nil && atyp.Len != nil {
if ellip, _ := atyp.Len.(*ast.Ellipsis); ellip != nil && ellip.Elt == nil {
// We have an "open" [...]T array type.
// Create a new ArrayType with unknown length (-1)
// and finish setting it up after analyzing the literal.
typ = &Array{len: -1, elem: check.typ(atyp.Elt)}
openArray = true
}
}
if typ == nil {
typ = check.typ(e.Type)
}
}
if typ == nil {
// TODO(gri) provide better error messages depending on context
check.error(e.Pos(), "missing type in composite literal")
goto Error
}
switch typ, _ := deref(typ); utyp := typ.Underlying().(type) {
case *Struct:
if len(e.Elts) == 0 {
break
}
fields := utyp.fields
if _, ok := e.Elts[0].(*ast.KeyValueExpr); ok {
// all elements must have keys
visited := make([]bool, len(fields))
for _, e := range e.Elts {
kv, _ := e.(*ast.KeyValueExpr)
if kv == nil {
check.error(e.Pos(), "mixture of field:value and value elements in struct literal")
continue
}
key, _ := kv.Key.(*ast.Ident)
if key == nil {
check.errorf(kv.Pos(), "invalid field name %s in struct literal", kv.Key)
continue
}
i := fieldIndex(utyp.fields, check.pkg, key.Name)
if i < 0 {
check.errorf(kv.Pos(), "unknown field %s in struct literal", key.Name)
continue
}
fld := fields[i]
check.recordUse(key, fld)
// 0 <= i < len(fields)
if visited[i] {
check.errorf(kv.Pos(), "duplicate field name %s in struct literal", key.Name)
continue
}
visited[i] = true
check.expr(x, kv.Value)
etyp := fld.typ
if !check.assignment(x, etyp) {
if x.mode != invalid {
check.errorf(x.pos(), "cannot use %s as %s value in struct literal", x, etyp)
}
continue
}
}
} else {
// no element must have a key
for i, e := range e.Elts {
if kv, _ := e.(*ast.KeyValueExpr); kv != nil {
check.error(kv.Pos(), "mixture of field:value and value elements in struct literal")
continue
}
check.expr(x, e)
if i >= len(fields) {
check.error(x.pos(), "too many values in struct literal")
break // cannot continue
}
// i < len(fields)
fld := fields[i]
if !fld.Exported() && fld.pkg != check.pkg {
check.errorf(x.pos(), "implicit assignment to unexported field %s in %s literal", fld.name, typ)
continue
}
etyp := fld.typ
if !check.assignment(x, etyp) {
if x.mode != invalid {
check.errorf(x.pos(), "cannot use %s as %s value in struct literal", x, etyp)
}
continue
}
}
if len(e.Elts) < len(fields) {
check.error(e.Rbrace, "too few values in struct literal")
// ok to continue
}
}
case *Array:
n := check.indexedElts(e.Elts, utyp.elem, utyp.len)
// if we have an "open" [...]T array, set the length now that we know it
if openArray {
utyp.len = n
}
case *Slice:
check.indexedElts(e.Elts, utyp.elem, -1)
case *Map:
visited := make(map[interface{}][]Type, len(e.Elts))
for _, e := range e.Elts {
kv, _ := e.(*ast.KeyValueExpr)
if kv == nil {
check.error(e.Pos(), "missing key in map literal")
continue
}
check.exprWithHint(x, kv.Key, utyp.key)
if !check.assignment(x, utyp.key) {
if x.mode != invalid {
check.errorf(x.pos(), "cannot use %s as %s key in map literal", x, utyp.key)
}
continue
}
if x.mode == constant_ {
duplicate := false
// if the key is of interface type, the type is also significant when checking for duplicates
if _, ok := utyp.key.Underlying().(*Interface); ok {
for _, vtyp := range visited[x.val] {
if Identical(vtyp, x.typ) {
duplicate = true
break
}
}
visited[x.val] = append(visited[x.val], x.typ)
} else {
_, duplicate = visited[x.val]
visited[x.val] = nil
}
if duplicate {
check.errorf(x.pos(), "duplicate key %s in map literal", x.val)
continue
}
}
check.exprWithHint(x, kv.Value, utyp.elem)
if !check.assignment(x, utyp.elem) {
if x.mode != invalid {
check.errorf(x.pos(), "cannot use %s as %s value in map literal", x, utyp.elem)
}
continue
}
}
default:
// if utyp is invalid, an error was reported before
if utyp != Typ[Invalid] {
check.errorf(e.Pos(), "invalid composite literal type %s", typ)
goto Error
}
}
x.mode = value
x.typ = typ
case *ast.ParenExpr:
kind := check.rawExpr(x, e.X, nil)
x.expr = e
return kind
case *ast.SelectorExpr:
check.selector(x, e)
case *ast.IndexExpr:
check.expr(x, e.X)
if x.mode == invalid {
goto Error
}
valid := false
length := int64(-1) // valid if >= 0
switch typ := x.typ.Underlying().(type) {
case *Basic:
if isString(typ) {
valid = true
if x.mode == constant_ {
length = int64(len(constant.StringVal(x.val)))
}
// an indexed string always yields a byte value
// (not a constant) even if the string and the
// index are constant
x.mode = value
x.typ = universeByte // use 'byte' name
}
case *Array:
valid = true
length = typ.len
if x.mode != variable {
x.mode = value
}
x.typ = typ.elem
case *Pointer:
if typ, _ := typ.base.Underlying().(*Array); typ != nil {
valid = true
length = typ.len
x.mode = variable
x.typ = typ.elem
}
case *Slice:
valid = true
x.mode = variable
x.typ = typ.elem
case *Map:
var key operand
check.expr(&key, e.Index)
if !check.assignment(&key, typ.key) {
if key.mode != invalid {
check.invalidOp(key.pos(), "cannot use %s as map index of type %s", &key, typ.key)
}
goto Error
}
x.mode = mapindex
x.typ = typ.elem
x.expr = e
return expression
}
if !valid {
check.invalidOp(x.pos(), "cannot index %s", x)
goto Error
}
if e.Index == nil {
check.invalidAST(e.Pos(), "missing index for %s", x)
goto Error
}
check.index(e.Index, length)
// ok to continue
case *ast.SliceExpr:
check.expr(x, e.X)
if x.mode == invalid {
goto Error
}
valid := false
length := int64(-1) // valid if >= 0
switch typ := x.typ.Underlying().(type) {
case *Basic:
if isString(typ) {
if slice3(e) {
check.invalidOp(x.pos(), "3-index slice of string")
goto Error
}
valid = true
if x.mode == constant_ {
length = int64(len(constant.StringVal(x.val)))
}
// spec: "For untyped string operands the result
// is a non-constant value of type string."
if typ.kind == UntypedString {
x.typ = Typ[String]
}
}
case *Array:
valid = true
length = typ.len
if x.mode != variable {
check.invalidOp(x.pos(), "cannot slice %s (value not addressable)", x)
goto Error
}
x.typ = &Slice{elem: typ.elem}
case *Pointer:
if typ, _ := typ.base.Underlying().(*Array); typ != nil {
valid = true
length = typ.len
x.typ = &Slice{elem: typ.elem}
}
case *Slice:
valid = true
// x.typ doesn't change
}
if !valid {
check.invalidOp(x.pos(), "cannot slice %s", x)
goto Error
}
x.mode = value
// spec: "Only the first index may be omitted; it defaults to 0."
if slice3(e) && (e.High == nil || sliceMax(e) == nil) {
check.error(e.Rbrack, "2nd and 3rd index required in 3-index slice")
goto Error
}
// check indices
var ind [3]int64
for i, expr := range []ast.Expr{e.Low, e.High, sliceMax(e)} {
x := int64(-1)
switch {
case expr != nil:
// The "capacity" is only known statically for strings, arrays,
// and pointers to arrays, and it is the same as the length for
// those types.
max := int64(-1)
if length >= 0 {
max = length + 1
}
if t, ok := check.index(expr, max); ok && t >= 0 {
x = t
}
case i == 0:
// default is 0 for the first index
x = 0
case length >= 0:
// default is length (== capacity) otherwise
x = length
}
ind[i] = x
}
// constant indices must be in range
// (check.index already checks that existing indices >= 0)
L:
for i, x := range ind[:len(ind)-1] {
if x > 0 {
for _, y := range ind[i+1:] {
if y >= 0 && x > y {
check.errorf(e.Rbrack, "invalid slice indices: %d > %d", x, y)
break L // only report one error, ok to continue
}
}
}
}
case *ast.TypeAssertExpr:
check.expr(x, e.X)
if x.mode == invalid {
goto Error
}
xtyp, _ := x.typ.Underlying().(*Interface)
if xtyp == nil {
check.invalidOp(x.pos(), "%s is not an interface", x)
goto Error
}
// x.(type) expressions are handled explicitly in type switches
if e.Type == nil {
check.invalidAST(e.Pos(), "use of .(type) outside type switch")
goto Error
}
T := check.typ(e.Type)
if T == Typ[Invalid] {
goto Error
}
check.typeAssertion(x.pos(), x, xtyp, T)
x.mode = commaok
x.typ = T
case *ast.CallExpr:
return check.call(x, e)
case *ast.StarExpr:
check.exprOrType(x, e.X)
switch x.mode {
case invalid:
goto Error
case typexpr:
x.typ = &Pointer{base: x.typ}
default:
if typ, ok := x.typ.Underlying().(*Pointer); ok {
x.mode = variable
x.typ = typ.base
} else {
check.invalidOp(x.pos(), "cannot indirect %s", x)
goto Error
}
}
case *ast.UnaryExpr:
check.expr(x, e.X)
if x.mode == invalid {
goto Error
}
check.unary(x, e, e.Op)
if x.mode == invalid {
goto Error
}
if e.Op == token.ARROW {
x.expr = e
return statement // receive operations may appear in statement context
}
case *ast.BinaryExpr:
check.binary(x, e, e.X, e.Y, e.Op)
if x.mode == invalid {
goto Error
}
case *ast.KeyValueExpr:
// key:value expressions are handled in composite literals
check.invalidAST(e.Pos(), "no key:value expected")
goto Error
case *ast.ArrayType, *ast.StructType, *ast.FuncType,
*ast.InterfaceType, *ast.MapType, *ast.ChanType:
x.mode = typexpr
x.typ = check.typ(e)
// Note: rawExpr (caller of exprInternal) will call check.recordTypeAndValue
// even though check.typ has already called it. This is fine as both
// times the same expression and type are recorded. It is also not a
// performance issue because we only reach here for composite literal
// types, which are comparatively rare.
default:
panic(fmt.Sprintf("%s: unknown expression type %T", check.fset.Position(e.Pos()), e))
}
// everything went well
x.expr = e
return expression
Error:
x.mode = invalid
x.expr = e
return statement // avoid follow-up errors
}
// typeAssertion checks that x.(T) is legal; xtyp must be the type of x.
func (check *Checker) typeAssertion(pos token.Pos, x *operand, xtyp *Interface, T Type) {
method, wrongType := assertableTo(xtyp, T)
if method == nil {
return
}
var msg string
if wrongType {
msg = "wrong type for method"
} else {
msg = "missing method"
}
check.errorf(pos, "%s cannot have dynamic type %s (%s %s)", x, T, msg, method.name)
}
// expr typechecks expression e and initializes x with the expression value.
// If an error occurred, x.mode is set to invalid.
//
func (check *Checker) expr(x *operand, e ast.Expr) {
check.rawExpr(x, e, nil)
var msg string
switch x.mode {
default:
return
case novalue:
msg = "used as value"
case builtin:
msg = "must be called"
case typexpr:
msg = "is not an expression"
}
check.errorf(x.pos(), "%s %s", x, msg)
x.mode = invalid
}
// exprWithHint typechecks expression e and initializes x with the expression value.
// If an error occurred, x.mode is set to invalid.
// If hint != nil, it is the type of a composite literal element.
//
func (check *Checker) exprWithHint(x *operand, e ast.Expr, hint Type) {
assert(hint != nil)
check.rawExpr(x, e, hint)
var msg string
switch x.mode {
default:
return
case novalue:
msg = "used as value"
case builtin:
msg = "must be called"
case typexpr:
msg = "is not an expression"
}
check.errorf(x.pos(), "%s %s", x, msg)
x.mode = invalid
}
// exprOrType typechecks expression or type e and initializes x with the expression value or type.
// If an error occurred, x.mode is set to invalid.
//
func (check *Checker) exprOrType(x *operand, e ast.Expr) {
check.rawExpr(x, e, nil)
if x.mode == novalue {
check.errorf(x.pos(), "%s used as value or type", x)
x.mode = invalid
}
}
| third_party/golang/go/types/expr.go | 0 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.0003430671931710094,
0.00017319784092251211,
0.00016460195183753967,
0.00017102941637858748,
0.000014739465768798254
] |
{
"id": 3,
"code_window": [
"\treturn &ProxyServerConfig{\n",
"\t\tKubeProxyConfiguration: componentconfig.KubeProxyConfiguration{\n",
"\t\t\tBindAddress: \"0.0.0.0\",\n",
"\t\t\tHealthzPort: 10249,\n",
"\t\t\tHealthzBindAddress: \"127.0.0.1\",\n",
"\t\t\tOOMScoreAdj: util.IntPtr(qos.KubeProxyOOMScoreAdj),\n",
"\t\t\tResourceContainer: \"/kube-proxy\",\n",
"\t\t\tIPTablesSyncPeriod: unversioned.Duration{30 * time.Second},\n",
"\t\t\tUDPIdleTimeout: unversioned.Duration{250 * time.Millisecond},\n",
"\t\t\tConntrackMax: 256 * 1024, // 4x default (64k)\n",
"\t\t\tConntrackTCPEstablishedTimeout: unversioned.Duration{Duration: 24 * time.Hour}, // 1 day (1/5 default)\n",
"\t\t},\n",
"\t\tKubeAPIQPS: 5.0,\n",
"\t\tKubeAPIBurst: 10,\n",
"\t\tConfigSyncPeriod: 15 * time.Minute,\n",
"\t}\n",
"}\n",
"\n",
"// AddFlags adds flags for a specific ProxyServer to the specified FlagSet\n",
"func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) {\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tKubeProxyConfiguration: config,\n",
"\t\tKubeAPIQPS: 5.0,\n",
"\t\tKubeAPIBurst: 10,\n",
"\t\tConfigSyncPeriod: 15 * time.Minute,\n"
],
"file_path": "cmd/kube-proxy/app/options/options.go",
"type": "replace",
"edit_start_line_idx": 51
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package options contains flags for initializing a proxy.
package options
import (
_ "net/http/pprof"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/util"
"github.com/spf13/pflag"
)
const (
ExperimentalProxyModeAnnotation = "net.experimental.kubernetes.io/proxy-mode"
)
// ProxyServerConfig configures and runs a Kubernetes proxy server
type ProxyServerConfig struct {
componentconfig.KubeProxyConfiguration
ResourceContainer string
KubeAPIQPS float32
KubeAPIBurst int
ConfigSyncPeriod time.Duration
CleanupAndExit bool
NodeRef *api.ObjectReference
Master string
Kubeconfig string
}
func NewProxyConfig() *ProxyServerConfig {
return &ProxyServerConfig{
KubeProxyConfiguration: componentconfig.KubeProxyConfiguration{
BindAddress: "0.0.0.0",
HealthzPort: 10249,
HealthzBindAddress: "127.0.0.1",
OOMScoreAdj: util.IntPtr(qos.KubeProxyOOMScoreAdj),
ResourceContainer: "/kube-proxy",
IPTablesSyncPeriod: unversioned.Duration{30 * time.Second},
UDPIdleTimeout: unversioned.Duration{250 * time.Millisecond},
ConntrackMax: 256 * 1024, // 4x default (64k)
ConntrackTCPEstablishedTimeout: unversioned.Duration{Duration: 24 * time.Hour}, // 1 day (1/5 default)
},
KubeAPIQPS: 5.0,
KubeAPIBurst: 10,
ConfigSyncPeriod: 15 * time.Minute,
}
}
// AddFlags adds flags for a specific ProxyServer to the specified FlagSet
func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) {
fs.Var(componentconfig.IPVar{&s.BindAddress}, "bind-address", "The IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)")
fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
fs.IntVar(&s.HealthzPort, "healthz-port", s.HealthzPort, "The port to bind the health check server. Use 0 to disable.")
fs.Var(componentconfig.IPVar{&s.HealthzBindAddress}, "healthz-bind-address", "The IP address for the health check server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)")
fs.IntVar(s.OOMScoreAdj, "oom-score-adj", util.IntPtrDerefOr(s.OOMScoreAdj, qos.KubeProxyOOMScoreAdj), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]")
fs.StringVar(&s.ResourceContainer, "resource-container", s.ResourceContainer, "Absolute name of the resource-only container to create and run the Kube-proxy in (Default: /kube-proxy).")
fs.MarkDeprecated("resource-container", "This feature will be removed in a later release.")
fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization information (the master location is set by the master flag).")
fs.Var(componentconfig.PortRangeVar{&s.PortRange}, "proxy-port-range", "Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.")
fs.StringVar(&s.HostnameOverride, "hostname-override", s.HostnameOverride, "If non-empty, will use this string as identification instead of the actual hostname.")
fs.Var(&s.Mode, "proxy-mode", "Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the '"+ExperimentalProxyModeAnnotation+"' annotation if provided. Otherwise use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.")
fs.DurationVar(&s.IPTablesSyncPeriod.Duration, "iptables-sync-period", s.IPTablesSyncPeriod.Duration, "How often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.")
fs.DurationVar(&s.ConfigSyncPeriod, "config-sync-period", s.ConfigSyncPeriod, "How often configuration from the apiserver is refreshed. Must be greater than 0.")
fs.BoolVar(&s.MasqueradeAll, "masquerade-all", false, "If using the pure iptables proxy, SNAT everything")
fs.BoolVar(&s.CleanupAndExit, "cleanup-iptables", false, "If true cleanup iptables rules and exit.")
fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
fs.DurationVar(&s.UDPIdleTimeout.Duration, "udp-timeout", s.UDPIdleTimeout.Duration, "How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace")
fs.IntVar(&s.ConntrackMax, "conntrack-max", s.ConntrackMax, "Maximum number of NAT connections to track (0 to leave as-is)")
fs.DurationVar(&s.ConntrackTCPEstablishedTimeout.Duration, "conntrack-tcp-timeout-established", s.ConntrackTCPEstablishedTimeout.Duration, "Idle timeout for established TCP connections (0 to leave as-is)")
}
| cmd/kube-proxy/app/options/options.go | 1 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.40141743421554565,
0.05206490308046341,
0.00016183704428840429,
0.007070794701576233,
0.11750168353319168
] |
{
"id": 3,
"code_window": [
"\treturn &ProxyServerConfig{\n",
"\t\tKubeProxyConfiguration: componentconfig.KubeProxyConfiguration{\n",
"\t\t\tBindAddress: \"0.0.0.0\",\n",
"\t\t\tHealthzPort: 10249,\n",
"\t\t\tHealthzBindAddress: \"127.0.0.1\",\n",
"\t\t\tOOMScoreAdj: util.IntPtr(qos.KubeProxyOOMScoreAdj),\n",
"\t\t\tResourceContainer: \"/kube-proxy\",\n",
"\t\t\tIPTablesSyncPeriod: unversioned.Duration{30 * time.Second},\n",
"\t\t\tUDPIdleTimeout: unversioned.Duration{250 * time.Millisecond},\n",
"\t\t\tConntrackMax: 256 * 1024, // 4x default (64k)\n",
"\t\t\tConntrackTCPEstablishedTimeout: unversioned.Duration{Duration: 24 * time.Hour}, // 1 day (1/5 default)\n",
"\t\t},\n",
"\t\tKubeAPIQPS: 5.0,\n",
"\t\tKubeAPIBurst: 10,\n",
"\t\tConfigSyncPeriod: 15 * time.Minute,\n",
"\t}\n",
"}\n",
"\n",
"// AddFlags adds flags for a specific ProxyServer to the specified FlagSet\n",
"func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) {\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tKubeProxyConfiguration: config,\n",
"\t\tKubeAPIQPS: 5.0,\n",
"\t\tKubeAPIBurst: 10,\n",
"\t\tConfigSyncPeriod: 15 * time.Minute,\n"
],
"file_path": "cmd/kube-proxy/app/options/options.go",
"type": "replace",
"edit_start_line_idx": 51
} | /*
*
* Copyright 2014, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package transport
import (
"bytes"
"errors"
"io"
"math"
"net"
"strconv"
"sync"
"github.com/bradfitz/http2"
"github.com/bradfitz/http2/hpack"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
)
// ErrIllegalHeaderWrite indicates that setting header is illegal because of
// the stream's state.
var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
// http2Server implements the ServerTransport interface with HTTP2.
type http2Server struct {
conn net.Conn
maxStreamID uint32 // max stream ID ever seen
authInfo credentials.AuthInfo // auth info about the connection
// writableChan synchronizes write access to the transport.
// A writer acquires the write lock by sending a value on writableChan
// and releases it by receiving from writableChan.
writableChan chan int
// shutdownChan is closed when Close is called.
// Blocking operations should select on shutdownChan to avoid
// blocking forever after Close.
shutdownChan chan struct{}
framer *framer
hBuf *bytes.Buffer // the buffer for HPACK encoding
hEnc *hpack.Encoder // HPACK encoder
// The max number of concurrent streams.
maxStreams uint32
// controlBuf delivers all the control related tasks (e.g., window
// updates, reset streams, and various settings) to the controller.
controlBuf *recvBuffer
fc *inFlow
// sendQuotaPool provides flow control to outbound message.
sendQuotaPool *quotaPool
mu sync.Mutex // guard the following
state transportState
activeStreams map[uint32]*Stream
// the per-stream outbound flow control window size set by the peer.
streamSendQuota uint32
}
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
// returned if something goes wrong.
func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (_ ServerTransport, err error) {
framer := newFramer(conn)
// Send initial settings as connection preface to client.
var settings []http2.Setting
// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
// permitted in the HTTP2 spec.
if maxStreams == 0 {
maxStreams = math.MaxUint32
} else {
settings = append(settings, http2.Setting{http2.SettingMaxConcurrentStreams, maxStreams})
}
if initialWindowSize != defaultWindowSize {
settings = append(settings, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)})
}
if err := framer.writeSettings(true, settings...); err != nil {
return nil, ConnectionErrorf("transport: %v", err)
}
// Adjust the connection flow control window if needed.
if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
if err := framer.writeWindowUpdate(true, 0, delta); err != nil {
return nil, ConnectionErrorf("transport: %v", err)
}
}
var buf bytes.Buffer
t := &http2Server{
conn: conn,
authInfo: authInfo,
framer: framer,
hBuf: &buf,
hEnc: hpack.NewEncoder(&buf),
maxStreams: maxStreams,
controlBuf: newRecvBuffer(),
fc: &inFlow{limit: initialConnWindowSize},
sendQuotaPool: newQuotaPool(defaultWindowSize),
state: reachable,
writableChan: make(chan int, 1),
shutdownChan: make(chan struct{}),
activeStreams: make(map[uint32]*Stream),
streamSendQuota: defaultWindowSize,
}
go t.controller()
t.writableChan <- 0
return t, nil
}
// operateHeader takes action on the decoded headers. It returns the current
// stream if there are remaining headers on the wire (in the following
// Continuation frame).
func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool, handle func(*Stream), wg *sync.WaitGroup) (pendingStream *Stream) {
defer func() {
if pendingStream == nil {
hDec.state = decodeState{}
}
}()
endHeaders, err := hDec.decodeServerHTTP2Headers(frame)
if s == nil {
// s has been closed.
return nil
}
if err != nil {
grpclog.Printf("transport: http2Server.operateHeader found %v", err)
if se, ok := err.(StreamError); ok {
t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
}
return nil
}
if endStream {
// s is just created by the caller. No lock needed.
s.state = streamReadDone
}
if !endHeaders {
return s
}
t.mu.Lock()
if t.state != reachable {
t.mu.Unlock()
return nil
}
if uint32(len(t.activeStreams)) >= t.maxStreams {
t.mu.Unlock()
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
return nil
}
s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
t.activeStreams[s.id] = s
t.mu.Unlock()
s.windowHandler = func(n int) {
t.updateWindow(s, uint32(n))
}
if hDec.state.timeoutSet {
s.ctx, s.cancel = context.WithTimeout(context.TODO(), hDec.state.timeout)
} else {
s.ctx, s.cancel = context.WithCancel(context.TODO())
}
// Attach Auth info if there is any.
if t.authInfo != nil {
s.ctx = credentials.NewContext(s.ctx, t.authInfo)
}
// Cache the current stream to the context so that the server application
// can find out. Required when the server wants to send some metadata
// back to the client (unary call only).
s.ctx = newContextWithStream(s.ctx, s)
// Attach the received metadata to the context.
if len(hDec.state.mdata) > 0 {
s.ctx = metadata.NewContext(s.ctx, hDec.state.mdata)
}
s.dec = &recvBufferReader{
ctx: s.ctx,
recv: s.buf,
}
s.method = hDec.state.method
wg.Add(1)
go func() {
handle(s)
wg.Done()
}()
return nil
}
// HandleStreams receives incoming streams using the given handler. This is
// typically run in a separate goroutine.
func (t *http2Server) HandleStreams(handle func(*Stream)) {
// Check the validity of client preface.
preface := make([]byte, len(clientPreface))
if _, err := io.ReadFull(t.conn, preface); err != nil {
grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
t.Close()
return
}
if !bytes.Equal(preface, clientPreface) {
grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
t.Close()
return
}
frame, err := t.framer.readFrame()
if err != nil {
grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err)
t.Close()
return
}
sf, ok := frame.(*http2.SettingsFrame)
if !ok {
grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
t.Close()
return
}
t.handleSettings(sf)
hDec := newHPACKDecoder()
var curStream *Stream
var wg sync.WaitGroup
defer wg.Wait()
for {
frame, err := t.framer.readFrame()
if err != nil {
t.Close()
return
}
switch frame := frame.(type) {
case *http2.HeadersFrame:
id := frame.Header().StreamID
if id%2 != 1 || id <= t.maxStreamID {
// illegal gRPC stream id.
grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", id)
t.Close()
break
}
t.maxStreamID = id
buf := newRecvBuffer()
fc := &inFlow{
limit: initialWindowSize,
conn: t.fc,
}
curStream = &Stream{
id: frame.Header().StreamID,
st: t,
buf: buf,
fc: fc,
}
endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream)
curStream = t.operateHeaders(hDec, curStream, frame, endStream, handle, &wg)
case *http2.ContinuationFrame:
curStream = t.operateHeaders(hDec, curStream, frame, false, handle, &wg)
case *http2.DataFrame:
t.handleData(frame)
case *http2.RSTStreamFrame:
t.handleRSTStream(frame)
case *http2.SettingsFrame:
t.handleSettings(frame)
case *http2.PingFrame:
t.handlePing(frame)
case *http2.WindowUpdateFrame:
t.handleWindowUpdate(frame)
case *http2.GoAwayFrame:
break
default:
grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
}
}
}
func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
t.mu.Lock()
defer t.mu.Unlock()
if t.activeStreams == nil {
// The transport is closing.
return nil, false
}
s, ok := t.activeStreams[f.Header().StreamID]
if !ok {
// The stream is already done.
return nil, false
}
return s, true
}
// updateWindow adjusts the inbound quota for the stream and the transport.
// Window updates will be delivered to the controller for sending when
// the cumulative quota exceeds the corresponding threshold.
func (t *http2Server) updateWindow(s *Stream, n uint32) {
swu, cwu := s.fc.onRead(n)
if swu > 0 {
t.controlBuf.put(&windowUpdate{s.id, swu})
}
if cwu > 0 {
t.controlBuf.put(&windowUpdate{0, cwu})
}
}
func (t *http2Server) handleData(f *http2.DataFrame) {
// Select the right stream to dispatch.
s, ok := t.getStream(f)
if !ok {
return
}
size := len(f.Data())
if size > 0 {
if err := s.fc.onData(uint32(size)); err != nil {
if _, ok := err.(ConnectionError); ok {
grpclog.Printf("transport: http2Server %v", err)
t.Close()
return
}
t.closeStream(s)
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
return
}
// TODO(bradfitz, zhaoq): A copy is required here because there is no
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
data := make([]byte, size)
copy(data, f.Data())
s.write(recvMsg{data: data})
}
if f.Header().Flags.Has(http2.FlagDataEndStream) {
// Received the end of stream from the client.
s.mu.Lock()
if s.state != streamDone {
if s.state == streamWriteDone {
s.state = streamDone
} else {
s.state = streamReadDone
}
}
s.mu.Unlock()
s.write(recvMsg{err: io.EOF})
}
}
func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
s, ok := t.getStream(f)
if !ok {
return
}
t.closeStream(s)
}
func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
if f.IsAck() {
return
}
var ss []http2.Setting
f.ForeachSetting(func(s http2.Setting) error {
ss = append(ss, s)
return nil
})
// The settings will be applied once the ack is sent.
t.controlBuf.put(&settings{ack: true, ss: ss})
}
func (t *http2Server) handlePing(f *http2.PingFrame) {
t.controlBuf.put(&ping{true})
}
func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
id := f.Header().StreamID
incr := f.Increment
if id == 0 {
t.sendQuotaPool.add(int(incr))
return
}
if s, ok := t.getStream(f); ok {
s.sendQuotaPool.add(int(incr))
}
}
func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error {
first := true
endHeaders := false
var err error
// Sends the headers in a single batch.
for !endHeaders {
size := t.hBuf.Len()
if size > http2MaxFrameLen {
size = http2MaxFrameLen
} else {
endHeaders = true
}
if first {
p := http2.HeadersFrameParam{
StreamID: s.id,
BlockFragment: b.Next(size),
EndStream: endStream,
EndHeaders: endHeaders,
}
err = t.framer.writeHeaders(endHeaders, p)
first = false
} else {
err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size))
}
if err != nil {
t.Close()
return ConnectionErrorf("transport: %v", err)
}
}
return nil
}
// WriteHeader sends the header metadata md back to the client.
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
s.mu.Lock()
if s.headerOk || s.state == streamDone {
s.mu.Unlock()
return ErrIllegalHeaderWrite
}
s.headerOk = true
s.mu.Unlock()
if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
return err
}
t.hBuf.Reset()
t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
for k, v := range md {
for _, entry := range v {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
}
}
if err := t.writeHeaders(s, t.hBuf, false); err != nil {
return err
}
t.writableChan <- 0
return nil
}
// WriteStatus sends stream status to the client and terminates the stream.
// No further I/O operations can be performed on this stream.
// TODO(zhaoq): Now it indicates the end of the entire stream. Revisit if early
// OK is adopted.
func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
s.mu.RLock()
if s.state == streamDone {
s.mu.RUnlock()
return nil
}
s.mu.RUnlock()
if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
return err
}
t.hBuf.Reset()
t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
t.hEnc.WriteField(
hpack.HeaderField{
Name: "grpc-status",
Value: strconv.Itoa(int(statusCode)),
})
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: statusDesc})
// Attach the trailer metadata.
for k, v := range s.trailer {
for _, entry := range v {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
}
}
if err := t.writeHeaders(s, t.hBuf, true); err != nil {
t.Close()
return err
}
t.closeStream(s)
t.writableChan <- 0
return nil
}
// Write converts the data into an HTTP2 data frame and sends it out. A non-nil error
// is returned if it fails (e.g., framing error, transport error).
func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
// TODO(zhaoq): Support multi-writers for a single stream.
var writeHeaderFrame bool
s.mu.Lock()
if !s.headerOk {
writeHeaderFrame = true
s.headerOk = true
}
s.mu.Unlock()
if writeHeaderFrame {
if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
return err
}
t.hBuf.Reset()
t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
p := http2.HeadersFrameParam{
StreamID: s.id,
BlockFragment: t.hBuf.Bytes(),
EndHeaders: true,
}
if err := t.framer.writeHeaders(false, p); err != nil {
t.Close()
return ConnectionErrorf("transport: %v", err)
}
t.writableChan <- 0
}
r := bytes.NewBuffer(data)
for {
if r.Len() == 0 {
return nil
}
size := http2MaxFrameLen
s.sendQuotaPool.add(0)
// Wait until the stream has some quota to send the data.
sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire())
if err != nil {
return err
}
t.sendQuotaPool.add(0)
// Wait until the transport has some quota to send the data.
tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire())
if err != nil {
if _, ok := err.(StreamError); ok {
t.sendQuotaPool.cancel()
}
return err
}
if sq < size {
size = sq
}
if tq < size {
size = tq
}
p := r.Next(size)
ps := len(p)
if ps < sq {
// Overbooked stream quota. Return it back.
s.sendQuotaPool.add(sq - ps)
}
if ps < tq {
// Overbooked transport quota. Return it back.
t.sendQuotaPool.add(tq - ps)
}
t.framer.adjustNumWriters(1)
// Got some quota. Try to acquire writing privilege on the
// transport.
if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
if t.framer.adjustNumWriters(-1) == 0 {
// This writer is the last one in this batch and has the
// responsibility to flush the buffered frames. It queues
// a flush request to controlBuf instead of flushing directly
// in order to avoid the race with other writing or flushing.
t.controlBuf.put(&flushIO{})
}
return err
}
var forceFlush bool
if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last {
forceFlush = true
}
if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil {
t.Close()
return ConnectionErrorf("transport: %v", err)
}
if t.framer.adjustNumWriters(-1) == 0 {
t.framer.flushWrite()
}
t.writableChan <- 0
}
}
func (t *http2Server) applySettings(ss []http2.Setting) {
for _, s := range ss {
if s.ID == http2.SettingInitialWindowSize {
t.mu.Lock()
defer t.mu.Unlock()
for _, stream := range t.activeStreams {
stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota))
}
t.streamSendQuota = s.Val
}
}
}
// controller running in a separate goroutine takes charge of sending control
// frames (e.g., window update, reset stream, setting, etc.) to the client.
func (t *http2Server) controller() {
for {
select {
case i := <-t.controlBuf.get():
t.controlBuf.load()
select {
case <-t.writableChan:
switch i := i.(type) {
case *windowUpdate:
t.framer.writeWindowUpdate(true, i.streamID, i.increment)
case *settings:
if i.ack {
t.framer.writeSettingsAck(true)
t.applySettings(i.ss)
} else {
t.framer.writeSettings(true, i.ss...)
}
case *resetStream:
t.framer.writeRSTStream(true, i.streamID, i.code)
case *flushIO:
t.framer.flushWrite()
case *ping:
// TODO(zhaoq): Ack with all-0 data now. Will change to some
// meaningful content when this is actually in use.
t.framer.writePing(true, i.ack, [8]byte{})
default:
grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i)
}
t.writableChan <- 0
continue
case <-t.shutdownChan:
return
}
case <-t.shutdownChan:
return
}
}
}
// Close starts shutting down the http2Server transport.
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
// could cause some resource issue. Revisit this later.
func (t *http2Server) Close() (err error) {
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
return errors.New("transport: Close() was already called")
}
t.state = closing
streams := t.activeStreams
t.activeStreams = nil
t.mu.Unlock()
close(t.shutdownChan)
err = t.conn.Close()
// Notify all active streams.
for _, s := range streams {
s.write(recvMsg{err: ErrConnClosing})
}
return
}
// closeStream clears the footprint of a stream when the stream is not needed
// any more.
func (t *http2Server) closeStream(s *Stream) {
t.mu.Lock()
delete(t.activeStreams, s.id)
t.mu.Unlock()
if q := s.fc.restoreConn(); q > 0 {
t.controlBuf.put(&windowUpdate{0, q})
}
s.mu.Lock()
if s.state == streamDone {
s.mu.Unlock()
return
}
s.state = streamDone
s.mu.Unlock()
// In case stream sending and receiving are invoked in separate
// goroutines (e.g., bi-directional streaming), the caller needs
// to call cancel on the stream to interrupt the blocking on
// other goroutines.
s.cancel()
}
| Godeps/_workspace/src/google.golang.org/grpc/transport/http2_server.go | 0 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.010199576616287231,
0.0011626241030171514,
0.0001706563780317083,
0.00045251293340697885,
0.0019514664309099317
] |
{
"id": 3,
"code_window": [
"\treturn &ProxyServerConfig{\n",
"\t\tKubeProxyConfiguration: componentconfig.KubeProxyConfiguration{\n",
"\t\t\tBindAddress: \"0.0.0.0\",\n",
"\t\t\tHealthzPort: 10249,\n",
"\t\t\tHealthzBindAddress: \"127.0.0.1\",\n",
"\t\t\tOOMScoreAdj: util.IntPtr(qos.KubeProxyOOMScoreAdj),\n",
"\t\t\tResourceContainer: \"/kube-proxy\",\n",
"\t\t\tIPTablesSyncPeriod: unversioned.Duration{30 * time.Second},\n",
"\t\t\tUDPIdleTimeout: unversioned.Duration{250 * time.Millisecond},\n",
"\t\t\tConntrackMax: 256 * 1024, // 4x default (64k)\n",
"\t\t\tConntrackTCPEstablishedTimeout: unversioned.Duration{Duration: 24 * time.Hour}, // 1 day (1/5 default)\n",
"\t\t},\n",
"\t\tKubeAPIQPS: 5.0,\n",
"\t\tKubeAPIBurst: 10,\n",
"\t\tConfigSyncPeriod: 15 * time.Minute,\n",
"\t}\n",
"}\n",
"\n",
"// AddFlags adds flags for a specific ProxyServer to the specified FlagSet\n",
"func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) {\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tKubeProxyConfiguration: config,\n",
"\t\tKubeAPIQPS: 5.0,\n",
"\t\tKubeAPIBurst: 10,\n",
"\t\tConfigSyncPeriod: 15 * time.Minute,\n"
],
"file_path": "cmd/kube-proxy/app/options/options.go",
"type": "replace",
"edit_start_line_idx": 51
} | #!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
function grab_profiles_from_component {
local requested_profiles=$1
local mem_pprof_flags=$2
local binary=$3
local tunnel_port=$4
local path=$5
local output_prefix=$6
local timestamp=$7
echo "binary: $binary"
for profile in ${requested_profiles}; do
case ${profile} in
cpu)
go tool pprof "-pdf" "${binary}" "http://localhost:${tunnel_port}${path}/debug/pprof/profile" > "${output_prefix}-${profile}-profile-${timestamp}.pdf"
;;
mem)
# There are different kinds of memory profiles that are available that
# had to be grabbed separately: --inuse-space, --inuse-objects,
# --alloc-space, --alloc-objects. We need to iterate over all requested
# kinds.
for flag in ${mem_pprof_flags}; do
go tool pprof "-${flag}" "-pdf" "${binary}" "http://localhost:${tunnel_port}${path}/debug/pprof/heap" > "${output_prefix}-${profile}-${flag}-profile-${timestamp}.pdf"
done
;;
esac
done
}
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
server_addr=""
kubelet_addresses=""
kubelet_binary=""
master_binary=""
scheduler_binary=""
scheduler_port="10251"
controller_manager_port="10252"
controller_manager_binary=""
requested_profiles=""
mem_pprof_flags=""
profile_components=""
output_dir="."
tunnel_port="${tunnel_port:-1234}"
args=$(getopt -o s:mho:k:c -l server:,master,heapster,output:,kubelet:,scheduler,controller-manager,help,inuse-space,inuse-objects,alloc-space,alloc-objects,cpu,kubelet-binary:,master-binary:,scheduler-binary:,controller-manager-binary:,scheduler-port:,controller-manager-port: -- "$@")
if [[ $? -ne 0 ]]; then
>&2 echo "Error in getopt"
exit 1
fi
HEAPSTER_VERSION="v0.18.2"
MASTER_PPROF_PATH=""
HEAPSTER_PPROF_PATH="/api/v1/proxy/namespaces/kube-system/services/monitoring-heapster"
KUBELET_PPROF_PATH_PREFIX="/api/v1/proxy/nodes"
SCHEDULER_PPROF_PATH_PREFIX="/api/v1/proxy/namespaces/kube-system/pods/kube-scheduler"
CONTROLLER_MANAGER_PPROF_PATH_PREFIX="/api/v1/proxy/namespaces/kube-system/pods/kube-controller-manager"
eval set -- "${args}"
while true; do
case $1 in
-s|--server)
shift
if [ -z "$1" ]; then
>&2 echo "empty argument to --server flag"
exit 1
fi
server_addr=$1
shift
;;
-m|--master)
shift
profile_components="master ${profile_components}"
;;
--master-binary)
shift
if [ -z "$1" ]; then
>&2 echo "empty argumet to --master-binary flag"
exit 1
fi
master_binary=$1
shift
;;
-h|--heapster)
shift
profile_components="heapster ${profile_components}"
;;
-k|--kubelet)
shift
profile_components="kubelet ${profile_components}"
if [ -z "$1" ]; then
>&2 echo "empty argumet to --kubelet flag"
exit 1
fi
kubelet_addresses="$1 $kubelet_addresses"
shift
;;
--kubelet-binary)
shift
if [ -z "$1" ]; then
>&2 echo "empty argumet to --kubelet-binary flag"
exit 1
fi
kubelet_binary=$1
shift
;;
--scheduler)
shift
profile_components="scheduler ${profile_components}"
;;
--scheduler-binary)
shift
if [ -z "$1" ]; then
>&2 echo "empty argumet to --scheduler-binary flag"
exit 1
fi
scheduler_binary=$1
shift
;;
--scheduler-port)
shift
if [ -z "$1" ]; then
>&2 echo "empty argumet to --scheduler-port flag"
exit 1
fi
scheduler_port=$1
shift
;;
-c|--controller-manager)
shift
profile_components="controller-manager ${profile_components}"
;;
--controller-manager-binary)
shift
if [ -z "$1" ]; then
>&2 echo "empty argumet to --controller-manager-binary flag"
exit 1
fi
controller_manager_binary=$1
shift
;;
--controller-manager-port)
shift
if [ -z "$1" ]; then
>&2 echo "empty argumet to --controller-manager-port flag"
exit 1
fi
controller_manager_port=$1
shift
;;
-o|--output)
shift
if [ -z "$1" ]; then
>&2 echo "empty argument to --output flag"
exit 1
fi
output_dir=$1
shift
;;
--inuse-space)
shift
requested_profiles="mem ${requested_profiles}"
mem_pprof_flags="inuse_space ${mem_pprof_flags}"
;;
--inuse-objects)
shift
requested_profiles="mem ${requested_profiles}"
mem_pprof_flags="inuse_objects ${mem_pprof_flags}"
;;
--alloc-space)
shift
requested_profiles="mem ${requested_profiles}"
mem_pprof_flags="alloc_space ${mem_pprof_flags}"
;;
--alloc-objects)
shift
requested_profiles="mem ${requested_profiles}"
mem_pprof_flags="alloc_objects ${mem_pprof_flags}"
;;
--cpu)
shift
requested_profiles="cpu ${requested_profiles}"
;;
--help)
shift
echo "Recognized options:
-o/--output,
-s/--server,
-m/--master,
-h/--heapster,
--inuse-space,
--inuse-objects,
--alloc-space,
--alloc-objects,
--cpu,
--help"
exit 0
;;
--)
shift
break;
;;
esac
done
if [[ -z "${server_addr}" ]]; then
>&2 echo "Server flag is required"
exit 1
fi
if [[ -z "${profile_components}" ]]; then
>&2 echo "Choose at least one component to profile"
exit 1
fi
if [[ -z "${requested_profiles}" ]]; then
>&2 echo "Choose at least one profiling option"
exit 1
fi
gcloud compute ssh "${server_addr}" --ssh-flag=-nN --ssh-flag=-L${tunnel_port}:localhost:8080 &
echo "Waiting for tunnel to be created..."
kube::util::wait_for_url http://localhost:${tunnel_port}/healthz
SSH_PID=$(pgrep -f "/usr/bin/ssh.*${tunnel_port}:localhost:8080")
kube::util::trap_add 'kill $SSH_PID' EXIT
kube::util::trap_add 'kill $SSH_PID' SIGTERM
requested_profiles=$(echo ${requested_profiles} | xargs -n1 | sort -u | xargs)
profile_components=$(echo ${profile_components} | xargs -n1 | sort -u | xargs)
kubelet_addresses=$(echo ${kubelet_addresses} | xargs -n1 | sort -u | xargs)
echo "requested profiles: ${requested_profiles}"
echo "flags for heap profile: ${mem_pprof_flags}"
timestamp=$(date +%Y%m%d%H%M%S)
binary=""
for component in ${profile_components}; do
case ${component} in
master)
path=${MASTER_PPROF_PATH}
binary=${master_binary}
;;
controller-manager)
path="${CONTROLLER_MANAGER_PPROF_PATH_PREFIX}-${server_addr}:${controller_manager_port}"
binary=${controller_manager_binary}
;;
scheduler)
path="${SCHEDULER_PPROF_PATH_PREFIX}-${server_addr}:${scheduler_port}"
binary=${scheduler_binary}
;;
heapster)
rm heapster
wget https://github.com/kubernetes/heapster/releases/download/${HEAPSTER_VERSION}/heapster
kube::util::trap_add 'rm -f heapster' EXIT
kube::util::trap_add 'rm -f heapster' SIGTERM
binary=heapster
path=${HEAPSTER_PPROF_PATH}
;;
kubelet)
path="${KUBELET_PPROF_PATH_PREFIX}"
if [[ -z "${kubelet_binary}" ]]; then
binary="${KUBE_ROOT}/_output/local/bin/linux/amd64/kubelet"
else
binary=${kubelet_binary}
fi
;;
esac
if [[ "${component}" == "kubelet" ]]; then
for node in $(echo ${kubelet_addresses} | sed 's/[,;]/\n/g'); do
grab_profiles_from_component "${requested_profiles}" "${mem_pprof_flags}" "${binary}" "${tunnel_port}" "${path}/${node}" "${output_dir}/${component}" "${timestamp}"
done
else
grab_profiles_from_component "${requested_profiles}" "${mem_pprof_flags}" "${binary}" "${tunnel_port}" "${path}" "${output_dir}/${component}" "${timestamp}"
fi
done
| hack/grab-profiles.sh | 0 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.008774437010288239,
0.001695159007795155,
0.000174850836629048,
0.00044637409155257046,
0.002583183813840151
] |
{
"id": 3,
"code_window": [
"\treturn &ProxyServerConfig{\n",
"\t\tKubeProxyConfiguration: componentconfig.KubeProxyConfiguration{\n",
"\t\t\tBindAddress: \"0.0.0.0\",\n",
"\t\t\tHealthzPort: 10249,\n",
"\t\t\tHealthzBindAddress: \"127.0.0.1\",\n",
"\t\t\tOOMScoreAdj: util.IntPtr(qos.KubeProxyOOMScoreAdj),\n",
"\t\t\tResourceContainer: \"/kube-proxy\",\n",
"\t\t\tIPTablesSyncPeriod: unversioned.Duration{30 * time.Second},\n",
"\t\t\tUDPIdleTimeout: unversioned.Duration{250 * time.Millisecond},\n",
"\t\t\tConntrackMax: 256 * 1024, // 4x default (64k)\n",
"\t\t\tConntrackTCPEstablishedTimeout: unversioned.Duration{Duration: 24 * time.Hour}, // 1 day (1/5 default)\n",
"\t\t},\n",
"\t\tKubeAPIQPS: 5.0,\n",
"\t\tKubeAPIBurst: 10,\n",
"\t\tConfigSyncPeriod: 15 * time.Minute,\n",
"\t}\n",
"}\n",
"\n",
"// AddFlags adds flags for a specific ProxyServer to the specified FlagSet\n",
"func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) {\n"
],
"labels": [
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tKubeProxyConfiguration: config,\n",
"\t\tKubeAPIQPS: 5.0,\n",
"\t\tKubeAPIBurst: 10,\n",
"\t\tConfigSyncPeriod: 15 * time.Minute,\n"
],
"file_path": "cmd/kube-proxy/app/options/options.go",
"type": "replace",
"edit_start_line_idx": 51
} | #!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
kube::test::find_dirs() {
(
cd ${KUBE_ROOT}
find . -not \( \
\( \
-path './_artifacts/*' \
-o -path './_output/*' \
-o -path './_gopath/*' \
-o -path './Godeps/*' \
-o -path './contrib/podex/*' \
-o -path './output/*' \
-o -path './release/*' \
-o -path './target/*' \
-o -path './test/e2e/*' \
-o -path './test/e2e_node/*' \
-o -path './test/integration/*' \
-o -path './test/component/scheduler/perf/*' \
\) -prune \
\) -name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./||' | sort -u
)
}
# -covermode=atomic becomes default with -race in Go >=1.3
KUBE_TIMEOUT=${KUBE_TIMEOUT:--timeout 120s}
KUBE_COVER=${KUBE_COVER:-n} # set to 'y' to enable coverage collection
KUBE_COVERMODE=${KUBE_COVERMODE:-atomic}
# How many 'go test' instances to run simultaneously when running tests in
# coverage mode.
KUBE_COVERPROCS=${KUBE_COVERPROCS:-4}
KUBE_RACE=${KUBE_RACE:-} # use KUBE_RACE="-race" to enable race testing
# Set to the goveralls binary path to report coverage results to Coveralls.io.
KUBE_GOVERALLS_BIN=${KUBE_GOVERALLS_BIN:-}
# Lists of API Versions of each group that should be tested, groups are
# separated by comma, lists are separated by semicolon. e.g.,
# "v1,compute/v1alpha1,experimental/v1alpha2;v1,compute/v2,experimental/v1alpha3"
KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,extensions/v1beta1,metrics/v1alpha1"}
# once we have multiple group supports
# Run tests with the standard (registry) and a custom etcd prefix
# (kubernetes.io/registry).
KUBE_TEST_ETCD_PREFIXES=${KUBE_TEST_ETCD_PREFIXES:-"registry,kubernetes.io/registry"}
# Create a junit-style XML test report in this directory if set.
KUBE_JUNIT_REPORT_DIR=${KUBE_JUNIT_REPORT_DIR:-}
# Set to 'y' to keep the verbose stdout from tests when KUBE_JUNIT_REPORT_DIR is
# set.
KUBE_KEEP_VERBOSE_TEST_OUTPUT=${KUBE_KEEP_VERBOSE_TEST_OUTPUT:-n}
kube::test::usage() {
kube::log::usage_from_stdin <<EOF
usage: $0 [OPTIONS] [TARGETS]
OPTIONS:
-p <number> : number of parallel workers, must be >= 1
-i <number> : number of times to run each test per worker, must be >= 1
EOF
}
isnum() {
[[ "$1" =~ ^[0-9]+$ ]]
}
iterations=1
parallel=1
while getopts "hp:i:" opt ; do
case $opt in
h)
kube::test::usage
exit 0
;;
p)
parallel="$OPTARG"
if ! isnum "${parallel}" || [[ "${parallel}" -le 0 ]]; then
kube::log::usage "'$0': argument to -p must be numeric and greater than 0"
kube::test::usage
exit 1
fi
;;
i)
iterations="$OPTARG"
if ! isnum "${iterations}" || [[ "${iterations}" -le 0 ]]; then
kube::log::usage "'$0': argument to -i must be numeric and greater than 0"
kube::test::usage
exit 1
fi
;;
?)
kube::test::usage
exit 1
;;
:)
kube::log::usage "Option -$OPTARG <value>"
kube::test::usage
exit 1
;;
esac
done
shift $((OPTIND - 1))
# Use eval to preserve embedded quoted strings.
eval "goflags=(${KUBE_GOFLAGS:-})"
eval "testargs=(${KUBE_TEST_ARGS:-})"
# Used to filter verbose test output.
go_test_grep_pattern=".*"
# The go-junit-report tool needs full test case information to produce a
# meaningful report.
if [[ -n "${KUBE_JUNIT_REPORT_DIR}" ]] ; then
goflags+=(-v)
# Show only summary lines by matching lines like "status package/test"
go_test_grep_pattern="^[^[:space:]]\+[[:space:]]\+[^[:space:]]\+/[^[[:space:]]\+"
fi
# Filter out arguments that start with "-" and move them to goflags.
testcases=()
for arg; do
if [[ "${arg}" == -* ]]; then
goflags+=("${arg}")
else
testcases+=("${arg}")
fi
done
if [[ ${#testcases[@]} -eq 0 ]]; then
testcases=($(kube::test::find_dirs))
fi
set -- "${testcases[@]+${testcases[@]}}"
junitFilenamePrefix() {
if [[ -z "${KUBE_JUNIT_REPORT_DIR}" ]]; then
echo ""
return
fi
mkdir -p "${KUBE_JUNIT_REPORT_DIR}"
local KUBE_TEST_API_NO_SLASH="${KUBE_TEST_API//\//-}"
echo "${KUBE_JUNIT_REPORT_DIR}/junit_${KUBE_TEST_API_NO_SLASH}_$(kube::util::sortable_date)"
}
produceJUnitXMLReport() {
local -r junit_filename_prefix=$1
if [[ -z "${junit_filename_prefix}" ]]; then
return
fi
local test_stdout_filenames
local junit_xml_filename
test_stdout_filenames=$(ls ${junit_filename_prefix}*.stdout)
junit_xml_filename="${junit_filename_prefix}.xml"
if ! command -v go-junit-report >/dev/null 2>&1; then
kube::log::error "go-junit-report not found; please install with " \
"go get -u github.com/jstemmer/go-junit-report"
return
fi
cat ${test_stdout_filenames} | go-junit-report > "${junit_xml_filename}"
if [[ ! ${KUBE_KEEP_VERBOSE_TEST_OUTPUT} =~ ^[yY]$ ]]; then
rm ${test_stdout_filenames}
fi
kube::log::status "Saved JUnit XML test report to ${junit_xml_filename}"
}
runTestIterations() {
local worker=$1
shift
kube::log::status "Worker ${worker}: Running ${iterations} times"
for arg; do
trap 'exit 1' SIGINT
local pkg=${KUBE_GO_PACKAGE}/${arg}
kube::log::status "${pkg}"
# keep going, even if there are failures
local pass=0
local count=0
for i in $(seq 1 ${iterations}); do
if go test "${goflags[@]:+${goflags[@]}}" \
${KUBE_RACE} ${KUBE_TIMEOUT} "${pkg}" \
"${testargs[@]:+${testargs[@]}}"; then
pass=$((pass + 1))
else
ITERATION_FAILURES=$((ITERATION_FAILURES + 1))
fi
count=$((count + 1))
done 2>&1
kube::log::status "Worker ${worker}: ${pass} / ${count} passed"
done
return 0
}
runTests() {
# TODO: this should probably be refactored to avoid code duplication with the
# coverage version.
if [[ $iterations -gt 1 ]]; then
ITERATION_FAILURES=0 # purposely non-local
if [[ $# -eq 0 ]]; then
set -- $(kube::test::find_dirs)
fi
for p in $(seq 1 ${parallel}); do
runTestIterations ${p} "$@" &
done
wait
if [[ ${ITERATION_FAILURES} -gt 0 ]]; then
return 1
fi
return 0
fi
local junit_filename_prefix
junit_filename_prefix=$(junitFilenamePrefix)
# If we're not collecting coverage, run all requested tests with one 'go test'
# command, which is much faster.
if [[ ! ${KUBE_COVER} =~ ^[yY]$ ]]; then
kube::log::status "Running tests without code coverage"
go test "${goflags[@]:+${goflags[@]}}" \
${KUBE_RACE} ${KUBE_TIMEOUT} "${@+${@/#/${KUBE_GO_PACKAGE}/}}" \
"${testargs[@]:+${testargs[@]}}" \
| tee ${junit_filename_prefix:+"${junit_filename_prefix}.stdout"} \
| grep "${go_test_grep_pattern}" && rc=$? || rc=$?
produceJUnitXMLReport "${junit_filename_prefix}"
return ${rc}
fi
# Create coverage report directories.
cover_report_dir="/tmp/k8s_coverage/${KUBE_TEST_API}/$(kube::util::sortable_date)"
cover_profile="coverage.out" # Name for each individual coverage profile
kube::log::status "Saving coverage output in '${cover_report_dir}'"
mkdir -p "${@+${@/#/${cover_report_dir}/}}"
# Run all specified tests, collecting coverage results. Go currently doesn't
# support collecting coverage across multiple packages at once, so we must issue
# separate 'go test' commands for each package and then combine at the end.
# To speed things up considerably, we can at least use xargs -P to run multiple
# 'go test' commands at once.
# To properly parse the test results if generating a JUnit test report, we
# must make sure the output from parallel runs is not mixed. To achieve this,
# we spawn a subshell for each parallel process, redirecting the output to
# separate files.
printf "%s\n" "${@}" | xargs -I{} -n1 -P${KUBE_COVERPROCS} \
bash -c "set -o pipefail; _pkg=\"{}\"; _pkg_out=\${_pkg//\//_}; \
go test ${goflags[@]:+${goflags[@]}} \
${KUBE_RACE} \
${KUBE_TIMEOUT} \
-cover -covermode=\"${KUBE_COVERMODE}\" \
-coverprofile=\"${cover_report_dir}/\${_pkg}/${cover_profile}\" \
\"${KUBE_GO_PACKAGE}/\${_pkg}\" \
${testargs[@]:+${testargs[@]}} \
| tee ${junit_filename_prefix:+\"${junit_filename_prefix}-\$_pkg_out.stdout\"} \
| grep \"${go_test_grep_pattern}\"" \
&& test_result=$? || test_result=$?
produceJUnitXMLReport "${junit_filename_prefix}"
COMBINED_COVER_PROFILE="${cover_report_dir}/combined-coverage.out"
{
# The combined coverage profile needs to start with a line indicating which
# coverage mode was used (set, count, or atomic). This line is included in
# each of the coverage profiles generated when running 'go test -cover', but
# we strip these lines out when combining so that there's only one.
echo "mode: ${KUBE_COVERMODE}"
# Include all coverage reach data in the combined profile, but exclude the
# 'mode' lines, as there should be only one.
for x in `find "${cover_report_dir}" -name "${cover_profile}"`; do
cat $x | grep -h -v "^mode:" || true
done
} >"${COMBINED_COVER_PROFILE}"
coverage_html_file="${cover_report_dir}/combined-coverage.html"
go tool cover -html="${COMBINED_COVER_PROFILE}" -o="${coverage_html_file}"
kube::log::status "Combined coverage report: ${coverage_html_file}"
return ${test_result}
}
reportCoverageToCoveralls() {
if [[ ${KUBE_COVER} =~ ^[yY]$ ]] && [[ -x "${KUBE_GOVERALLS_BIN}" ]]; then
kube::log::status "Reporting coverage results to Coveralls for service ${CI_NAME:-}"
${KUBE_GOVERALLS_BIN} -coverprofile="${COMBINED_COVER_PROFILE}" \
${CI_NAME:+"-service=${CI_NAME}"} \
${COVERALLS_REPO_TOKEN:+"-repotoken=${COVERALLS_REPO_TOKEN}"} \
|| true
fi
}
# Convert the CSVs to arrays.
IFS=';' read -a apiVersions <<< "${KUBE_TEST_API_VERSIONS}"
IFS=',' read -a etcdPrefixes <<< "${KUBE_TEST_ETCD_PREFIXES}"
apiVersionsCount=${#apiVersions[@]}
etcdPrefixesCount=${#etcdPrefixes[@]}
for (( i=0, j=0; ; )); do
apiVersion=${apiVersions[i]}
etcdPrefix=${etcdPrefixes[j]}
echo "Running tests for APIVersion: $apiVersion with etcdPrefix: $etcdPrefix"
# KUBE_TEST_API sets the version of each group to be tested. KUBE_API_VERSIONS
# register the groups/versions as supported by k8s. So KUBE_API_VERSIONS
# needs to be the superset of KUBE_TEST_API.
KUBE_TEST_API="${apiVersion}" KUBE_API_VERSIONS="v1,extensions/v1beta1,componentconfig/v1alpha1,metrics/v1alpha1" ETCD_PREFIX=${etcdPrefix} runTests "$@"
i=${i}+1
j=${j}+1
if [[ i -eq ${apiVersionsCount} ]] && [[ j -eq ${etcdPrefixesCount} ]]; then
# All api versions and etcd prefixes tested.
break
fi
if [[ i -eq ${apiVersionsCount} ]]; then
# Use the last api version for remaining etcd prefixes.
i=${i}-1
fi
if [[ j -eq ${etcdPrefixesCount} ]]; then
# Use the last etcd prefix for remaining api versions.
j=${j}-1
fi
done
# We might run the tests for multiple versions, but we want to report only
# one of them to coveralls. Here we report coverage from the last run.
reportCoverageToCoveralls
| hack/test-go.sh | 0 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.008553529158234596,
0.0006604497320950031,
0.00016419454186689109,
0.0002631865500006825,
0.0014318343019112945
] |
{
"id": 4,
"code_window": [
"\t\t\tif obj.OOMScoreAdj == nil {\n",
"\t\t\t\ttemp := int32(qos.KubeProxyOOMScoreAdj)\n",
"\t\t\t\tobj.OOMScoreAdj = &temp\n",
"\t\t\t}\n",
"\t\t\tif obj.IPTablesSyncPeriod.Duration == 0 {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\t\t\tif obj.ResourceContainer == \"\" {\n",
"\t\t\t\tobj.ResourceContainer = \"/kube-proxy\"\n",
"\t\t\t}\n"
],
"file_path": "pkg/apis/componentconfig/v1alpha1/defaults.go",
"type": "add",
"edit_start_line_idx": 42
} | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"time"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) {
scheme.AddDefaultingFuncs(
func(obj *KubeProxyConfiguration) {
if obj.BindAddress == "" {
obj.BindAddress = "0.0.0.0"
}
if obj.HealthzPort == 0 {
obj.HealthzPort = 10249
}
if obj.HealthzBindAddress == "" {
obj.HealthzBindAddress = "127.0.0.1"
}
if obj.OOMScoreAdj == nil {
temp := int32(qos.KubeProxyOOMScoreAdj)
obj.OOMScoreAdj = &temp
}
if obj.IPTablesSyncPeriod.Duration == 0 {
obj.IPTablesSyncPeriod = unversioned.Duration{5 * time.Second}
}
},
)
}
| pkg/apis/componentconfig/v1alpha1/defaults.go | 1 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.9976248145103455,
0.39965540170669556,
0.00017757741443347186,
0.0030996513087302446,
0.48806652426719666
] |
{
"id": 4,
"code_window": [
"\t\t\tif obj.OOMScoreAdj == nil {\n",
"\t\t\t\ttemp := int32(qos.KubeProxyOOMScoreAdj)\n",
"\t\t\t\tobj.OOMScoreAdj = &temp\n",
"\t\t\t}\n",
"\t\t\tif obj.IPTablesSyncPeriod.Duration == 0 {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\t\t\tif obj.ResourceContainer == \"\" {\n",
"\t\t\t\tobj.ResourceContainer = \"/kube-proxy\"\n",
"\t\t\t}\n"
],
"file_path": "pkg/apis/componentconfig/v1alpha1/defaults.go",
"type": "add",
"edit_start_line_idx": 42
} | package base
import (
"github.com/rackspace/gophercloud"
os "github.com/rackspace/gophercloud/openstack/cdn/v1/base"
)
// Get retrieves the home document, allowing the user to discover the
// entire API.
func Get(c *gophercloud.ServiceClient) os.GetResult {
return os.Get(c)
}
// Ping retrieves a ping to the server.
func Ping(c *gophercloud.ServiceClient) os.PingResult {
return os.Ping(c)
}
| Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/cdn/v1/base/delegate.go | 0 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.0001709030766505748,
0.00016925449017435312,
0.00016760590369813144,
0.00016925449017435312,
0.0000016485864762216806
] |
{
"id": 4,
"code_window": [
"\t\t\tif obj.OOMScoreAdj == nil {\n",
"\t\t\t\ttemp := int32(qos.KubeProxyOOMScoreAdj)\n",
"\t\t\t\tobj.OOMScoreAdj = &temp\n",
"\t\t\t}\n",
"\t\t\tif obj.IPTablesSyncPeriod.Duration == 0 {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\t\t\tif obj.ResourceContainer == \"\" {\n",
"\t\t\t\tobj.ResourceContainer = \"/kube-proxy\"\n",
"\t\t\t}\n"
],
"file_path": "pkg/apis/componentconfig/v1alpha1/defaults.go",
"type": "add",
"edit_start_line_idx": 42
} |
# HELP metric one
# HELP metric two
| Godeps/_workspace/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12 | 0 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.00016762716404628009,
0.00016762716404628009,
0.00016762716404628009,
0.00016762716404628009,
0
] |
{
"id": 4,
"code_window": [
"\t\t\tif obj.OOMScoreAdj == nil {\n",
"\t\t\t\ttemp := int32(qos.KubeProxyOOMScoreAdj)\n",
"\t\t\t\tobj.OOMScoreAdj = &temp\n",
"\t\t\t}\n",
"\t\t\tif obj.IPTablesSyncPeriod.Duration == 0 {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep"
],
"after_edit": [
"\t\t\tif obj.ResourceContainer == \"\" {\n",
"\t\t\t\tobj.ResourceContainer = \"/kube-proxy\"\n",
"\t\t\t}\n"
],
"file_path": "pkg/apis/componentconfig/v1alpha1/defaults.go",
"type": "add",
"edit_start_line_idx": 42
} | // Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Unmarshals a containers description json file. The json file contains
// an array of ContainerHint structs, each with a container's id and networkInterface.
// This allows collecting stats about network interfaces configured outside docker
// and lxc
package raw
import (
"encoding/json"
"flag"
"io/ioutil"
"os"
)
var argContainerHints = flag.String("container_hints", "/etc/cadvisor/container_hints.json", "location of the container hints file")
type containerHints struct {
AllHosts []containerHint `json:"all_hosts,omitempty"`
}
type containerHint struct {
FullName string `json:"full_path,omitempty"`
NetworkInterface *networkInterface `json:"network_interface,omitempty"`
Mounts []mount `json:"mounts,omitempty"`
}
type mount struct {
HostDir string `json:"host_dir,omitempty"`
ContainerDir string `json:"container_dir,omitempty"`
}
type networkInterface struct {
VethHost string `json:"veth_host,omitempty"`
VethChild string `json:"veth_child,omitempty"`
}
func getContainerHintsFromFile(containerHintsFile string) (containerHints, error) {
dat, err := ioutil.ReadFile(containerHintsFile)
if os.IsNotExist(err) {
return containerHints{}, nil
}
var cHints containerHints
if err == nil {
err = json.Unmarshal(dat, &cHints)
}
return cHints, err
}
| Godeps/_workspace/src/github.com/google/cadvisor/container/raw/container_hints.go | 0 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.0001786969369277358,
0.00017269716772716492,
0.00016551572480238974,
0.00017236238636542112,
0.0000041257881093770266
] |
{
"id": 5,
"code_window": [
"\t\t\tif obj.IPTablesSyncPeriod.Duration == 0 {\n",
"\t\t\t\tobj.IPTablesSyncPeriod = unversioned.Duration{5 * time.Second}\n",
"\t\t\t}\n",
"\t\t},\n",
"\t)\n",
"}"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tobj.IPTablesSyncPeriod = unversioned.Duration{30 * time.Second}\n",
"\t\t\t}\n",
"\t\t\tzero := unversioned.Duration{}\n",
"\t\t\tif obj.UDPIdleTimeout == zero {\n",
"\t\t\t\tobj.UDPIdleTimeout = unversioned.Duration{250 * time.Millisecond}\n",
"\t\t\t}\n",
"\t\t\tif obj.ConntrackMax == 0 {\n",
"\t\t\t\tobj.ConntrackMax = 256 * 1024 // 4x default (64k)\n",
"\t\t\t}\n",
"\t\t\tif obj.ConntrackTCPEstablishedTimeout == zero {\n",
"\t\t\t\tobj.ConntrackTCPEstablishedTimeout = unversioned.Duration{Duration: 24 * time.Hour} // 1 day (1/5 default)\n"
],
"file_path": "pkg/apis/componentconfig/v1alpha1/defaults.go",
"type": "replace",
"edit_start_line_idx": 43
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package options contains flags for initializing a proxy.
package options
import (
_ "net/http/pprof"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/util"
"github.com/spf13/pflag"
)
const (
ExperimentalProxyModeAnnotation = "net.experimental.kubernetes.io/proxy-mode"
)
// ProxyServerConfig configures and runs a Kubernetes proxy server
type ProxyServerConfig struct {
componentconfig.KubeProxyConfiguration
ResourceContainer string
KubeAPIQPS float32
KubeAPIBurst int
ConfigSyncPeriod time.Duration
CleanupAndExit bool
NodeRef *api.ObjectReference
Master string
Kubeconfig string
}
func NewProxyConfig() *ProxyServerConfig {
return &ProxyServerConfig{
KubeProxyConfiguration: componentconfig.KubeProxyConfiguration{
BindAddress: "0.0.0.0",
HealthzPort: 10249,
HealthzBindAddress: "127.0.0.1",
OOMScoreAdj: util.IntPtr(qos.KubeProxyOOMScoreAdj),
ResourceContainer: "/kube-proxy",
IPTablesSyncPeriod: unversioned.Duration{30 * time.Second},
UDPIdleTimeout: unversioned.Duration{250 * time.Millisecond},
ConntrackMax: 256 * 1024, // 4x default (64k)
ConntrackTCPEstablishedTimeout: unversioned.Duration{Duration: 24 * time.Hour}, // 1 day (1/5 default)
},
KubeAPIQPS: 5.0,
KubeAPIBurst: 10,
ConfigSyncPeriod: 15 * time.Minute,
}
}
// AddFlags adds flags for a specific ProxyServer to the specified FlagSet
func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) {
fs.Var(componentconfig.IPVar{&s.BindAddress}, "bind-address", "The IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)")
fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
fs.IntVar(&s.HealthzPort, "healthz-port", s.HealthzPort, "The port to bind the health check server. Use 0 to disable.")
fs.Var(componentconfig.IPVar{&s.HealthzBindAddress}, "healthz-bind-address", "The IP address for the health check server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)")
fs.IntVar(s.OOMScoreAdj, "oom-score-adj", util.IntPtrDerefOr(s.OOMScoreAdj, qos.KubeProxyOOMScoreAdj), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]")
fs.StringVar(&s.ResourceContainer, "resource-container", s.ResourceContainer, "Absolute name of the resource-only container to create and run the Kube-proxy in (Default: /kube-proxy).")
fs.MarkDeprecated("resource-container", "This feature will be removed in a later release.")
fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization information (the master location is set by the master flag).")
fs.Var(componentconfig.PortRangeVar{&s.PortRange}, "proxy-port-range", "Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.")
fs.StringVar(&s.HostnameOverride, "hostname-override", s.HostnameOverride, "If non-empty, will use this string as identification instead of the actual hostname.")
fs.Var(&s.Mode, "proxy-mode", "Which proxy mode to use: 'userspace' (older) or 'iptables' (faster). If blank, look at the Node object on the Kubernetes API and respect the '"+ExperimentalProxyModeAnnotation+"' annotation if provided. Otherwise use the best-available proxy (currently iptables). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.")
fs.DurationVar(&s.IPTablesSyncPeriod.Duration, "iptables-sync-period", s.IPTablesSyncPeriod.Duration, "How often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.")
fs.DurationVar(&s.ConfigSyncPeriod, "config-sync-period", s.ConfigSyncPeriod, "How often configuration from the apiserver is refreshed. Must be greater than 0.")
fs.BoolVar(&s.MasqueradeAll, "masquerade-all", false, "If using the pure iptables proxy, SNAT everything")
fs.BoolVar(&s.CleanupAndExit, "cleanup-iptables", false, "If true cleanup iptables rules and exit.")
fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
fs.DurationVar(&s.UDPIdleTimeout.Duration, "udp-timeout", s.UDPIdleTimeout.Duration, "How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace")
fs.IntVar(&s.ConntrackMax, "conntrack-max", s.ConntrackMax, "Maximum number of NAT connections to track (0 to leave as-is)")
fs.DurationVar(&s.ConntrackTCPEstablishedTimeout.Duration, "conntrack-tcp-timeout-established", s.ConntrackTCPEstablishedTimeout.Duration, "Idle timeout for established TCP connections (0 to leave as-is)")
}
| cmd/kube-proxy/app/options/options.go | 1 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.011984506621956825,
0.002285551279783249,
0.00016866449732333422,
0.0002878934028558433,
0.004089080262929201
] |
{
"id": 5,
"code_window": [
"\t\t\tif obj.IPTablesSyncPeriod.Duration == 0 {\n",
"\t\t\t\tobj.IPTablesSyncPeriod = unversioned.Duration{5 * time.Second}\n",
"\t\t\t}\n",
"\t\t},\n",
"\t)\n",
"}"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tobj.IPTablesSyncPeriod = unversioned.Duration{30 * time.Second}\n",
"\t\t\t}\n",
"\t\t\tzero := unversioned.Duration{}\n",
"\t\t\tif obj.UDPIdleTimeout == zero {\n",
"\t\t\t\tobj.UDPIdleTimeout = unversioned.Duration{250 * time.Millisecond}\n",
"\t\t\t}\n",
"\t\t\tif obj.ConntrackMax == 0 {\n",
"\t\t\t\tobj.ConntrackMax = 256 * 1024 // 4x default (64k)\n",
"\t\t\t}\n",
"\t\t\tif obj.ConntrackTCPEstablishedTimeout == zero {\n",
"\t\t\t\tobj.ConntrackTCPEstablishedTimeout = unversioned.Duration{Duration: 24 * time.Hour} // 1 day (1/5 default)\n"
],
"file_path": "pkg/apis/componentconfig/v1alpha1/defaults.go",
"type": "replace",
"edit_start_line_idx": 43
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubectl
import (
"bytes"
"encoding/json"
"fmt"
"io"
"reflect"
"strings"
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions"
kubectltesting "k8s.io/kubernetes/pkg/kubectl/testing"
"k8s.io/kubernetes/pkg/runtime"
yamlserializer "k8s.io/kubernetes/pkg/runtime/serializer/yaml"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/sets"
"github.com/ghodss/yaml"
)
func init() {
api.Scheme.AddKnownTypes(testapi.Default.InternalGroupVersion(), &kubectltesting.TestStruct{})
api.Scheme.AddKnownTypes(*testapi.Default.GroupVersion(), &kubectltesting.TestStruct{})
}
var testData = kubectltesting.TestStruct{
Key: "testValue",
Map: map[string]int{"TestSubkey": 1},
StringList: []string{"a", "b", "c"},
IntList: []int{1, 2, 3},
}
func TestVersionedPrinter(t *testing.T) {
original := &kubectltesting.TestStruct{Key: "value"}
p := NewVersionedPrinter(
ResourcePrinterFunc(func(obj runtime.Object, w io.Writer) error {
if obj == original {
t.Fatalf("object should not be identical: %#v", obj)
}
if obj.(*kubectltesting.TestStruct).Key != "value" {
t.Fatalf("object was not converted: %#v", obj)
}
return nil
}),
api.Scheme,
*testapi.Default.GroupVersion(),
)
if err := p.PrintObj(original, nil); err != nil {
t.Errorf("unexpected error: %v", err)
}
}
func TestPrintDefault(t *testing.T) {
printer, found, err := GetPrinter("", "")
if err != nil {
t.Fatalf("unexpected error: %#v", err)
}
if found {
t.Errorf("no printer should have been found: %#v / %v", printer, err)
}
}
type TestPrintType struct {
Data string
}
func (obj *TestPrintType) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind }
type TestUnknownType struct{}
func (obj *TestUnknownType) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind }
func TestPrinter(t *testing.T) {
//test inputs
simpleTest := &TestPrintType{"foo"}
podTest := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}
podListTest := &api.PodList{
Items: []api.Pod{
{ObjectMeta: api.ObjectMeta{Name: "foo"}},
{ObjectMeta: api.ObjectMeta{Name: "bar"}},
},
}
emptyListTest := &api.PodList{}
testapi, err := api.Scheme.ConvertToVersion(podTest, testapi.Default.GroupVersion().String())
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
printerTests := []struct {
Name string
Format string
FormatArgument string
Input runtime.Object
Expect string
}{
{"test json", "json", "", simpleTest, "{\n \"Data\": \"foo\"\n}\n"},
{"test yaml", "yaml", "", simpleTest, "Data: foo\n"},
{"test template", "template", "{{if .id}}{{.id}}{{end}}{{if .metadata.name}}{{.metadata.name}}{{end}}",
podTest, "foo"},
{"test jsonpath", "jsonpath", "{.metadata.name}", podTest, "foo"},
{"test jsonpath list", "jsonpath", "{.items[*].metadata.name}", podListTest, "foo bar"},
{"test jsonpath empty list", "jsonpath", "{.items[*].metadata.name}", emptyListTest, ""},
{"test name", "name", "", podTest, "pod/foo\n"},
{"emits versioned objects", "template", "{{.kind}}", testapi, "Pod"},
}
for _, test := range printerTests {
buf := bytes.NewBuffer([]byte{})
printer, found, err := GetPrinter(test.Format, test.FormatArgument)
if err != nil || !found {
t.Errorf("in %s, unexpected error: %#v", test.Name, err)
}
if err := printer.PrintObj(test.Input, buf); err != nil {
t.Errorf("in %s, unexpected error: %#v", test.Name, err)
}
if buf.String() != test.Expect {
t.Errorf("in %s, expect %q, got %q", test.Name, test.Expect, buf.String())
}
}
}
func TestBadPrinter(t *testing.T) {
badPrinterTests := []struct {
Name string
Format string
FormatArgument string
Error error
}{
{"empty template", "template", "", fmt.Errorf("template format specified but no template given")},
{"bad template", "template", "{{ .Name", fmt.Errorf("error parsing template {{ .Name, template: output:1: unclosed action\n")},
{"bad templatefile", "templatefile", "", fmt.Errorf("templatefile format specified but no template file given")},
{"bad jsonpath", "jsonpath", "{.Name", fmt.Errorf("error parsing jsonpath {.Name, unclosed action\n")},
}
for _, test := range badPrinterTests {
_, _, err := GetPrinter(test.Format, test.FormatArgument)
if err == nil || err.Error() != test.Error.Error() {
t.Errorf("in %s, expect %s, got %s", test.Name, test.Error, err)
}
}
}
func testPrinter(t *testing.T, printer ResourcePrinter, unmarshalFunc func(data []byte, v interface{}) error) {
buf := bytes.NewBuffer([]byte{})
err := printer.PrintObj(&testData, buf)
if err != nil {
t.Fatal(err)
}
var poutput kubectltesting.TestStruct
// Verify that the given function runs without error.
err = unmarshalFunc(buf.Bytes(), &poutput)
if err != nil {
t.Fatal(err)
}
// Use real decode function to undo the versioning process.
poutput = kubectltesting.TestStruct{}
s := yamlserializer.NewDecodingSerializer(testapi.Default.Codec())
if err := runtime.DecodeInto(s, buf.Bytes(), &poutput); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(testData, poutput) {
t.Errorf("Test data and unmarshaled data are not equal: %v", util.ObjectDiff(poutput, testData))
}
obj := &api.Pod{
ObjectMeta: api.ObjectMeta{Name: "foo"},
}
buf.Reset()
printer.PrintObj(obj, buf)
var objOut api.Pod
// Verify that given function runs without error.
err = unmarshalFunc(buf.Bytes(), &objOut)
if err != nil {
t.Fatalf("unexpected error: %#v", err)
}
// Use real decode function to undo the versioning process.
objOut = api.Pod{}
if err := runtime.DecodeInto(s, buf.Bytes(), &objOut); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(obj, &objOut) {
t.Errorf("Unexpected inequality:\n%v", util.ObjectDiff(obj, &objOut))
}
}
func TestYAMLPrinter(t *testing.T) {
testPrinter(t, &YAMLPrinter{}, yaml.Unmarshal)
}
func TestJSONPrinter(t *testing.T) {
testPrinter(t, &JSONPrinter{}, json.Unmarshal)
}
func PrintCustomType(obj *TestPrintType, w io.Writer, options PrintOptions) error {
_, err := fmt.Fprintf(w, "%s", obj.Data)
return err
}
func ErrorPrintHandler(obj *TestPrintType, w io.Writer, options PrintOptions) error {
return fmt.Errorf("ErrorPrintHandler error")
}
func TestCustomTypePrinting(t *testing.T) {
columns := []string{"Data"}
printer := NewHumanReadablePrinter(false, false, false, false, false, []string{})
printer.Handler(columns, PrintCustomType)
obj := TestPrintType{"test object"}
buffer := &bytes.Buffer{}
err := printer.PrintObj(&obj, buffer)
if err != nil {
t.Fatalf("An error occurred printing the custom type: %#v", err)
}
expectedOutput := "Data\ntest object"
if buffer.String() != expectedOutput {
t.Errorf("The data was not printed as expected. Expected:\n%s\nGot:\n%s", expectedOutput, buffer.String())
}
}
func TestPrintHandlerError(t *testing.T) {
columns := []string{"Data"}
printer := NewHumanReadablePrinter(false, false, false, false, false, []string{})
printer.Handler(columns, ErrorPrintHandler)
obj := TestPrintType{"test object"}
buffer := &bytes.Buffer{}
err := printer.PrintObj(&obj, buffer)
if err == nil || err.Error() != "ErrorPrintHandler error" {
t.Errorf("Did not get the expected error: %#v", err)
}
}
func TestUnknownTypePrinting(t *testing.T) {
printer := NewHumanReadablePrinter(false, false, false, false, false, []string{})
buffer := &bytes.Buffer{}
err := printer.PrintObj(&TestUnknownType{}, buffer)
if err == nil {
t.Errorf("An error was expected from printing unknown type")
}
}
func TestTemplatePanic(t *testing.T) {
tmpl := `{{and ((index .currentState.info "foo").state.running.startedAt) .currentState.info.net.state.running.startedAt}}`
printer, err := NewTemplatePrinter([]byte(tmpl))
if err != nil {
t.Fatalf("tmpl fail: %v", err)
}
buffer := &bytes.Buffer{}
err = printer.PrintObj(&api.Pod{}, buffer)
if err == nil {
t.Fatalf("expected that template to crash")
}
if buffer.String() == "" {
t.Errorf("no debugging info was printed")
}
}
func TestNamePrinter(t *testing.T) {
tests := map[string]struct {
obj runtime.Object
expect string
}{
"singleObject": {
&api.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
},
ObjectMeta: api.ObjectMeta{
Name: "foo",
},
},
"pod/foo\n"},
"List": {
&v1.List{
TypeMeta: unversioned.TypeMeta{
Kind: "List",
},
Items: []runtime.RawExtension{
{
RawJSON: []byte(`{"kind": "Pod", "apiVersion": "v1", "metadata": { "name": "foo"}}`),
},
{
RawJSON: []byte(`{"kind": "Pod", "apiVersion": "v1", "metadata": { "name": "bar"}}`),
},
},
},
"pod/foo\npod/bar\n"},
}
printer, _, _ := GetPrinter("name", "")
for name, item := range tests {
buff := &bytes.Buffer{}
err := printer.PrintObj(item.obj, buff)
if err != nil {
t.Errorf("%v: unexpected err: %v", name, err)
continue
}
got := buff.String()
if item.expect != got {
t.Errorf("%v: expected %v, got %v", name, item.expect, got)
}
}
}
func TestTemplateStrings(t *testing.T) {
// This unit tests the "exists" function as well as the template from update.sh
table := map[string]struct {
pod api.Pod
expect string
}{
"nilInfo": {api.Pod{}, "false"},
"emptyInfo": {api.Pod{Status: api.PodStatus{ContainerStatuses: []api.ContainerStatus{}}}, "false"},
"fooExists": {
api.Pod{
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
{
Name: "foo",
},
},
},
},
"false",
},
"barExists": {
api.Pod{
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
{
Name: "bar",
},
},
},
},
"false",
},
"bothExist": {
api.Pod{
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
{
Name: "foo",
},
{
Name: "bar",
},
},
},
},
"false",
},
"barValid": {
api.Pod{
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
{
Name: "foo",
},
{
Name: "bar",
State: api.ContainerState{
Running: &api.ContainerStateRunning{
StartedAt: unversioned.Time{},
},
},
},
},
},
},
"false",
},
"bothValid": {
api.Pod{
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
{
Name: "foo",
State: api.ContainerState{
Running: &api.ContainerStateRunning{
StartedAt: unversioned.Time{},
},
},
},
{
Name: "bar",
State: api.ContainerState{
Running: &api.ContainerStateRunning{
StartedAt: unversioned.Time{},
},
},
},
},
},
},
"true",
},
}
// The point of this test is to verify that the below template works.
tmpl := `{{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "foo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}`
p, err := NewTemplatePrinter([]byte(tmpl))
if err != nil {
t.Fatalf("tmpl fail: %v", err)
}
printer := NewVersionedPrinter(p, api.Scheme, *testapi.Default.GroupVersion())
for name, item := range table {
buffer := &bytes.Buffer{}
err = printer.PrintObj(&item.pod, buffer)
if err != nil {
t.Errorf("%v: unexpected err: %v", name, err)
continue
}
actual := buffer.String()
if len(actual) == 0 {
actual = "false"
}
if e := item.expect; e != actual {
t.Errorf("%v: expected %v, got %v", name, e, actual)
}
}
}
func TestPrinters(t *testing.T) {
om := func(name string) api.ObjectMeta { return api.ObjectMeta{Name: name} }
templatePrinter, err := NewTemplatePrinter([]byte("{{.name}}"))
if err != nil {
t.Fatal(err)
}
templatePrinter2, err := NewTemplatePrinter([]byte("{{len .items}}"))
if err != nil {
t.Fatal(err)
}
jsonpathPrinter, err := NewJSONPathPrinter("{.metadata.name}")
if err != nil {
t.Fatal(err)
}
printers := map[string]ResourcePrinter{
"humanReadable": NewHumanReadablePrinter(true, false, false, false, false, []string{}),
"humanReadableHeaders": NewHumanReadablePrinter(false, false, false, false, false, []string{}),
"json": &JSONPrinter{},
"yaml": &YAMLPrinter{},
"template": templatePrinter,
"template2": templatePrinter2,
"jsonpath": jsonpathPrinter,
"name": &NamePrinter{
Typer: runtime.ObjectTyperToTyper(api.Scheme),
Decoder: api.Codecs.UniversalDecoder(),
},
}
objects := map[string]runtime.Object{
"pod": &api.Pod{ObjectMeta: om("pod")},
"emptyPodList": &api.PodList{},
"nonEmptyPodList": &api.PodList{Items: []api.Pod{{}}},
"endpoints": &api.Endpoints{
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}, {IP: "localhost"}},
Ports: []api.EndpointPort{{Port: 8080}},
}}},
}
// map of printer name to set of objects it should fail on.
expectedErrors := map[string]sets.String{
"template2": sets.NewString("pod", "emptyPodList", "endpoints"),
"jsonpath": sets.NewString("emptyPodList", "nonEmptyPodList", "endpoints"),
}
for pName, p := range printers {
for oName, obj := range objects {
b := &bytes.Buffer{}
if err := p.PrintObj(obj, b); err != nil {
if set, found := expectedErrors[pName]; found && set.Has(oName) {
// expected error
continue
}
t.Errorf("printer '%v', object '%v'; error: '%v'", pName, oName, err)
}
}
}
}
func TestPrintEventsResultSorted(t *testing.T) {
// Arrange
printer := NewHumanReadablePrinter(false /* noHeaders */, false, false, false, false, []string{})
obj := api.EventList{
Items: []api.Event{
{
Source: api.EventSource{Component: "kubelet"},
Message: "Item 1",
FirstTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
LastTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
Count: 1,
Type: api.EventTypeNormal,
},
{
Source: api.EventSource{Component: "scheduler"},
Message: "Item 2",
FirstTimestamp: unversioned.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)),
LastTimestamp: unversioned.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)),
Count: 1,
Type: api.EventTypeNormal,
},
{
Source: api.EventSource{Component: "kubelet"},
Message: "Item 3",
FirstTimestamp: unversioned.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)),
LastTimestamp: unversioned.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)),
Count: 1,
Type: api.EventTypeNormal,
},
},
}
buffer := &bytes.Buffer{}
// Act
err := printer.PrintObj(&obj, buffer)
// Assert
if err != nil {
t.Fatalf("An error occurred printing the EventList: %#v", err)
}
out := buffer.String()
VerifyDatesInOrder(out, "\n" /* rowDelimiter */, " " /* columnDelimiter */, t)
}
func TestPrintNodeStatus(t *testing.T) {
printer := NewHumanReadablePrinter(false, false, false, false, false, []string{})
table := []struct {
node api.Node
status string
}{
{
node: api.Node{
ObjectMeta: api.ObjectMeta{Name: "foo1"},
Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionTrue}}},
},
status: "Ready",
},
{
node: api.Node{
ObjectMeta: api.ObjectMeta{Name: "foo2"},
Spec: api.NodeSpec{Unschedulable: true},
Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionTrue}}},
},
status: "Ready,SchedulingDisabled",
},
{
node: api.Node{
ObjectMeta: api.ObjectMeta{Name: "foo3"},
Status: api.NodeStatus{Conditions: []api.NodeCondition{
{Type: api.NodeReady, Status: api.ConditionTrue},
{Type: api.NodeReady, Status: api.ConditionTrue}}},
},
status: "Ready",
},
{
node: api.Node{
ObjectMeta: api.ObjectMeta{Name: "foo4"},
Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionFalse}}},
},
status: "NotReady",
},
{
node: api.Node{
ObjectMeta: api.ObjectMeta{Name: "foo5"},
Spec: api.NodeSpec{Unschedulable: true},
Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionFalse}}},
},
status: "NotReady,SchedulingDisabled",
},
{
node: api.Node{
ObjectMeta: api.ObjectMeta{Name: "foo6"},
Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: "InvalidValue", Status: api.ConditionTrue}}},
},
status: "Unknown",
},
{
node: api.Node{
ObjectMeta: api.ObjectMeta{Name: "foo7"},
Status: api.NodeStatus{Conditions: []api.NodeCondition{{}}},
},
status: "Unknown",
},
{
node: api.Node{
ObjectMeta: api.ObjectMeta{Name: "foo8"},
Spec: api.NodeSpec{Unschedulable: true},
Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: "InvalidValue", Status: api.ConditionTrue}}},
},
status: "Unknown,SchedulingDisabled",
},
{
node: api.Node{
ObjectMeta: api.ObjectMeta{Name: "foo9"},
Spec: api.NodeSpec{Unschedulable: true},
Status: api.NodeStatus{Conditions: []api.NodeCondition{{}}},
},
status: "Unknown,SchedulingDisabled",
},
}
for _, test := range table {
buffer := &bytes.Buffer{}
err := printer.PrintObj(&test.node, buffer)
if err != nil {
t.Fatalf("An error occurred printing Node: %#v", err)
}
if !contains(strings.Fields(buffer.String()), test.status) {
t.Fatalf("Expect printing node %s with status %#v, got: %#v", test.node.Name, test.status, buffer.String())
}
}
}
func contains(fields []string, field string) bool {
for _, v := range fields {
if v == field {
return true
}
}
return false
}
func TestPrintHumanReadableService(t *testing.T) {
tests := []api.Service{
{
Spec: api.ServiceSpec{
ClusterIP: "1.2.3.4",
Type: "LoadBalancer",
Ports: []api.ServicePort{
{
Port: 80,
Protocol: "TCP",
},
},
},
Status: api.ServiceStatus{
LoadBalancer: api.LoadBalancerStatus{
Ingress: []api.LoadBalancerIngress{
{
IP: "2.3.4.5",
},
{
IP: "3.4.5.6",
},
},
},
},
},
{
Spec: api.ServiceSpec{
ClusterIP: "1.2.3.4",
Ports: []api.ServicePort{
{
Port: 80,
Protocol: "TCP",
},
{
Port: 8090,
Protocol: "UDP",
},
{
Port: 8000,
Protocol: "TCP",
},
},
},
},
{
Spec: api.ServiceSpec{
ClusterIP: "1.2.3.4",
Type: "LoadBalancer",
Ports: []api.ServicePort{
{
Port: 80,
Protocol: "TCP",
},
{
Port: 8090,
Protocol: "UDP",
},
{
Port: 8000,
Protocol: "TCP",
},
},
},
Status: api.ServiceStatus{
LoadBalancer: api.LoadBalancerStatus{
Ingress: []api.LoadBalancerIngress{
{
IP: "2.3.4.5",
},
},
},
},
},
{
Spec: api.ServiceSpec{
ClusterIP: "1.2.3.4",
Type: "LoadBalancer",
Ports: []api.ServicePort{
{
Port: 80,
Protocol: "TCP",
},
{
Port: 8090,
Protocol: "UDP",
},
{
Port: 8000,
Protocol: "TCP",
},
},
},
Status: api.ServiceStatus{
LoadBalancer: api.LoadBalancerStatus{
Ingress: []api.LoadBalancerIngress{
{
IP: "2.3.4.5",
},
{
IP: "3.4.5.6",
},
{
IP: "5.6.7.8",
Hostname: "host5678",
},
},
},
},
},
}
for _, svc := range tests {
buff := bytes.Buffer{}
printService(&svc, &buff, PrintOptions{false, false, false, false, false, []string{}})
output := string(buff.Bytes())
ip := svc.Spec.ClusterIP
if !strings.Contains(output, ip) {
t.Errorf("expected to contain ClusterIP %s, but doesn't: %s", ip, output)
}
for _, ingress := range svc.Status.LoadBalancer.Ingress {
ip = ingress.IP
if !strings.Contains(output, ip) {
t.Errorf("expected to contain ingress ip %s, but doesn't: %s", ip, output)
}
}
for _, port := range svc.Spec.Ports {
portSpec := fmt.Sprintf("%d/%s", port.Port, port.Protocol)
if !strings.Contains(output, portSpec) {
t.Errorf("expected to contain port: %s, but doesn't: %s", portSpec, output)
}
}
// Each service should print on one line
if 1 != strings.Count(output, "\n") {
t.Errorf("expected a single newline, found %d", strings.Count(output, "\n"))
}
}
}
func TestPrintHumanReadableWithNamespace(t *testing.T) {
namespaceName := "testnamespace"
name := "test"
table := []struct {
obj runtime.Object
isNamespaced bool
}{
{
obj: &api.Pod{
ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
},
isNamespaced: true,
},
{
obj: &api.ReplicationController{
ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
Spec: api.ReplicationControllerSpec{
Replicas: 2,
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{
"name": "foo",
"type": "production",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Image: "foo/bar",
TerminationMessagePath: api.TerminationMessagePathDefault,
ImagePullPolicy: api.PullIfNotPresent,
},
},
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSDefault,
NodeSelector: map[string]string{
"baz": "blah",
},
},
},
},
},
isNamespaced: true,
},
{
obj: &api.Service{
ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
Spec: api.ServiceSpec{
ClusterIP: "1.2.3.4",
Ports: []api.ServicePort{
{
Port: 80,
Protocol: "TCP",
},
},
},
Status: api.ServiceStatus{
LoadBalancer: api.LoadBalancerStatus{
Ingress: []api.LoadBalancerIngress{
{
IP: "2.3.4.5",
},
},
},
},
},
isNamespaced: true,
},
{
obj: &api.Endpoints{
ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}, {IP: "localhost"}},
Ports: []api.EndpointPort{{Port: 8080}},
},
}},
isNamespaced: true,
},
{
obj: &api.Namespace{
ObjectMeta: api.ObjectMeta{Name: name},
},
isNamespaced: false,
},
{
obj: &api.Secret{
ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
},
isNamespaced: true,
},
{
obj: &api.ServiceAccount{
ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
Secrets: []api.ObjectReference{},
},
isNamespaced: true,
},
{
obj: &api.Node{
ObjectMeta: api.ObjectMeta{Name: name},
Status: api.NodeStatus{},
},
isNamespaced: false,
},
{
obj: &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
Spec: api.PersistentVolumeSpec{},
},
isNamespaced: false,
},
{
obj: &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
Spec: api.PersistentVolumeClaimSpec{},
},
isNamespaced: true,
},
{
obj: &api.Event{
ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
Source: api.EventSource{Component: "kubelet"},
Message: "Item 1",
FirstTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
LastTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
Count: 1,
Type: api.EventTypeNormal,
},
isNamespaced: true,
},
{
obj: &api.LimitRange{
ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
},
isNamespaced: true,
},
{
obj: &api.ResourceQuota{
ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
},
isNamespaced: true,
},
{
obj: &api.ComponentStatus{
Conditions: []api.ComponentCondition{
{Type: api.ComponentHealthy, Status: api.ConditionTrue, Message: "ok", Error: ""},
},
},
isNamespaced: false,
},
}
for _, test := range table {
if test.isNamespaced {
// Expect output to include namespace when requested.
printer := NewHumanReadablePrinter(false, true, false, false, false, []string{})
buffer := &bytes.Buffer{}
err := printer.PrintObj(test.obj, buffer)
if err != nil {
t.Fatalf("An error occurred printing object: %#v", err)
}
matched := contains(strings.Fields(buffer.String()), fmt.Sprintf("%s", namespaceName))
if !matched {
t.Errorf("Expect printing object to contain namespace: %#v", test.obj)
}
} else {
// Expect error when trying to get all namespaces for un-namespaced object.
printer := NewHumanReadablePrinter(false, true, false, false, false, []string{})
buffer := &bytes.Buffer{}
err := printer.PrintObj(test.obj, buffer)
if err == nil {
t.Errorf("Expected error when printing un-namespaced type")
}
}
}
}
func TestPrintPod(t *testing.T) {
tests := []struct {
pod api.Pod
expect string
}{
{
// Test name, num of containers, restarts, container ready status
api.Pod{
ObjectMeta: api.ObjectMeta{Name: "test1"},
Spec: api.PodSpec{Containers: make([]api.Container, 2)},
Status: api.PodStatus{
Phase: "podPhase",
ContainerStatuses: []api.ContainerStatus{
{Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}},
{RestartCount: 3},
},
},
},
"test1\t1/2\tpodPhase\t6\t",
},
{
// Test container error overwrites pod phase
api.Pod{
ObjectMeta: api.ObjectMeta{Name: "test2"},
Spec: api.PodSpec{Containers: make([]api.Container, 2)},
Status: api.PodStatus{
Phase: "podPhase",
ContainerStatuses: []api.ContainerStatus{
{Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}},
{State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ContainerWaitingReason"}}, RestartCount: 3},
},
},
},
"test2\t1/2\tContainerWaitingReason\t6\t",
},
{
// Test the same as the above but with Terminated state and the first container overwrites the rest
api.Pod{
ObjectMeta: api.ObjectMeta{Name: "test3"},
Spec: api.PodSpec{Containers: make([]api.Container, 2)},
Status: api.PodStatus{
Phase: "podPhase",
ContainerStatuses: []api.ContainerStatus{
{State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ContainerWaitingReason"}}, RestartCount: 3},
{State: api.ContainerState{Terminated: &api.ContainerStateTerminated{Reason: "ContainerTerminatedReason"}}, RestartCount: 3},
},
},
},
"test3\t0/2\tContainerWaitingReason\t6\t",
},
{
// Test ready is not enough for reporting running
api.Pod{
ObjectMeta: api.ObjectMeta{Name: "test4"},
Spec: api.PodSpec{Containers: make([]api.Container, 2)},
Status: api.PodStatus{
Phase: "podPhase",
ContainerStatuses: []api.ContainerStatus{
{Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}},
{Ready: true, RestartCount: 3},
},
},
},
"test4\t1/2\tpodPhase\t6\t",
},
{
			// Test pod reason overwrites pod phase
api.Pod{
ObjectMeta: api.ObjectMeta{Name: "test5"},
Spec: api.PodSpec{Containers: make([]api.Container, 2)},
Status: api.PodStatus{
Reason: "OutOfDisk",
Phase: "podPhase",
ContainerStatuses: []api.ContainerStatus{
{Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}},
{Ready: true, RestartCount: 3},
},
},
},
"test5\t1/2\tOutOfDisk\t6\t",
},
}
buf := bytes.NewBuffer([]byte{})
for _, test := range tests {
printPod(&test.pod, buf, PrintOptions{false, false, false, true, false, []string{}})
// We ignore time
if !strings.HasPrefix(buf.String(), test.expect) {
t.Fatalf("Expected: %s, got: %s", test.expect, buf.String())
}
buf.Reset()
}
}
func TestPrintNonTerminatedPod(t *testing.T) {
tests := []struct {
pod api.Pod
expect string
}{
{
// Test pod phase Running should be printed
api.Pod{
ObjectMeta: api.ObjectMeta{Name: "test1"},
Spec: api.PodSpec{Containers: make([]api.Container, 2)},
Status: api.PodStatus{
Phase: api.PodRunning,
ContainerStatuses: []api.ContainerStatus{
{Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}},
{RestartCount: 3},
},
},
},
"test1\t1/2\tRunning\t6\t",
},
{
// Test pod phase Pending should be printed
api.Pod{
ObjectMeta: api.ObjectMeta{Name: "test2"},
Spec: api.PodSpec{Containers: make([]api.Container, 2)},
Status: api.PodStatus{
Phase: api.PodPending,
ContainerStatuses: []api.ContainerStatus{
{Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}},
{RestartCount: 3},
},
},
},
"test2\t1/2\tPending\t6\t",
},
{
// Test pod phase Unknown should be printed
api.Pod{
ObjectMeta: api.ObjectMeta{Name: "test3"},
Spec: api.PodSpec{Containers: make([]api.Container, 2)},
Status: api.PodStatus{
Phase: api.PodUnknown,
ContainerStatuses: []api.ContainerStatus{
{Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}},
{RestartCount: 3},
},
},
},
"test3\t1/2\tUnknown\t6\t",
},
{
// Test pod phase Succeeded shouldn't be printed
api.Pod{
ObjectMeta: api.ObjectMeta{Name: "test4"},
Spec: api.PodSpec{Containers: make([]api.Container, 2)},
Status: api.PodStatus{
Phase: api.PodSucceeded,
ContainerStatuses: []api.ContainerStatus{
{Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}},
{RestartCount: 3},
},
},
},
"",
},
{
// Test pod phase Failed shouldn't be printed
api.Pod{
ObjectMeta: api.ObjectMeta{Name: "test5"},
Spec: api.PodSpec{Containers: make([]api.Container, 2)},
Status: api.PodStatus{
Phase: api.PodFailed,
ContainerStatuses: []api.ContainerStatus{
{Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}},
{Ready: true, RestartCount: 3},
},
},
},
"",
},
}
buf := bytes.NewBuffer([]byte{})
for _, test := range tests {
printPod(&test.pod, buf, PrintOptions{false, false, false, false, false, []string{}})
// We ignore time
if !strings.HasPrefix(buf.String(), test.expect) {
t.Fatalf("Expected: %s, got: %s", test.expect, buf.String())
}
buf.Reset()
}
}
func TestPrintPodWithLabels(t *testing.T) {
tests := []struct {
pod api.Pod
labelColumns []string
startsWith string
endsWith string
}{
{
// Test name, num of containers, restarts, container ready status
api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "test1",
Labels: map[string]string{"col1": "asd", "COL2": "zxc"},
},
Spec: api.PodSpec{Containers: make([]api.Container, 2)},
Status: api.PodStatus{
Phase: "podPhase",
ContainerStatuses: []api.ContainerStatus{
{Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}},
{RestartCount: 3},
},
},
},
[]string{"col1", "COL2"},
"test1\t1/2\tpodPhase\t6\t",
"\tasd\tzxc\n",
},
{
// Test name, num of containers, restarts, container ready status
api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "test1",
Labels: map[string]string{"col1": "asd", "COL2": "zxc"},
},
Spec: api.PodSpec{Containers: make([]api.Container, 2)},
Status: api.PodStatus{
Phase: "podPhase",
ContainerStatuses: []api.ContainerStatus{
{Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}},
{RestartCount: 3},
},
},
},
[]string{},
"test1\t1/2\tpodPhase\t6\t",
"\n",
},
}
buf := bytes.NewBuffer([]byte{})
for _, test := range tests {
printPod(&test.pod, buf, PrintOptions{false, false, false, false, false, test.labelColumns})
// We ignore time
if !strings.HasPrefix(buf.String(), test.startsWith) || !strings.HasSuffix(buf.String(), test.endsWith) {
t.Fatalf("Expected to start with: %s and end with: %s, but got: %s", test.startsWith, test.endsWith, buf.String())
}
buf.Reset()
}
}
type stringTestList []struct {
name, got, exp string
}
func TestTranslateTimestamp(t *testing.T) {
tl := stringTestList{
{"a while from now", translateTimestamp(unversioned.Time{Time: time.Now().Add(2.1e9)}), "<invalid>"},
{"almost now", translateTimestamp(unversioned.Time{Time: time.Now().Add(1.9e9)}), "0s"},
{"now", translateTimestamp(unversioned.Time{Time: time.Now()}), "0s"},
{"unknown", translateTimestamp(unversioned.Time{}), "<unknown>"},
{"30 seconds ago", translateTimestamp(unversioned.Time{Time: time.Now().Add(-3e10)}), "30s"},
{"5 minutes ago", translateTimestamp(unversioned.Time{Time: time.Now().Add(-3e11)}), "5m"},
{"an hour ago", translateTimestamp(unversioned.Time{Time: time.Now().Add(-6e12)}), "1h"},
{"2 days ago", translateTimestamp(unversioned.Time{Time: time.Now().AddDate(0, 0, -2)}), "2d"},
{"months ago", translateTimestamp(unversioned.Time{Time: time.Now().AddDate(0, 0, -90)}), "90d"},
{"10 years ago", translateTimestamp(unversioned.Time{Time: time.Now().AddDate(-10, 0, 0)}), "10y"},
}
for _, test := range tl {
if test.got != test.exp {
t.Errorf("On %v, expected '%v', but got '%v'",
test.name, test.exp, test.got)
}
}
}
func TestPrintDeployment(t *testing.T) {
tests := []struct {
deployment extensions.Deployment
expect string
}{
{
extensions.Deployment{
ObjectMeta: api.ObjectMeta{
Name: "test1",
CreationTimestamp: unversioned.Time{Time: time.Now().Add(1.9e9)},
},
Spec: extensions.DeploymentSpec{
Replicas: 5,
Template: api.PodTemplateSpec{
Spec: api.PodSpec{Containers: make([]api.Container, 2)},
},
},
Status: extensions.DeploymentStatus{
Replicas: 10,
UpdatedReplicas: 2,
AvailableReplicas: 1,
UnavailableReplicas: 4,
},
},
"test1\t5\t10\t2\t1\t0s\n",
},
}
buf := bytes.NewBuffer([]byte{})
for _, test := range tests {
printDeployment(&test.deployment, buf, PrintOptions{false, false, false, true, false, []string{}})
if buf.String() != test.expect {
t.Fatalf("Expected: %s, got: %s", test.expect, buf.String())
}
buf.Reset()
}
}
| pkg/kubectl/resource_printer_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.012165510095655918,
0.00043539711623452604,
0.00016181651153601706,
0.00016878379392437637,
0.0012351549230515957
] |
{
"id": 5,
"code_window": [
"\t\t\tif obj.IPTablesSyncPeriod.Duration == 0 {\n",
"\t\t\t\tobj.IPTablesSyncPeriod = unversioned.Duration{5 * time.Second}\n",
"\t\t\t}\n",
"\t\t},\n",
"\t)\n",
"}"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tobj.IPTablesSyncPeriod = unversioned.Duration{30 * time.Second}\n",
"\t\t\t}\n",
"\t\t\tzero := unversioned.Duration{}\n",
"\t\t\tif obj.UDPIdleTimeout == zero {\n",
"\t\t\t\tobj.UDPIdleTimeout = unversioned.Duration{250 * time.Millisecond}\n",
"\t\t\t}\n",
"\t\t\tif obj.ConntrackMax == 0 {\n",
"\t\t\t\tobj.ConntrackMax = 256 * 1024 // 4x default (64k)\n",
"\t\t\t}\n",
"\t\t\tif obj.ConntrackTCPEstablishedTimeout == zero {\n",
"\t\t\t\tobj.ConntrackTCPEstablishedTimeout = unversioned.Duration{Duration: 24 * time.Hour} // 1 day (1/5 default)\n"
],
"file_path": "pkg/apis/componentconfig/v1alpha1/defaults.go",
"type": "replace",
"edit_start_line_idx": 43
} | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util"
. "github.com/onsi/ginkgo"
)
var _ = Describe("ConfigMap", func() {
f := NewFramework("configmap")
It("should be consumable via environment variable [Conformance]", func() {
name := "configmap-test-" + string(util.NewUUID())
configMap := &api.ConfigMap{
ObjectMeta: api.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
Data: map[string]string{
"data-1": "value-1",
"data-2": "value-2",
"data-3": "value-3",
},
}
By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
defer func() {
By("Cleaning up the configMap")
if err := f.Client.ConfigMaps(f.Namespace.Name).Delete(configMap.Name); err != nil {
Failf("unable to delete configMap %v: %v", configMap.Name, err)
}
}()
var err error
if configMap, err = f.Client.ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "pod-configmaps-" + string(util.NewUUID()),
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "env-test",
Image: "gcr.io/google_containers/busybox",
Command: []string{"sh", "-c", "env"},
Env: []api.EnvVar{
{
Name: "CONFIG_DATA_1",
ValueFrom: &api.EnvVarSource{
ConfigMapKeyRef: &api.ConfigMapKeySelector{
LocalObjectReference: api.LocalObjectReference{
Name: name,
},
Key: "data-1",
},
},
},
},
},
},
RestartPolicy: api.RestartPolicyNever,
},
}
testContainerOutput("consume configMaps", f.Client, pod, 0, []string{
"CONFIG_DATA_1=value-1",
}, f.Namespace.Name)
})
})
| test/e2e/configmap.go | 0 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.00017747418314684182,
0.00017186725744977593,
0.00016773508104961365,
0.00017137089162133634,
0.00000345909256793675
] |
{
"id": 5,
"code_window": [
"\t\t\tif obj.IPTablesSyncPeriod.Duration == 0 {\n",
"\t\t\t\tobj.IPTablesSyncPeriod = unversioned.Duration{5 * time.Second}\n",
"\t\t\t}\n",
"\t\t},\n",
"\t)\n",
"}"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\tobj.IPTablesSyncPeriod = unversioned.Duration{30 * time.Second}\n",
"\t\t\t}\n",
"\t\t\tzero := unversioned.Duration{}\n",
"\t\t\tif obj.UDPIdleTimeout == zero {\n",
"\t\t\t\tobj.UDPIdleTimeout = unversioned.Duration{250 * time.Millisecond}\n",
"\t\t\t}\n",
"\t\t\tif obj.ConntrackMax == 0 {\n",
"\t\t\t\tobj.ConntrackMax = 256 * 1024 // 4x default (64k)\n",
"\t\t\t}\n",
"\t\t\tif obj.ConntrackTCPEstablishedTimeout == zero {\n",
"\t\t\t\tobj.ConntrackTCPEstablishedTimeout = unversioned.Duration{Duration: 24 * time.Hour} // 1 day (1/5 default)\n"
],
"file_path": "pkg/apis/componentconfig/v1alpha1/defaults.go",
"type": "replace",
"edit_start_line_idx": 43
} | # iSCSI target container for testing.
Inspired by https://github.com/rvykydal/dockerfile-iscsid
* The container needs /lib/modules from the host to insert appropriate
kernel modules for iscsi. This assumes that these modules are installed
on the host!
* The container needs to run with docker --privileged
block.tar.gz is a small ext2 filesystem created by `make block` (run as root!)
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/test/images/volumes-tester/iscsi/README.md?pixel)]()
| test/images/volumes-tester/iscsi/README.md | 0 | https://github.com/kubernetes/kubernetes/commit/ee4e60d78cf00229d7aab8886657e70e0d1a1d59 | [
0.00016822874022182077,
0.00016661790141370147,
0.00016500706260558218,
0.00016661790141370147,
0.000001610838808119297
] |
{
"id": 0,
"code_window": [
"apiVersion: apps/v1beta1\n",
"kind: StatefulSet\n",
"metadata:\n",
" # This name uniquely identifies the StatefulSet\n",
" name: minio\n",
"spec:\n"
],
"labels": [
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"apiVersion: apps/v1\n"
],
"file_path": "docs/orchestration/kubernetes/minio-distributed-statefulset.yaml",
"type": "replace",
"edit_start_line_idx": 0
} | apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
# This name uniquely identifies the StatefulSet
name: minio
spec:
serviceName: minio
replicas: 4
selector:
matchLabels:
app: minio # has to match .spec.template.metadata.labels
template:
metadata:
labels:
app: minio # has to match .spec.selector.matchLabels
spec:
containers:
- name: minio
env:
- name: MINIO_ACCESS_KEY
value: "minio"
- name: MINIO_SECRET_KEY
value: "minio123"
image: minio/minio:RELEASE.2019-09-26T19-42-35Z
args:
- server
- http://minio-{0...3}.minio.default.svc.cluster.local/data
ports:
- containerPort: 9000
        # These volume mounts are persistent. Each pod in the StatefulSet
        # gets a volume mounted based on this field.
volumeMounts:
- name: data
mountPath: /data
# Liveness probe detects situations where MinIO server instance
# is not working properly and needs restart. Kubernetes automatically
# restarts the pods if liveness checks fail.
livenessProbe:
httpGet:
path: /minio/health/live
port: 9000
initialDelaySeconds: 120
periodSeconds: 20
# These are converted to volume claims by the controller
# and mounted at the paths mentioned above.
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
| docs/orchestration/kubernetes/minio-distributed-statefulset.yaml | 1 | https://github.com/minio/minio/commit/4f981a0b42dd35ccfca8fb9f21b058206402bcb6 | [
0.992804229259491,
0.1659001260995865,
0.00017060301615856588,
0.0005380421062000096,
0.36980289220809937
] |
{
"id": 0,
"code_window": [
"apiVersion: apps/v1beta1\n",
"kind: StatefulSet\n",
"metadata:\n",
" # This name uniquely identifies the StatefulSet\n",
" name: minio\n",
"spec:\n"
],
"labels": [
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"apiVersion: apps/v1\n"
],
"file_path": "docs/orchestration/kubernetes/minio-distributed-statefulset.yaml",
"type": "replace",
"edit_start_line_idx": 0
} | /*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"encoding/gob"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"path"
"strconv"
"time"
"github.com/gorilla/mux"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
)
var errConnectionStale = errors.New("connection stale, REST client/server instance-id mismatch")
// To abstract a disk over network.
type storageRESTServer struct {
storage *posix
// Used to detect reboot of servers so that peers revalidate format.json as
// different disk might be available on the same mount point after reboot.
instanceID string
}
func (s *storageRESTServer) writeErrorResponse(w http.ResponseWriter, err error) {
w.WriteHeader(http.StatusForbidden)
w.Write([]byte(err.Error()))
w.(http.Flusher).Flush()
}
type bulkErrorsResponse struct {
Errs []error `json:"errors"`
}
func (s *storageRESTServer) writeErrorsResponse(w http.ResponseWriter, errs []error) {
resp := bulkErrorsResponse{Errs: make([]error, len(errs))}
for idx, err := range errs {
if err == nil {
continue
}
resp.Errs[idx] = err
}
gob.NewEncoder(w).Encode(resp)
w.(http.Flusher).Flush()
}
// DefaultSkewTime - skew time is 15 minutes between minio peers.
const DefaultSkewTime = 15 * time.Minute
// Authenticates storage client's requests and validates for skewed time.
func storageServerRequestValidate(r *http.Request) error {
_, owner, err := webRequestAuthenticate(r)
if err != nil {
return err
}
if !owner { // Disable access for non-admin users.
return errAuthentication
}
requestTimeStr := r.Header.Get("X-Minio-Time")
requestTime, err := time.Parse(time.RFC3339, requestTimeStr)
if err != nil {
return err
}
utcNow := UTCNow()
delta := requestTime.Sub(utcNow)
if delta < 0 {
delta = delta * -1
}
if delta > DefaultSkewTime {
return fmt.Errorf("client time %v is too apart with server time %v", requestTime, utcNow)
}
return nil
}
// IsValid - To authenticate and verify the time difference.
func (s *storageRESTServer) IsValid(w http.ResponseWriter, r *http.Request) bool {
if err := storageServerRequestValidate(r); err != nil {
s.writeErrorResponse(w, err)
return false
}
instanceID := r.URL.Query().Get(storageRESTInstanceID)
if instanceID != s.instanceID {
// This will cause the peer to revalidate format.json using a new storage-rest-client instance.
s.writeErrorResponse(w, errConnectionStale)
return false
}
return true
}
// GetInstanceID - returns the instance ID of the server.
func (s *storageRESTServer) GetInstanceID(w http.ResponseWriter, r *http.Request) {
if err := storageServerRequestValidate(r); err != nil {
s.writeErrorResponse(w, err)
return
}
w.Header().Set(xhttp.ContentLength, strconv.Itoa(len(s.instanceID)))
w.Write([]byte(s.instanceID))
w.(http.Flusher).Flush()
}
// DiskInfoHandler - returns disk info.
func (s *storageRESTServer) DiskInfoHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
info, err := s.storage.DiskInfo()
if err != nil {
s.writeErrorResponse(w, err)
return
}
defer w.(http.Flusher).Flush()
gob.NewEncoder(w).Encode(info)
}
// MakeVolHandler - make a volume.
func (s *storageRESTServer) MakeVolHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
vars := mux.Vars(r)
volume := vars[storageRESTVolume]
err := s.storage.MakeVol(volume)
if err != nil {
s.writeErrorResponse(w, err)
}
}
// ListVolsHandler - list volumes.
func (s *storageRESTServer) ListVolsHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
infos, err := s.storage.ListVols()
if err != nil {
s.writeErrorResponse(w, err)
return
}
defer w.(http.Flusher).Flush()
gob.NewEncoder(w).Encode(&infos)
}
// StatVolHandler - stat a volume.
func (s *storageRESTServer) StatVolHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
vars := mux.Vars(r)
volume := vars[storageRESTVolume]
info, err := s.storage.StatVol(volume)
if err != nil {
s.writeErrorResponse(w, err)
return
}
defer w.(http.Flusher).Flush()
gob.NewEncoder(w).Encode(info)
}
// DeleteVolHandler - delete a volume.
func (s *storageRESTServer) DeleteVolHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
vars := mux.Vars(r)
volume := vars[storageRESTVolume]
err := s.storage.DeleteVol(volume)
if err != nil {
s.writeErrorResponse(w, err)
}
}
// AppendFileHandler - append data from the request to the file specified.
func (s *storageRESTServer) AppendFileHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
vars := mux.Vars(r)
volume := vars[storageRESTVolume]
filePath := vars[storageRESTFilePath]
buf := make([]byte, r.ContentLength)
_, err := io.ReadFull(r.Body, buf)
if err != nil {
s.writeErrorResponse(w, err)
return
}
err = s.storage.AppendFile(volume, filePath, buf)
if err != nil {
s.writeErrorResponse(w, err)
}
}
// CreateFileHandler - fallocate() space for a file and copy the contents from the request.
func (s *storageRESTServer) CreateFileHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
vars := mux.Vars(r)
volume := vars[storageRESTVolume]
filePath := vars[storageRESTFilePath]
fileSizeStr := vars[storageRESTLength]
fileSize, err := strconv.Atoi(fileSizeStr)
if err != nil {
s.writeErrorResponse(w, err)
return
}
err = s.storage.CreateFile(volume, filePath, int64(fileSize), r.Body)
if err != nil {
s.writeErrorResponse(w, err)
}
}
// WriteAllHandler - write to file all content.
func (s *storageRESTServer) WriteAllHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
vars := mux.Vars(r)
volume := vars[storageRESTVolume]
filePath := vars[storageRESTFilePath]
if r.ContentLength < 0 {
s.writeErrorResponse(w, errInvalidArgument)
return
}
err := s.storage.WriteAll(volume, filePath, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
s.writeErrorResponse(w, err)
}
}
// StatFileHandler - stat a file.
func (s *storageRESTServer) StatFileHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
vars := mux.Vars(r)
volume := vars[storageRESTVolume]
filePath := vars[storageRESTFilePath]
info, err := s.storage.StatFile(volume, filePath)
if err != nil {
s.writeErrorResponse(w, err)
return
}
defer w.(http.Flusher).Flush()
gob.NewEncoder(w).Encode(info)
}
// ReadAllHandler - read all the contents of a file.
func (s *storageRESTServer) ReadAllHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
vars := mux.Vars(r)
volume := vars[storageRESTVolume]
filePath := vars[storageRESTFilePath]
buf, err := s.storage.ReadAll(volume, filePath)
if err != nil {
s.writeErrorResponse(w, err)
return
}
w.Header().Set(xhttp.ContentLength, strconv.Itoa(len(buf)))
w.Write(buf)
w.(http.Flusher).Flush()
}
// ReadFileHandler - read section of a file.
func (s *storageRESTServer) ReadFileHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
vars := mux.Vars(r)
volume := vars[storageRESTVolume]
filePath := vars[storageRESTFilePath]
offset, err := strconv.Atoi(vars[storageRESTOffset])
if err != nil {
s.writeErrorResponse(w, err)
return
}
length, err := strconv.Atoi(vars[storageRESTLength])
if err != nil {
s.writeErrorResponse(w, err)
return
}
if offset < 0 || length < 0 {
s.writeErrorResponse(w, errInvalidArgument)
return
}
var verifier *BitrotVerifier
if vars[storageRESTBitrotAlgo] != "" {
hashStr := vars[storageRESTBitrotHash]
var hash []byte
hash, err = hex.DecodeString(hashStr)
if err != nil {
s.writeErrorResponse(w, err)
return
}
verifier = NewBitrotVerifier(BitrotAlgorithmFromString(vars[storageRESTBitrotAlgo]), hash)
}
buf := make([]byte, length)
_, err = s.storage.ReadFile(volume, filePath, int64(offset), buf, verifier)
if err != nil {
s.writeErrorResponse(w, err)
return
}
w.Header().Set(xhttp.ContentLength, strconv.Itoa(len(buf)))
w.Write(buf)
w.(http.Flusher).Flush()
}
// ReadFileStreamHandler - read a section of a file as a stream.
func (s *storageRESTServer) ReadFileStreamHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
vars := mux.Vars(r)
volume := vars[storageRESTVolume]
filePath := vars[storageRESTFilePath]
offset, err := strconv.Atoi(vars[storageRESTOffset])
if err != nil {
s.writeErrorResponse(w, err)
return
}
length, err := strconv.Atoi(vars[storageRESTLength])
if err != nil {
s.writeErrorResponse(w, err)
return
}
rc, err := s.storage.ReadFileStream(volume, filePath, int64(offset), int64(length))
if err != nil {
s.writeErrorResponse(w, err)
return
}
defer rc.Close()
w.Header().Set(xhttp.ContentLength, strconv.Itoa(length))
io.Copy(w, rc)
w.(http.Flusher).Flush()
}
// readMetadata func provides the function types for reading leaf metadata.
type readMetadataFunc func(buf []byte, volume, entry string) FileInfo
func readMetadata(buf []byte, volume, entry string) FileInfo {
m, err := xlMetaV1UnmarshalJSON(context.Background(), buf)
if err != nil {
return FileInfo{}
}
return FileInfo{
Volume: volume,
Name: entry,
ModTime: m.Stat.ModTime,
Size: m.Stat.Size,
Metadata: m.Meta,
Parts: m.Parts,
Quorum: m.Erasure.DataBlocks,
}
}
// WalkHandler - remote caller to start walking at a requested directory path.
func (s *storageRESTServer) WalkHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
vars := mux.Vars(r)
volume := vars[storageRESTVolume]
dirPath := vars[storageRESTDirPath]
markerPath := vars[storageRESTMarkerPath]
recursive, err := strconv.ParseBool(vars[storageRESTRecursive])
if err != nil {
s.writeErrorResponse(w, err)
return
}
leafFile := vars[storageRESTLeafFile]
endWalkCh := make(chan struct{})
defer close(endWalkCh)
fch, err := s.storage.Walk(volume, dirPath, markerPath, recursive, leafFile, readMetadata, endWalkCh)
if err != nil {
s.writeErrorResponse(w, err)
return
}
defer w.(http.Flusher).Flush()
encoder := gob.NewEncoder(w)
for fi := range fch {
encoder.Encode(&fi)
}
}
// ListDirHandler - list a directory.
func (s *storageRESTServer) ListDirHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
vars := mux.Vars(r)
volume := vars[storageRESTVolume]
dirPath := vars[storageRESTDirPath]
leafFile := vars[storageRESTLeafFile]
count, err := strconv.Atoi(vars[storageRESTCount])
if err != nil {
s.writeErrorResponse(w, err)
return
}
entries, err := s.storage.ListDir(volume, dirPath, count, leafFile)
if err != nil {
s.writeErrorResponse(w, err)
return
}
defer w.(http.Flusher).Flush()
gob.NewEncoder(w).Encode(&entries)
}
// DeleteFileHandler - delete a file.
func (s *storageRESTServer) DeleteFileHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
vars := mux.Vars(r)
volume := vars[storageRESTVolume]
filePath := vars[storageRESTFilePath]
err := s.storage.DeleteFile(volume, filePath)
if err != nil {
s.writeErrorResponse(w, err)
}
}
// DeleteFileBulkHandler - delete multiple files.
func (s *storageRESTServer) DeleteFileBulkHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
vars := r.URL.Query()
volume := vars.Get(storageRESTVolume)
filePaths := vars[storageRESTFilePath]
errs, err := s.storage.DeleteFileBulk(volume, filePaths)
if err != nil {
s.writeErrorResponse(w, err)
return
}
s.writeErrorsResponse(w, errs)
}
// RenameFileHandler - rename a file.
func (s *storageRESTServer) RenameFileHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
vars := mux.Vars(r)
srcVolume := vars[storageRESTSrcVolume]
srcFilePath := vars[storageRESTSrcPath]
dstVolume := vars[storageRESTDstVolume]
dstFilePath := vars[storageRESTDstPath]
err := s.storage.RenameFile(srcVolume, srcFilePath, dstVolume, dstFilePath)
if err != nil {
s.writeErrorResponse(w, err)
}
}
// Send whitespace to the client to avoid timeouts as bitrot verification can take time on spinning/slow disks.
func sendWhiteSpaceVerifyFile(w http.ResponseWriter) <-chan struct{} {
doneCh := make(chan struct{})
go func() {
ticker := time.NewTicker(time.Second * 10)
for {
select {
case <-ticker.C:
w.Write([]byte(" "))
w.(http.Flusher).Flush()
case doneCh <- struct{}{}:
ticker.Stop()
return
}
}
}()
return doneCh
}
// VerifyFileResp - VerifyFile()'s response.
type VerifyFileResp struct {
Err error
}
// VerifyFile - Verify the file for bitrot errors.
func (s *storageRESTServer) VerifyFile(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
vars := mux.Vars(r)
volume := vars[storageRESTVolume]
filePath := vars[storageRESTFilePath]
size, err := strconv.ParseInt(vars[storageRESTLength], 10, 0)
if err != nil {
s.writeErrorResponse(w, err)
return
}
shardSize, err := strconv.Atoi(vars[storageRESTShardSize])
if err != nil {
s.writeErrorResponse(w, err)
return
}
hashStr := vars[storageRESTBitrotHash]
var hash []byte
if hashStr != "" {
hash, err = hex.DecodeString(hashStr)
if err != nil {
s.writeErrorResponse(w, err)
return
}
}
algoStr := vars[storageRESTBitrotAlgo]
if algoStr == "" {
s.writeErrorResponse(w, errInvalidArgument)
return
}
algo := BitrotAlgorithmFromString(algoStr)
w.Header().Set(xhttp.ContentType, "text/event-stream")
doneCh := sendWhiteSpaceVerifyFile(w)
err = s.storage.VerifyFile(volume, filePath, size, algo, hash, int64(shardSize))
<-doneCh
gob.NewEncoder(w).Encode(VerifyFileResp{err})
}
// registerStorageRESTHandlers - register storage REST handlers.
func registerStorageRESTHandlers(router *mux.Router, endpoints EndpointList) {
for _, endpoint := range endpoints {
if !endpoint.IsLocal {
continue
}
storage, err := newPosix(endpoint.Path)
if err != nil {
logger.Fatal(uiErrUnableToWriteInBackend(err), "Unable to initialize posix backend")
}
server := &storageRESTServer{storage, mustGetUUID()}
subrouter := router.PathPrefix(path.Join(storageRESTPath, endpoint.Path)).Subrouter()
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodDiskInfo).HandlerFunc(httpTraceHdrs(server.DiskInfoHandler))
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodMakeVol).HandlerFunc(httpTraceHdrs(server.MakeVolHandler)).Queries(restQueries(storageRESTVolume)...)
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodStatVol).HandlerFunc(httpTraceHdrs(server.StatVolHandler)).Queries(restQueries(storageRESTVolume)...)
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodDeleteVol).HandlerFunc(httpTraceHdrs(server.DeleteVolHandler)).Queries(restQueries(storageRESTVolume)...)
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodListVols).HandlerFunc(httpTraceHdrs(server.ListVolsHandler))
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodAppendFile).HandlerFunc(httpTraceHdrs(server.AppendFileHandler)).
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodWriteAll).HandlerFunc(httpTraceHdrs(server.WriteAllHandler)).
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodCreateFile).HandlerFunc(httpTraceHdrs(server.CreateFileHandler)).
Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTLength)...)
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodStatFile).HandlerFunc(httpTraceHdrs(server.StatFileHandler)).
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodReadAll).HandlerFunc(httpTraceHdrs(server.ReadAllHandler)).
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodReadFile).HandlerFunc(httpTraceHdrs(server.ReadFileHandler)).
Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTOffset, storageRESTLength, storageRESTBitrotAlgo, storageRESTBitrotHash)...)
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodReadFileStream).HandlerFunc(httpTraceHdrs(server.ReadFileStreamHandler)).
Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTOffset, storageRESTLength)...)
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodListDir).HandlerFunc(httpTraceHdrs(server.ListDirHandler)).
Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTCount, storageRESTLeafFile)...)
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodWalk).HandlerFunc(httpTraceHdrs(server.WalkHandler)).
Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTMarkerPath, storageRESTRecursive, storageRESTLeafFile)...)
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodDeleteFile).HandlerFunc(httpTraceHdrs(server.DeleteFileHandler)).
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodDeleteFileBulk).HandlerFunc(httpTraceHdrs(server.DeleteFileBulkHandler)).
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodRenameFile).HandlerFunc(httpTraceHdrs(server.RenameFileHandler)).
Queries(restQueries(storageRESTSrcVolume, storageRESTSrcPath, storageRESTDstVolume, storageRESTDstPath)...)
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodVerifyFile).HandlerFunc(httpTraceHdrs(server.VerifyFile)).
Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTBitrotAlgo, storageRESTBitrotHash, storageRESTLength, storageRESTShardSize)...)
subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodGetInstanceID).HandlerFunc(httpTraceAll(server.GetInstanceID))
}
router.NotFoundHandler = http.HandlerFunc(httpTraceAll(notFoundHandler))
}
| cmd/storage-rest-server.go | 0 | https://github.com/minio/minio/commit/4f981a0b42dd35ccfca8fb9f21b058206402bcb6 | [
0.0005633055698126554,
0.0001839455362642184,
0.00016025705554056913,
0.00017209560610353947,
0.00006429340282920748
] |
{
"id": 0,
"code_window": [
"apiVersion: apps/v1beta1\n",
"kind: StatefulSet\n",
"metadata:\n",
" # This name uniquely identifies the StatefulSet\n",
" name: minio\n",
"spec:\n"
],
"labels": [
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"apiVersion: apps/v1\n"
],
"file_path": "docs/orchestration/kubernetes/minio-distributed-statefulset.yaml",
"type": "replace",
"edit_start_line_idx": 0
} | /*
* MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"io/ioutil"
"math"
"math/rand"
"strconv"
"testing"
humanize "github.com/dustin/go-humanize"
)
// Prepare XL/FS backend for benchmark.
func prepareBenchmarkBackend(instanceType string) (ObjectLayer, []string, error) {
return prepareTestBackend(instanceType)
}
// Benchmark utility functions for ObjectLayer.PutObject().
// Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
var err error
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil {
b.Fatal(err)
}
// get text data generated for number of bytes equal to object size.
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to written is required as input for PutObject.
md5hex := getMD5Hash(textData)
sha256hex := ""
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for PutObject starts here. Reset the benchmark timer.
b.ResetTimer()
for i := 0; i < b.N; i++ {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
}
}
// Benchmark ends here. Stop timer.
b.StopTimer()
}
// Benchmark utility functions for ObjectLayer.PutObjectPart().
// Creates Object layer setup ( MakeBucket ) and then runs the PutObjectPart benchmark.
func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
var err error
// obtains random bucket name.
bucket := getRandomBucketName()
object := getRandomObjectName()
// create bucket.
err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil {
b.Fatal(err)
}
objSize := 128 * humanize.MiByte
// PutObjectPart returns etag of the object inserted.
// etag variable is assigned with that value.
var etag, uploadID string
// get text data generated for number of bytes equal to object size.
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to written is required as input for NewMultipartUpload.
uploadID, err = obj.NewMultipartUpload(context.Background(), bucket, object, ObjectOptions{})
if err != nil {
b.Fatal(err)
}
sha256hex := ""
var textPartData []byte
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for PutObjectPart starts here. Reset the benchmark timer.
b.ResetTimer()
for i := 0; i < b.N; i++ {
// insert the object.
totalPartsNR := int(math.Ceil(float64(objSize) / float64(partSize)))
for j := 0; j < totalPartsNR; j++ {
if j < totalPartsNR-1 {
textPartData = textData[j*partSize : (j+1)*partSize-1]
} else {
textPartData = textData[j*partSize:]
}
md5hex := getMD5Hash([]byte(textPartData))
var partInfo PartInfo
partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j,
mustGetPutObjReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
if partInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, etag, md5hex)
}
}
}
// Benchmark ends here. Stop timer.
b.StopTimer()
}
// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
// cleaning up the backend by removing all the directories and files created on function return.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runPutObjectPartBenchmark(b, objLayer, objSize)
}
// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
// cleaning up the backend by removing all the directories and files created on function return.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runPutObjectBenchmark(b, objLayer, objSize)
}
// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
// cleaning up the backend by removing all the directories and files created on function return.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runPutObjectBenchmarkParallel(b, objLayer, objSize)
}
// Benchmark utility functions for ObjectLayer.GetObject().
// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil {
b.Fatal(err)
}
textData := generateBytesData(objSize)
// generate etag for the generated data.
// etag of the data to written is required as input for PutObject.
// PutObject is the functions which writes the data onto the FS/XL backend.
// get text data generated for number of bytes equal to object size.
md5hex := getMD5Hash(textData)
sha256hex := ""
for i := 0; i < 10; i++ {
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
}
}
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for GetObject starts here. Reset the benchmark timer.
b.ResetTimer()
for i := 0; i < b.N; i++ {
var buffer = new(bytes.Buffer)
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "", ObjectOptions{})
if err != nil {
b.Error(err)
}
}
// Benchmark ends here. Stop timer.
b.StopTimer()
}
// randomly picks a character and returns its equivalent byte array.
func getRandomByte() []byte {
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
// seeding the random number generator.
rand.Seed(UTCNow().UnixNano())
// pick a character randomly.
return []byte{letterBytes[rand.Intn(len(letterBytes))]}
}
// picks a random byte and repeats it to size bytes.
func generateBytesData(size int) []byte {
// repeat the random character chosen size
return bytes.Repeat(getRandomByte(), size)
}
// creates XL/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
// cleaning up the backend by removing all the directories and files created.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runGetObjectBenchmark(b, objLayer, objSize)
}
// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
objLayer, disks, err := prepareBenchmarkBackend(instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
// cleaning up the backend by removing all the directories and files created.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runGetObjectBenchmarkParallel(b, objLayer, objSize)
}
// Parallel benchmark utility functions for ObjectLayer.PutObject().
// Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil {
b.Fatal(err)
}
// get text data generated for number of bytes equal to object size.
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to written is required as input for PutObject.
md5hex := getMD5Hash([]byte(textData))
sha256hex := ""
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for PutObject starts here. Reset the benchmark timer.
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
i := 0
for pb.Next() {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != md5hex {
b.Fatalf("Write no: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", objInfo.ETag, md5hex)
}
i++
}
})
// Benchmark ends here. Stop timer.
b.StopTimer()
}
// Parallel benchmark utility functions for ObjectLayer.GetObject().
// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil {
b.Fatal(err)
}
// get text data generated for number of bytes equal to object size.
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
	// md5sum of the data to be written is required as input for PutObject.
	// PutObject is the function which writes the data onto the FS/XL backend.
md5hex := getMD5Hash([]byte(textData))
sha256hex := ""
for i := 0; i < 10; i++ {
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
}
}
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for GetObject starts here. Reset the benchmark timer.
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
i := 0
for pb.Next() {
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "", ObjectOptions{})
if err != nil {
b.Error(err)
}
i++
if i == 10 {
i = 0
}
}
})
// Benchmark ends here. Stop timer.
b.StopTimer()
}
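// Note (added for clarity, not part of the upstream file): these helpers are meant to be
// driven by ordinary Go benchmark functions. A hypothetical caller could look like:
//
//	func BenchmarkGetObjectParallel10KbFS(b *testing.B) {
//		benchmarkGetObjectParallel(b, "FS", 10*1024)
//	}
//
// where the instance type ("FS" or "XL") selects the backend prepared by
// prepareBenchmarkBackend and the last argument is the object size in bytes.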
| cmd/benchmark-utils_test.go | 0 | https://github.com/minio/minio/commit/4f981a0b42dd35ccfca8fb9f21b058206402bcb6 | [
0.0001790988171705976,
0.00017105216102208942,
0.00016493328439537436,
0.00017119977565016598,
0.0000029089528652548324
] |
{
"id": 0,
"code_window": [
"apiVersion: apps/v1beta1\n",
"kind: StatefulSet\n",
"metadata:\n",
" # This name uniquely identifies the StatefulSet\n",
" name: minio\n",
"spec:\n"
],
"labels": [
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"apiVersion: apps/v1\n"
],
"file_path": "docs/orchestration/kubernetes/minio-distributed-statefulset.yaml",
"type": "replace",
"edit_start_line_idx": 0
} | /*
* MinIO Cloud Storage, (C) 2015, 2016, 2017, 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"encoding/base64"
"encoding/xml"
"fmt"
"io"
"net"
"net/http"
"net/url"
"path"
"path/filepath"
"strings"
"github.com/gorilla/mux"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/dns"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/handlers"
"github.com/minio/minio/pkg/hash"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/policy"
"github.com/minio/minio/pkg/sync/errgroup"
)
// Check if there are buckets on server without corresponding entry in etcd backend and
// make entries. Here is the general flow
// - Range over all the available buckets
// - Check if a bucket has an entry in etcd backend
// -- If no, make an entry
// -- If yes, check if the IP of entry matches local IP. This means entry is for this instance.
// -- If IP of the entry doesn't match, this means entry is for another instance. Log an error to console.
func initFederatorBackend(objLayer ObjectLayer) {
// Get buckets in the backend
b, err := objLayer.ListBuckets(context.Background())
if err != nil {
logger.LogIf(context.Background(), err)
return
}
// Get buckets in the DNS
dnsBuckets, err := globalDNSConfig.List()
if err != nil && err != dns.ErrNoEntriesFound {
logger.LogIf(context.Background(), err)
return
}
bucketSet := set.NewStringSet()
// Add buckets that are not registered with the DNS
g := errgroup.WithNErrs(len(b))
for index := range b {
index := index
bucketSet.Add(b[index].Name)
g.Go(func() error {
r, gerr := globalDNSConfig.Get(b[index].Name)
if gerr != nil {
if gerr == dns.ErrNoEntriesFound {
return globalDNSConfig.Put(b[index].Name)
}
return gerr
}
if globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
// There is already an entry for this bucket, with all IP addresses different. This indicates a bucket name collision. Log an error and continue.
return fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket. Use one of these IP addresses %v to access the bucket", b[index].Name, globalDomainIPs.ToSlice())
}
return nil
}, index)
}
for _, err := range g.Wait() {
if err != nil {
logger.LogIf(context.Background(), err)
}
}
g = errgroup.WithNErrs(len(dnsBuckets))
// Remove buckets that are in DNS for this server, but aren't local
for index := range dnsBuckets {
index := index
g.Go(func() error {
// This is a local bucket that exists, so we can continue
if bucketSet.Contains(dnsBuckets[index].Key) {
return nil
}
// This is not for our server, so we can continue
hostPort := net.JoinHostPort(dnsBuckets[index].Host, fmt.Sprintf("%d", dnsBuckets[index].Port))
if globalDomainIPs.Intersection(set.CreateStringSet(hostPort)).IsEmpty() {
return nil
}
			// If we reach here, the bucket no longer exists locally but is still registered in DNS to this server
if err := globalDNSConfig.DeleteRecord(dnsBuckets[index]); err != nil {
return fmt.Errorf("Failed to remove DNS entry for %s due to %v", dnsBuckets[index].Key, err)
}
return nil
}, index)
}
for _, err := range g.Wait() {
if err != nil {
logger.LogIf(context.Background(), err)
}
}
}
// GetBucketLocationHandler - GET Bucket location.
// -------------------------
// This operation returns bucket location.
func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketLocation")
defer logger.AuditLog(w, r, "GetBucketLocation", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketLocationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
getBucketInfo := objectAPI.GetBucketInfo
if _, err := getBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Generate response.
encodedSuccessResponse := encodeResponse(LocationResponse{})
// Get current region.
region := globalServerConfig.GetRegion()
if region != globalMinioDefaultRegion {
encodedSuccessResponse = encodeResponse(LocationResponse{
Location: region,
})
}
// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
}
// ListMultipartUploadsHandler - GET Bucket (List Multipart uploads)
// -------------------------
// This operation lists in-progress multipart uploads. An in-progress
// multipart upload is a multipart upload that has been initiated,
// using the Initiate Multipart Upload request, but has not yet been
// completed or aborted. This operation returns at most 1,000 multipart
// uploads in the response.
//
func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListMultipartUploads")
defer logger.AuditLog(w, r, "ListMultipartUploads", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketMultipartUploadsAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType, errCode := getBucketMultipartResources(r.URL.Query())
if errCode != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
return
}
if maxUploads < 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxUploads), r.URL, guessIsBrowserReq(r))
return
}
if keyMarker != "" {
		// A key marker that does not share the prefix is not implemented.
if !hasPrefix(keyMarker, prefix) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
}
listMultipartsInfo, err := objectAPI.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// generate response
response := generateListMultipartUploadsResponse(bucket, listMultipartsInfo, encodingType)
encodedSuccessResponse := encodeResponse(response)
// write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
}
// ListBucketsHandler - GET Service.
// -----------
// This implementation of the GET operation returns a list of all buckets
// owned by the authenticated sender of the request.
func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListBuckets")
defer logger.AuditLog(w, r, "ListBuckets", mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
listBuckets := objectAPI.ListBuckets
accessKey, owner, s3Error := checkRequestAuthTypeToAccessKey(ctx, r, policy.ListAllMyBucketsAction, "", "")
if s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// If etcd, dns federation configured list buckets from etcd.
var bucketsInfo []BucketInfo
if globalDNSConfig != nil {
dnsBuckets, err := globalDNSConfig.List()
if err != nil && err != dns.ErrNoEntriesFound {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
bucketSet := set.NewStringSet()
for _, dnsRecord := range dnsBuckets {
if bucketSet.Contains(dnsRecord.Key) {
continue
}
bucketsInfo = append(bucketsInfo, BucketInfo{
Name: dnsRecord.Key,
Created: dnsRecord.CreationDate,
})
bucketSet.Add(dnsRecord.Key)
}
} else {
// Invoke the list buckets.
var err error
bucketsInfo, err = listBuckets(ctx)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
// Set prefix value for "s3:prefix" policy conditionals.
r.Header.Set("prefix", "")
// Set delimiter value for "s3:delimiter" policy conditionals.
r.Header.Set("delimiter", SlashSeparator)
// err will be nil here as we already called this function
// earlier in this request.
claims, _ := getClaimsFromToken(r)
var newBucketsInfo []BucketInfo
for _, bucketInfo := range bucketsInfo {
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
Action: iampolicy.ListBucketAction,
BucketName: bucketInfo.Name,
ConditionValues: getConditionValues(r, "", accessKey),
IsOwner: owner,
ObjectName: "",
Claims: claims,
}) {
newBucketsInfo = append(newBucketsInfo, bucketInfo)
}
}
// Generate response.
response := generateListBucketsResponse(newBucketsInfo)
encodedSuccessResponse := encodeResponse(response)
// Write response.
writeSuccessResponseXML(w, encodedSuccessResponse)
}
// DeleteMultipleObjectsHandler - deletes multiple objects.
func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteMultipleObjects")
defer logger.AuditLog(w, r, "DeleteMultipleObjects", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
// Content-Length is required and should be non-zero
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if r.ContentLength <= 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
return
}
	// Content-Md5 is required and should be set
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if _, ok := r.Header["Content-Md5"]; !ok {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
return
}
// Allocate incoming content length bytes.
var deleteXMLBytes []byte
const maxBodySize = 2 * 1000 * 1024 // The max. XML contains 1000 object names (each at most 1024 bytes long) + XML overhead
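	// maxBodySize therefore works out to 2 * 1000 * 1024 = 2,048,000 bytes, roughly 2 MB.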
	if r.ContentLength > maxBodySize { // Only allocate memory for at most 1000 objects
deleteXMLBytes = make([]byte, maxBodySize)
} else {
deleteXMLBytes = make([]byte, r.ContentLength)
}
// Read incoming body XML bytes.
if _, err := io.ReadFull(r.Body, deleteXMLBytes); err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(ctx, w, toAdminAPIErr(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Unmarshal list of keys to be deleted.
deleteObjects := &DeleteObjectsRequest{}
if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
return
}
// Deny if WORM is enabled
if globalWORMEnabled {
// Not required to check whether given objects exist or not, because
// DeleteMultipleObject is always successful irrespective of object existence.
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
return
}
deleteObjectsFn := objectAPI.DeleteObjects
if api.CacheAPI() != nil {
deleteObjectsFn = api.CacheAPI().DeleteObjects
}
type delObj struct {
origIndex int
name string
}
var objectsToDelete []delObj
var dErrs = make([]APIErrorCode, len(deleteObjects.Objects))
for index, object := range deleteObjects.Objects {
if dErrs[index] = checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object.ObjectName); dErrs[index] != ErrNone {
if dErrs[index] == ErrSignatureDoesNotMatch || dErrs[index] == ErrInvalidAccessKeyID {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(dErrs[index]), r.URL, guessIsBrowserReq(r))
return
}
continue
}
objectsToDelete = append(objectsToDelete, delObj{index, object.ObjectName})
}
toNames := func(input []delObj) (output []string) {
output = make([]string, len(input))
for i := range input {
output[i] = input[i].name
}
return
}
errs, err := deleteObjectsFn(ctx, bucket, toNames(objectsToDelete))
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
for i, obj := range objectsToDelete {
dErrs[obj.origIndex] = toAPIErrorCode(ctx, errs[i])
}
// Collect deleted objects and errors if any.
var deletedObjects []ObjectIdentifier
var deleteErrors []DeleteError
for index, errCode := range dErrs {
object := deleteObjects.Objects[index]
		// Successfully deleted objects are collected separately.
if errCode == ErrNone || errCode == ErrNoSuchKey {
deletedObjects = append(deletedObjects, object)
continue
}
apiErr := getAPIError(errCode)
// Error during delete should be collected separately.
deleteErrors = append(deleteErrors, DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: object.ObjectName,
})
}
// Generate response
response := generateMultiDeleteResponse(deleteObjects.Quiet, deletedObjects, deleteErrors)
encodedSuccessResponse := encodeResponse(response)
// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
// Notify deleted event for objects.
for _, dobj := range deletedObjects {
sendEvent(eventArgs{
EventName: event.ObjectRemovedDelete,
BucketName: bucket,
Object: ObjectInfo{
Name: dobj.ObjectName,
},
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
}
}
// PutBucketHandler - PUT Bucket
// ----------
// This implementation of the PUT operation creates a new bucket for authenticated request
func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucket")
defer logger.AuditLog(w, r, "PutBucket", mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.CreateBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Parse incoming location constraint.
location, s3Error := parseLocationConstraint(r)
if s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Validate if location sent by the client is valid, reject
// requests which do not follow valid region requirements.
if !isValidLocation(location) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRegion), r.URL, guessIsBrowserReq(r))
return
}
if globalDNSConfig != nil {
if _, err := globalDNSConfig.Get(bucket); err != nil {
if err == dns.ErrNoEntriesFound {
// Proceed to creating a bucket.
if err = objectAPI.MakeBucketWithLocation(ctx, bucket, location); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err = globalDNSConfig.Put(bucket); err != nil {
objectAPI.DeleteBucket(ctx, bucket)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Make sure to add Location information here only for bucket
w.Header().Set(xhttp.Location,
getObjectLocation(r, globalDomainNames, bucket, ""))
writeSuccessResponseHeadersOnly(w)
return
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBucketAlreadyOwnedByYou), r.URL, guessIsBrowserReq(r))
return
}
// Proceed to creating a bucket.
err := objectAPI.MakeBucketWithLocation(ctx, bucket, location)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Make sure to add Location information here only for bucket
w.Header().Set(xhttp.Location, path.Clean(r.URL.Path)) // Clean any trailing slashes.
writeSuccessResponseHeadersOnly(w)
}
// PostPolicyBucketHandler - POST policy
// ----------
// This implementation of the POST operation handles object creation with a specified
// signature policy in multipart/form-data
func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PostPolicyBucket")
defer logger.AuditLog(w, r, "PostPolicyBucket", mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
if !api.EncryptionEnabled() && crypto.IsRequested(r.Header) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
bucket := mux.Vars(r)["bucket"]
// To detect if the client has disconnected.
r.Body = &detectDisconnect{r.Body, r.Context().Done()}
// Require Content-Length to be set in the request
size := r.ContentLength
if size < 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
return
}
resource, err := getResource(r.URL.Path, r.Host, globalDomainNames)
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL, guessIsBrowserReq(r))
return
}
// Make sure that the URL does not contain object name.
if bucket != filepath.Clean(resource[1:]) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
return
}
// Here the parameter is the size of the form data that should
// be loaded in memory, the remaining being put in temporary files.
reader, err := r.MultipartReader()
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
return
}
// Read multipart data and save in memory and in the disk if needed
form, err := reader.ReadForm(maxFormMemory)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
return
}
// Remove all tmp files created during multipart upload
defer form.RemoveAll()
// Extract all form fields
fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(ctx, form)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
return
}
// Check if file is provided, error out otherwise.
if fileBody == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPOSTFileRequired), r.URL, guessIsBrowserReq(r))
return
}
// Close multipart file
defer fileBody.Close()
formValues.Set("Bucket", bucket)
if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") {
// S3 feature to replace ${filename} found in Key form field
// by the filename attribute passed in multipart
formValues.Set("Key", strings.Replace(formValues.Get("Key"), "${filename}", fileName, -1))
}
object := formValues.Get("Key")
successRedirect := formValues.Get("success_action_redirect")
successStatus := formValues.Get("success_action_status")
var redirectURL *url.URL
if successRedirect != "" {
redirectURL, err = url.Parse(successRedirect)
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
return
}
}
// Verify policy signature.
errCode := doesPolicySignatureMatch(formValues)
if errCode != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
return
}
policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
return
}
// Handle policy if it is set.
if len(policyBytes) > 0 {
postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPostPolicyConditionInvalidFormat), r.URL, guessIsBrowserReq(r))
return
}
// Make sure formValues adhere to policy restrictions.
if err = checkPostPolicy(formValues, postPolicyForm); err != nil {
writeCustomErrorResponseXML(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), err.Error(), r.URL, guessIsBrowserReq(r))
return
}
// Ensure that the object size is within expected range, also the file size
// should not exceed the maximum single Put size (5 GiB)
lengthRange := postPolicyForm.Conditions.ContentLengthRange
if lengthRange.Valid {
if fileSize < lengthRange.Min {
writeErrorResponse(ctx, w, toAPIError(ctx, errDataTooSmall), r.URL, guessIsBrowserReq(r))
return
}
if fileSize > lengthRange.Max || isMaxObjectSize(fileSize) {
writeErrorResponse(ctx, w, toAPIError(ctx, errDataTooLarge), r.URL, guessIsBrowserReq(r))
return
}
}
}
// Extract metadata to be saved from received Form.
metadata := make(map[string]string)
err = extractMetadataFromMap(ctx, formValues, metadata)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize, globalCLIContext.StrictS3Compat)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
rawReader := hashReader
pReader := NewPutObjReader(rawReader, nil, nil)
var objectEncryptionKey []byte
// This request header needs to be set prior to setting ObjectOptions
if globalAutoEncryption && !crypto.SSEC.IsRequested(r.Header) {
r.Header.Add(crypto.SSEHeader, crypto.SSEAlgorithmAES256)
}
// get gateway encryption options
var opts ObjectOptions
opts, err = putOpts(ctx, r, bucket, object, metadata)
if err != nil {
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}
if objectAPI.IsEncryptionSupported() {
if crypto.IsRequested(formValues) && !hasSuffix(object, SlashSeparator) { // handle SSE requests
if crypto.SSECopy.IsRequested(r.Header) {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL, guessIsBrowserReq(r))
return
}
var reader io.Reader
var key []byte
if crypto.SSEC.IsRequested(formValues) {
key, err = ParseSSECustomerHeader(formValues)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
reader, objectEncryptionKey, err = newEncryptReader(hashReader, key, bucket, object, metadata, crypto.S3.IsRequested(formValues))
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
info := ObjectInfo{Size: fileSize}
// do not try to verify encrypted content
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize, globalCLIContext.StrictS3Compat)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
pReader = NewPutObjReader(rawReader, hashReader, objectEncryptionKey)
}
}
objInfo, err := objectAPI.PutObject(ctx, bucket, object, pReader, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
location := getObjectLocation(r, globalDomainNames, bucket, object)
w.Header()[xhttp.ETag] = []string{`"` + objInfo.ETag + `"`}
w.Header().Set(xhttp.Location, location)
// Notify object created event.
defer sendEvent(eventArgs{
EventName: event.ObjectCreatedPost,
BucketName: objInfo.Bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
if successRedirect != "" {
// Replace raw query params..
redirectURL.RawQuery = getRedirectPostRawQuery(objInfo)
writeRedirectSeeOther(w, redirectURL.String())
return
}
// Decide what http response to send depending on success_action_status parameter
switch successStatus {
case "201":
resp := encodeResponse(PostResponse{
Bucket: objInfo.Bucket,
Key: objInfo.Name,
ETag: `"` + objInfo.ETag + `"`,
Location: location,
})
writeResponse(w, http.StatusCreated, resp, "application/xml")
case "200":
writeSuccessResponseHeadersOnly(w)
default:
writeSuccessNoContent(w)
}
}
// HeadBucketHandler - HEAD Bucket
// ----------
// This operation is useful to determine if a bucket exists.
// The operation returns a 200 OK if the bucket exists and you
// have permission to access it. Otherwise, the operation might
// return responses such as 404 Not Found and 403 Forbidden.
func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "HeadBucket")
defer logger.AuditLog(w, r, "HeadBucket", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrServerNotInitialized))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
return
}
getBucketInfo := objectAPI.GetBucketInfo
if _, err := getBucketInfo(ctx, bucket); err != nil {
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}
writeSuccessResponseHeadersOnly(w)
}
// DeleteBucketHandler - Delete bucket
func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucket")
defer logger.AuditLog(w, r, "DeleteBucket", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
deleteBucket := objectAPI.DeleteBucket
// Attempt to delete bucket.
if err := deleteBucket(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if globalDNSConfig != nil {
if err := globalDNSConfig.Delete(bucket); err != nil {
// Deleting DNS entry failed, attempt to create the bucket again.
objectAPI.MakeBucketWithLocation(ctx, bucket, "")
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
globalNotificationSys.RemoveNotification(bucket)
globalPolicySys.Remove(bucket)
globalNotificationSys.DeleteBucket(ctx, bucket)
globalLifecycleSys.Remove(bucket)
globalNotificationSys.RemoveBucketLifecycle(ctx, bucket)
// Write success response.
writeSuccessNoContent(w)
}
| cmd/bucket-handlers.go | 0 | https://github.com/minio/minio/commit/4f981a0b42dd35ccfca8fb9f21b058206402bcb6 | [
0.0005719896871596575,
0.00017693094559945166,
0.0001635445369174704,
0.00017092296911869198,
0.00004314452598919161
] |
{
"id": 1,
"code_window": [
"apiVersion: extensions/v1beta1\n",
"kind: Deployment\n",
"metadata:\n",
" # This name uniquely identifies the Deployment\n"
],
"labels": [
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"apiVersion: apps/v1\n"
],
"file_path": "docs/orchestration/kubernetes/minio-gcs-gateway-deployment.yaml",
"type": "replace",
"edit_start_line_idx": 0
} | apiVersion: extensions/v1beta1
kind: Deployment
metadata:
# This name uniquely identifies the Deployment
name: minio-deployment
spec:
strategy:
type: Recreate
template:
metadata:
labels:
# Label is used as selector in the service.
app: minio
spec:
# Refer to the secret created earlier
volumes:
- name: gcs-credentials
secret:
# Name of the Secret created earlier
secretName: gcs-credentials
containers:
- name: minio
# Pulls the default Minio image from Docker Hub
image: minio/minio:RELEASE.2019-09-26T19-42-35Z
args:
- gateway
- gcs
- gcp_project_id
env:
# MinIO access key and secret key
- name: MINIO_ACCESS_KEY
value: "minio"
- name: MINIO_SECRET_KEY
value: "minio123"
# Google Cloud Service uses this variable
- name: GOOGLE_APPLICATION_CREDENTIALS
value: "/etc/credentials/application_default_credentials.json"
ports:
- containerPort: 9000
# Mount the volume into the pod
volumeMounts:
- name: gcs-credentials
mountPath: "/etc/credentials"
readOnly: true | docs/orchestration/kubernetes/minio-gcs-gateway-deployment.yaml | 1 | https://github.com/minio/minio/commit/4f981a0b42dd35ccfca8fb9f21b058206402bcb6 | [
0.9731499552726746,
0.19478371739387512,
0.00016745766333770007,
0.00017227364878635854,
0.3891831040382385
] |
{
"id": 1,
"code_window": [
"apiVersion: extensions/v1beta1\n",
"kind: Deployment\n",
"metadata:\n",
" # This name uniquely identifies the Deployment\n"
],
"labels": [
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"apiVersion: apps/v1\n"
],
"file_path": "docs/orchestration/kubernetes/minio-gcs-gateway-deployment.yaml",
"type": "replace",
"edit_start_line_idx": 0
} | #!/bin/bash
#
# Mint (C) 2019 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# handle command line arguments
if [ $# -ne 2 ]; then
echo "usage: run.sh <OUTPUT-LOG-FILE> <ERROR-LOG-FILE>"
exit -1
fi
output_log_file="$1"
error_log_file="$2"
# run tests
/mint/run/core/healthcheck/healthcheck 1>>"$output_log_file" 2>"$error_log_file"
| mint/run/core/healthcheck/run.sh | 0 | https://github.com/minio/minio/commit/4f981a0b42dd35ccfca8fb9f21b058206402bcb6 | [
0.00017300025501754135,
0.0001720091822789982,
0.00017151053179986775,
0.00017151674546767026,
7.008022748777876e-7
] |
{
"id": 1,
"code_window": [
"apiVersion: extensions/v1beta1\n",
"kind: Deployment\n",
"metadata:\n",
" # This name uniquely identifies the Deployment\n"
],
"labels": [
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"apiVersion: apps/v1\n"
],
"file_path": "docs/orchestration/kubernetes/minio-gcs-gateway-deployment.yaml",
"type": "replace",
"edit_start_line_idx": 0
} | /*
* MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"crypto/hmac"
"encoding/hex"
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/sha256-simd"
)
// http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the
// client did not calculate sha256 of the payload.
const unsignedPayload = "UNSIGNED-PAYLOAD"
// skipContentSha256Cksum returns true if caller needs to skip
// payload checksum, false if not.
func skipContentSha256Cksum(r *http.Request) bool {
var (
v []string
ok bool
)
if isRequestPresignedSignatureV4(r) {
v, ok = r.URL.Query()[xhttp.AmzContentSha256]
if !ok {
v, ok = r.Header[xhttp.AmzContentSha256]
}
} else {
v, ok = r.Header[xhttp.AmzContentSha256]
}
// If x-amz-content-sha256 is set and the value is not
// 'UNSIGNED-PAYLOAD' we should validate the content sha256.
return !(ok && v[0] != unsignedPayload)
}
// Returns SHA256 for calculating canonical-request.
func getContentSha256Cksum(r *http.Request, stype serviceType) string {
if stype == serviceSTS {
payload, err := ioutil.ReadAll(io.LimitReader(r.Body, stsRequestBodyLimit))
if err != nil {
logger.CriticalIf(context.Background(), err)
}
sum256 := sha256.New()
sum256.Write(payload)
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
return hex.EncodeToString(sum256.Sum(nil))
}
var (
defaultSha256Cksum string
v []string
ok bool
)
// For a presigned request we look at the query param for sha256.
if isRequestPresignedSignatureV4(r) {
// X-Amz-Content-Sha256, if not set in presigned requests, checksum
// will default to 'UNSIGNED-PAYLOAD'.
defaultSha256Cksum = unsignedPayload
v, ok = r.URL.Query()[xhttp.AmzContentSha256]
if !ok {
v, ok = r.Header[xhttp.AmzContentSha256]
}
} else {
// X-Amz-Content-Sha256, if not set in signed requests, checksum
// will default to sha256([]byte("")).
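		// (The sha256 of empty input is the well-known constant
		// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.)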
defaultSha256Cksum = emptySHA256
v, ok = r.Header[xhttp.AmzContentSha256]
}
// We found 'X-Amz-Content-Sha256' return the captured value.
if ok {
return v[0]
}
// We couldn't find 'X-Amz-Content-Sha256'.
return defaultSha256Cksum
}
// isValidRegion - verify if incoming region value is valid with configured Region.
func isValidRegion(reqRegion string, confRegion string) bool {
if confRegion == "" {
return true
}
if confRegion == "US" {
confRegion = globalMinioDefaultRegion
}
// Some older s3 clients set region as "US" instead of
// globalMinioDefaultRegion, handle it.
if reqRegion == "US" {
reqRegion = globalMinioDefaultRegion
}
return reqRegion == confRegion
}
// check if the access key is valid and recognized, additionally
// also returns if the access key is owner/admin.
func checkKeyValid(accessKey string) (auth.Credentials, bool, APIErrorCode) {
var owner = true
var cred = globalServerConfig.GetCredential()
if cred.AccessKey != accessKey {
if globalIAMSys == nil {
return cred, false, ErrInvalidAccessKeyID
}
// Check if the access key is part of users credentials.
var ok bool
if cred, ok = globalIAMSys.GetUser(accessKey); !ok {
return cred, false, ErrInvalidAccessKeyID
}
owner = false
}
return cred, owner, ErrNone
}
// sumHMAC calculate hmac between two input byte array.
func sumHMAC(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
// extractSignedHeaders extract signed headers from Authorization header
func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, APIErrorCode) {
reqHeaders := r.Header
reqQueries := r.URL.Query()
// find whether "host" is part of list of signed headers.
// if not return ErrUnsignedHeaders. "host" is mandatory.
if !contains(signedHeaders, "host") {
return nil, ErrUnsignedHeaders
}
extractedSignedHeaders := make(http.Header)
for _, header := range signedHeaders {
		// `host` will not be found in the headers, it can only be found in r.Host,
		// but it is always necessary that the list of signed headers contains host.
val, ok := reqHeaders[http.CanonicalHeaderKey(header)]
if !ok {
// try to set headers from Query String
val, ok = reqQueries[header]
}
if ok {
for _, enc := range val {
extractedSignedHeaders.Add(header, enc)
}
continue
}
switch header {
case "expect":
// Golang http server strips off 'Expect' header, if the
// client sent this as part of signed headers we need to
// handle otherwise we would see a signature mismatch.
// `aws-cli` sets this as part of signed headers.
//
// According to
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20
// Expect header is always of form:
//
// Expect = "Expect" ":" 1#expectation
// expectation = "100-continue" | expectation-extension
//
			// So it is safe to assume that '100-continue' is what would
// be sent, for the time being keep this work around.
// Adding a *TODO* to remove this later when Golang server
// doesn't filter out the 'Expect' header.
extractedSignedHeaders.Set(header, "100-continue")
case "host":
// Go http server removes "host" from Request.Header
extractedSignedHeaders.Set(header, r.Host)
case "transfer-encoding":
// Go http server removes "host" from Request.Header
for _, enc := range r.TransferEncoding {
extractedSignedHeaders.Add(header, enc)
}
case "content-length":
// Signature-V4 spec excludes Content-Length from signed headers list for signature calculation.
// But some clients deviate from this rule. Hence we consider Content-Length for signature
// calculation to be compatible with such clients.
extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10))
default:
return nil, ErrUnsignedHeaders
}
}
return extractedSignedHeaders, ErrNone
}
// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall()
// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
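// For illustration (not part of the upstream file): signV4TrimAll("  a \t b\n c ")
// returns "a b c" - runs of Unicode whitespace collapse to a single space and
// leading/trailing whitespace is dropped.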
func signV4TrimAll(input string) string {
// Compress adjacent spaces (a space is determined by
// unicode.IsSpace() internally here) to one space and return
return strings.Join(strings.Fields(input), " ")
}
| cmd/signature-v4-utils.go | 0 | https://github.com/minio/minio/commit/4f981a0b42dd35ccfca8fb9f21b058206402bcb6 | [
0.00022850035747978836,
0.00017473135085310787,
0.00016526451508980244,
0.00016960503126028925,
0.000014105369700700976
] |
{
"id": 1,
"code_window": [
"apiVersion: extensions/v1beta1\n",
"kind: Deployment\n",
"metadata:\n",
" # This name uniquely identifies the Deployment\n"
],
"labels": [
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"apiVersion: apps/v1\n"
],
"file_path": "docs/orchestration/kubernetes/minio-gcs-gateway-deployment.yaml",
"type": "replace",
"edit_start_line_idx": 0
} | #!/bin/bash
#
# Mint (C) 2017 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# handle command line arguments
if [ $# -ne 2 ]; then
echo "usage: run.sh <OUTPUT-LOG-FILE> <ERROR-LOG-FILE>"
exit -1
fi
output_log_file="$1"
error_log_file="$2"
# run tests
/mint/run/core/aws-sdk-go/aws-sdk-go 1>>"$output_log_file" 2>"$error_log_file"
| mint/run/core/aws-sdk-go/run.sh | 0 | https://github.com/minio/minio/commit/4f981a0b42dd35ccfca8fb9f21b058206402bcb6 | [
0.0001716322440188378,
0.00017100076365750283,
0.0001698532869340852,
0.00017151674546767026,
8.127540809255152e-7
] |
{
"id": 2,
"code_window": [
"kind: Deployment\n",
"metadata:\n",
" # This name uniquely identifies the Deployment\n",
" name: minio-deployment\n",
"spec:\n",
" strategy:\n",
" type: Recreate\n",
" template:\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" selector:\n",
" matchLabels:\n",
" app: minio # has to match .spec.template.metadata.labels\n"
],
"file_path": "docs/orchestration/kubernetes/minio-gcs-gateway-deployment.yaml",
"type": "add",
"edit_start_line_idx": 6
} | apiVersion: extensions/v1beta1
kind: Deployment
metadata:
# This name uniquely identifies the Deployment
name: minio
spec:
strategy:
# Specifies the strategy used to replace old Pods by new ones
# Refer: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
type: Recreate
template:
metadata:
labels:
# This label is used as a selector in Service definition
app: minio
spec:
# Volumes used by this deployment
volumes:
- name: data
# This volume is based on PVC
persistentVolumeClaim:
# Name of the PVC created earlier
claimName: minio-pv-claim
containers:
- name: minio
# Volume mounts for this container
volumeMounts:
# Volume 'data' is mounted to path '/data'
- name: data
mountPath: "/data"
        # Pulls the latest Minio image from Docker Hub
image: minio/minio:RELEASE.2019-09-26T19-42-35Z
args:
- server
- /data
env:
# MinIO access key and secret key
- name: MINIO_ACCESS_KEY
value: "minio"
- name: MINIO_SECRET_KEY
value: "minio123"
ports:
- containerPort: 9000
# Readiness probe detects situations when MinIO server instance
# is not ready to accept traffic. Kubernetes doesn't forward
# traffic to the pod while readiness checks fail.
readinessProbe:
httpGet:
path: /minio/health/ready
port: 9000
initialDelaySeconds: 120
periodSeconds: 20
# Liveness probe detects situations where MinIO server instance
# is not working properly and needs restart. Kubernetes automatically
# restarts the pods if liveness checks fail.
livenessProbe:
httpGet:
path: /minio/health/live
port: 9000
initialDelaySeconds: 120
periodSeconds: 20
| docs/orchestration/kubernetes/minio-standalone-deployment.yaml | 1 | https://github.com/minio/minio/commit/4f981a0b42dd35ccfca8fb9f21b058206402bcb6 | [
0.12379837781190872,
0.01819298230111599,
0.00017528953321743757,
0.0005405530682764947,
0.04311489313840866
] |
{
"id": 2,
"code_window": [
"kind: Deployment\n",
"metadata:\n",
" # This name uniquely identifies the Deployment\n",
" name: minio-deployment\n",
"spec:\n",
" strategy:\n",
" type: Recreate\n",
" template:\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" selector:\n",
" matchLabels:\n",
" app: minio # has to match .spec.template.metadata.labels\n"
],
"file_path": "docs/orchestration/kubernetes/minio-gcs-gateway-deployment.yaml",
"type": "add",
"edit_start_line_idx": 6
} | /*
* MinIO Cloud Storage, (C) 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
// SetupType - enum for setup type.
type SetupType int
const (
// FSSetupType - FS setup type enum.
FSSetupType SetupType = iota + 1
// XLSetupType - XL setup type enum.
XLSetupType
// DistXLSetupType - Distributed XL setup type enum.
DistXLSetupType
// GatewaySetupType - gateway setup type enum.
GatewaySetupType
)
func (setupType SetupType) String() string {
switch setupType {
case FSSetupType:
return globalMinioModeFS
case XLSetupType:
return globalMinioModeXL
case DistXLSetupType:
return globalMinioModeDistXL
case GatewaySetupType:
return globalMinioModeGatewayPrefix
}
return ""
}
| cmd/setup-type.go | 0 | https://github.com/minio/minio/commit/4f981a0b42dd35ccfca8fb9f21b058206402bcb6 | [
0.00017525158182252198,
0.00017075767391361296,
0.00016393826808780432,
0.00017248601943720132,
0.000004569338670989964
] |
{
"id": 2,
"code_window": [
"kind: Deployment\n",
"metadata:\n",
" # This name uniquely identifies the Deployment\n",
" name: minio-deployment\n",
"spec:\n",
" strategy:\n",
" type: Recreate\n",
" template:\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" selector:\n",
" matchLabels:\n",
" app: minio # has to match .spec.template.metadata.labels\n"
],
"file_path": "docs/orchestration/kubernetes/minio-gcs-gateway-deployment.yaml",
"type": "add",
"edit_start_line_idx": 6
} | /*
* MinIO Cloud Storage (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from "react"
import classNames from "classnames"
import { connect } from "react-redux"
import * as actionsBuckets from "./actions"
import { getCurrentBucket } from "./selectors"
import Dropdown from "react-bootstrap/lib/Dropdown"
export class BucketDropdown extends React.Component {
constructor(props) {
super(props)
this.state = {
showBucketDropdown: false
}
}
toggleDropdown() {
if (this.state.showBucketDropdown) {
this.setState({
showBucketDropdown: false
})
} else {
this.setState({
showBucketDropdown: true
})
}
}
render() {
const { bucket, showBucketPolicy, deleteBucket, currentBucket } = this.props
return (
<Dropdown
open = {this.state.showBucketDropdown}
onToggle = {this.toggleDropdown.bind(this)}
className="bucket-dropdown"
id="bucket-dropdown"
>
<Dropdown.Toggle noCaret>
<i className="zmdi zmdi-more-vert" />
</Dropdown.Toggle>
<Dropdown.Menu className="dropdown-menu-right">
<li>
<a
onClick={e => {
e.stopPropagation()
this.toggleDropdown()
showBucketPolicy()
}}
>
Edit policy
</a>
</li>
<li>
<a
onClick={e => {
e.stopPropagation()
this.toggleDropdown()
deleteBucket(bucket)
}}
>
Delete
</a>
</li>
</Dropdown.Menu>
</Dropdown>
)
}
}
const mapDispatchToProps = dispatch => {
return {
deleteBucket: bucket => dispatch(actionsBuckets.deleteBucket(bucket)),
showBucketPolicy: () => dispatch(actionsBuckets.showBucketPolicy())
}
}
export default connect(state => state, mapDispatchToProps)(BucketDropdown)
| browser/app/js/buckets/BucketDropdown.js | 0 | https://github.com/minio/minio/commit/4f981a0b42dd35ccfca8fb9f21b058206402bcb6 | [
0.00017710142128635198,
0.00016972070443443954,
0.00016173120820894837,
0.00016977899940684438,
0.000004068157068104483
] |
{
"id": 2,
"code_window": [
"kind: Deployment\n",
"metadata:\n",
" # This name uniquely identifies the Deployment\n",
" name: minio-deployment\n",
"spec:\n",
" strategy:\n",
" type: Recreate\n",
" template:\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" selector:\n",
" matchLabels:\n",
" app: minio # has to match .spec.template.metadata.labels\n"
],
"file_path": "docs/orchestration/kubernetes/minio-gcs-gateway-deployment.yaml",
"type": "add",
"edit_start_line_idx": 6
} | /*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package lifecycle
import (
"encoding/xml"
"errors"
)
// And - a tag to combine a prefix and multiple tags for lifecycle configuration rule.
type And struct {
XMLName xml.Name `xml:"And"`
Prefix string `xml:"Prefix,omitempty"`
Tags []Tag `xml:"Tag,omitempty"`
}
var errAndUnsupported = errors.New("Specifying <And></And> tag is not supported")
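// For illustration (not from the upstream file): a lifecycle rule filter of the form
//
//	<Filter>
//	  <And>
//	    <Prefix>logs/</Prefix>
//	    <Tag><Key>env</Key><Value>dev</Value></Tag>
//	  </And>
//	</Filter>
//
// is the kind of input that triggers errAndUnsupported when the configuration is unmarshaled.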
// UnmarshalXML is extended to indicate lack of support for And xml
// tag in object lifecycle configuration
func (a And) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
return errAndUnsupported
}
// MarshalXML is extended to leave out <And></And> tags
func (a And) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
return nil
}
| pkg/lifecycle/and.go | 0 | https://github.com/minio/minio/commit/4f981a0b42dd35ccfca8fb9f21b058206402bcb6 | [
0.00017806014511734247,
0.0001713903184281662,
0.00016281820717267692,
0.00017203972674906254,
0.0000061217783695610706
] |
{
"id": 3,
"code_window": [
"apiVersion: extensions/v1beta1\n",
"kind: Deployment\n",
"metadata:\n",
" # This name uniquely identifies the Deployment\n"
],
"labels": [
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"apiVersion: apps/v1\n"
],
"file_path": "docs/orchestration/kubernetes/minio-standalone-deployment.yaml",
"type": "replace",
"edit_start_line_idx": 0
} | apiVersion: extensions/v1beta1
kind: Deployment
metadata:
# This name uniquely identifies the Deployment
name: minio
spec:
strategy:
# Specifies the strategy used to replace old Pods by new ones
# Refer: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
type: Recreate
template:
metadata:
labels:
# This label is used as a selector in Service definition
app: minio
spec:
# Volumes used by this deployment
volumes:
- name: data
# This volume is based on PVC
persistentVolumeClaim:
# Name of the PVC created earlier
claimName: minio-pv-claim
containers:
- name: minio
# Volume mounts for this container
volumeMounts:
# Volume 'data' is mounted to path '/data'
- name: data
mountPath: "/data"
        # Pulls the latest Minio image from Docker Hub
image: minio/minio:RELEASE.2019-09-26T19-42-35Z
args:
- server
- /data
env:
# MinIO access key and secret key
- name: MINIO_ACCESS_KEY
value: "minio"
- name: MINIO_SECRET_KEY
value: "minio123"
ports:
- containerPort: 9000
# Readiness probe detects situations when MinIO server instance
# is not ready to accept traffic. Kubernetes doesn't forward
# traffic to the pod while readiness checks fail.
readinessProbe:
httpGet:
path: /minio/health/ready
port: 9000
initialDelaySeconds: 120
periodSeconds: 20
# Liveness probe detects situations where MinIO server instance
# is not working properly and needs restart. Kubernetes automatically
# restarts the pods if liveness checks fail.
livenessProbe:
httpGet:
path: /minio/health/live
port: 9000
initialDelaySeconds: 120
periodSeconds: 20
| docs/orchestration/kubernetes/minio-standalone-deployment.yaml | 1 | https://github.com/minio/minio/commit/4f981a0b42dd35ccfca8fb9f21b058206402bcb6 | [
0.9341191649436951,
0.13513195514678955,
0.00016676961968187243,
0.0002816664637066424,
0.32620519399642944
] |
{
"id": 3,
"code_window": [
"apiVersion: extensions/v1beta1\n",
"kind: Deployment\n",
"metadata:\n",
" # This name uniquely identifies the Deployment\n"
],
"labels": [
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"apiVersion: apps/v1\n"
],
"file_path": "docs/orchestration/kubernetes/minio-standalone-deployment.yaml",
"type": "replace",
"edit_start_line_idx": 0
} | FROM golang:1.13-alpine
ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on
RUN \
apk add --no-cache git && \
git clone https://github.com/minio/minio
FROM alpine:3.9
LABEL maintainer="MinIO Inc <[email protected]>"
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' 'su-exec>=0.2' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf && \
curl https://dl.min.io/server/minio/release/linux-amd64/minio > /usr/bin/minio && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh
EXPOSE 9000
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
CMD ["minio"]
| Dockerfile.release | 0 | https://github.com/minio/minio/commit/4f981a0b42dd35ccfca8fb9f21b058206402bcb6 | [
0.0001928014971781522,
0.00017623670282773674,
0.00016425168723799288,
0.00017394681344740093,
0.000011900477147719357
] |
{
"id": 3,
"code_window": [
"apiVersion: extensions/v1beta1\n",
"kind: Deployment\n",
"metadata:\n",
" # This name uniquely identifies the Deployment\n"
],
"labels": [
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"apiVersion: apps/v1\n"
],
"file_path": "docs/orchestration/kubernetes/minio-standalone-deployment.yaml",
"type": "replace",
"edit_start_line_idx": 0
} | #!/bin/bash -e
#
# Mint (C) 2017 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
export APT="apt --quiet --yes"
# remove all packages listed in remove-packages.list
xargs --arg-file="${MINT_ROOT_DIR}/remove-packages.list" apt --quiet --yes purge
${APT} autoremove
# remove unwanted files
rm -fr "$GOROOT" "$GOPATH/src" /var/lib/apt/lists/*
# flush to disk
sync
| mint/postinstall.sh | 0 | https://github.com/minio/minio/commit/4f981a0b42dd35ccfca8fb9f21b058206402bcb6 | [
0.00017341163766104728,
0.00017092366761062294,
0.00016740657156333327,
0.0001719528081594035,
0.000002557283096393803
] |
{
"id": 3,
"code_window": [
"apiVersion: extensions/v1beta1\n",
"kind: Deployment\n",
"metadata:\n",
" # This name uniquely identifies the Deployment\n"
],
"labels": [
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"apiVersion: apps/v1\n"
],
"file_path": "docs/orchestration/kubernetes/minio-standalone-deployment.yaml",
"type": "replace",
"edit_start_line_idx": 0
} | /*
* MinIO Cloud Storage, (C) 2017, 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"encoding/base64"
"net/http"
"testing"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio-go/v6/pkg/encrypt"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/sio"
)
var encryptRequestTests = []struct {
header map[string]string
metadata map[string]string
}{
{
header: map[string]string{
crypto.SSECAlgorithm: "AES256",
crypto.SSECKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
crypto.SSECKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
},
metadata: map[string]string{},
},
{
header: map[string]string{
crypto.SSECAlgorithm: "AES256",
crypto.SSECKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
crypto.SSECKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
},
metadata: map[string]string{
crypto.SSECKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
},
},
}
func TestEncryptRequest(t *testing.T) {
defer func(flag bool) { globalIsSSL = flag }(globalIsSSL)
globalIsSSL = true
for i, test := range encryptRequestTests {
content := bytes.NewReader(make([]byte, 64))
req := &http.Request{Header: http.Header{}}
for k, v := range test.header {
req.Header.Set(k, v)
}
_, _, err := EncryptRequest(content, req, "bucket", "object", test.metadata)
if err != nil {
t.Fatalf("Test %d: Failed to encrypt request: %v", i, err)
}
if kdf, ok := test.metadata[crypto.SSESealAlgorithm]; !ok {
t.Errorf("Test %d: ServerSideEncryptionKDF must be part of metadata: %v", i, kdf)
}
if iv, ok := test.metadata[crypto.SSEIV]; !ok {
t.Errorf("Test %d: crypto.SSEIV must be part of metadata: %v", i, iv)
}
if mac, ok := test.metadata[crypto.SSECSealedKey]; !ok {
t.Errorf("Test %d: ServerSideEncryptionKeyMAC must be part of metadata: %v", i, mac)
}
}
}
var decryptRequestTests = []struct {
bucket, object string
header map[string]string
metadata map[string]string
shouldFail bool
}{
{
bucket: "bucket",
object: "object",
header: map[string]string{
crypto.SSECAlgorithm: "AES256",
crypto.SSECKey: "MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ=",
crypto.SSECKeyMD5: "7PpPLAK26ONlVUGOWlusfg==",
},
metadata: map[string]string{
crypto.SSESealAlgorithm: crypto.InsecureSealAlgorithm,
crypto.SSEIV: "7nQqotA8xgrPx6QK7Ap3GCfjKitqJSrGP7xzgErSJlw=",
crypto.SSECSealedKey: "EAAfAAAAAAD7v1hQq3PFRUHsItalxmrJqrOq6FwnbXNarxOOpb8jTWONPPKyM3Gfjkjyj6NCf+aB/VpHCLCTBA==",
},
shouldFail: false,
},
{
bucket: "bucket",
object: "object",
header: map[string]string{
crypto.SSECAlgorithm: "AES256",
crypto.SSECKey: "MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ=",
crypto.SSECKeyMD5: "7PpPLAK26ONlVUGOWlusfg==",
},
metadata: map[string]string{
crypto.SSESealAlgorithm: crypto.SealAlgorithm,
crypto.SSEIV: "qEqmsONcorqlcZXJxaw32H04eyXyXwUgjHzlhkaIYrU=",
crypto.SSECSealedKey: "IAAfAIM14ugTGcM/dIrn4iQMrkl1sjKyeBQ8FBEvRebYj8vWvxG+0cJRpC6NXRU1wJN50JaUOATjO7kz0wZ2mA==",
},
shouldFail: false,
},
{
bucket: "bucket",
object: "object",
header: map[string]string{
crypto.SSECAlgorithm: "AES256",
crypto.SSECKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
crypto.SSECKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
},
metadata: map[string]string{
crypto.SSESealAlgorithm: "HMAC-SHA3",
crypto.SSEIV: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
crypto.SSECSealedKey: "SY5E9AvI2tI7/nUrUAssIGE32Hcs4rR9z/CUuPqu5N4=",
},
shouldFail: true,
},
{
bucket: "bucket",
object: "object",
header: map[string]string{
crypto.SSECAlgorithm: "AES256",
crypto.SSECKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
crypto.SSECKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
},
metadata: map[string]string{
crypto.SSESealAlgorithm: crypto.InsecureSealAlgorithm,
crypto.SSEIV: "RrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
crypto.SSECSealedKey: "SY5E9AvI2tI7/nUrUAssIGE32Hcs4rR9z/CUuPqu5N4=",
},
shouldFail: true,
},
{
bucket: "bucket",
object: "object",
header: map[string]string{
crypto.SSECAlgorithm: "AES256",
crypto.SSECKey: "XAm0dRrJsEsyPb1UuFNezv1bl9hxuYsgUVC/MUctE2k=",
crypto.SSECKeyMD5: "bY4wkxQejw9mUJfo72k53A==",
},
metadata: map[string]string{
crypto.SSESealAlgorithm: crypto.InsecureSealAlgorithm,
crypto.SSEIV: "XAm0dRrJsEsyPb1UuFNezv1bl9ehxuYsgUVC/MUctE2k=",
crypto.SSECSealedKey: "SY5E9AvI2tI7/nUrUAssIGE32Hds4rR9z/CUuPqu5N4=",
},
shouldFail: true,
},
{
bucket: "bucket",
object: "object-2",
header: map[string]string{
crypto.SSECAlgorithm: "AES256",
crypto.SSECKey: "MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ=",
crypto.SSECKeyMD5: "7PpPLAK26ONlVUGOWlusfg==",
},
metadata: map[string]string{
crypto.SSESealAlgorithm: crypto.SealAlgorithm,
crypto.SSEIV: "qEqmsONcorqlcZXJxaw32H04eyXyXwUgjHzlhkaIYrU=",
crypto.SSECSealedKey: "IAAfAIM14ugTGcM/dIrn4iQMrkl1sjKyeBQ8FBEvRebYj8vWvxG+0cJRpC6NXRU1wJN50JaUOATjO7kz0wZ2mA==",
},
shouldFail: true,
},
}
func TestDecryptRequest(t *testing.T) {
defer func(flag bool) { globalIsSSL = flag }(globalIsSSL)
globalIsSSL = true
for i, test := range decryptRequestTests[1:] {
client := bytes.NewBuffer(nil)
req := &http.Request{Header: http.Header{}}
for k, v := range test.header {
req.Header.Set(k, v)
}
_, err := DecryptRequest(client, req, test.bucket, test.object, test.metadata)
if err != nil && !test.shouldFail {
t.Fatalf("Test %d: Failed to encrypt request: %v", i, err)
}
if err == nil && test.shouldFail {
t.Fatalf("Test %d: should fail but passed", i)
}
if key, ok := test.metadata[crypto.SSECKey]; ok {
t.Errorf("Test %d: Client provided key survived in metadata - key: %s", i, key)
}
if kdf, ok := test.metadata[crypto.SSESealAlgorithm]; ok && !test.shouldFail {
t.Errorf("Test %d: ServerSideEncryptionKDF should not be part of metadata: %v", i, kdf)
}
if iv, ok := test.metadata[crypto.SSEIV]; ok && !test.shouldFail {
t.Errorf("Test %d: crypto.SSEIV should not be part of metadata: %v", i, iv)
}
if mac, ok := test.metadata[crypto.SSECSealedKey]; ok && !test.shouldFail {
t.Errorf("Test %d: ServerSideEncryptionKeyMAC should not be part of metadata: %v", i, mac)
}
}
}
var decryptObjectInfoTests = []struct {
info ObjectInfo
headers http.Header
expErr error
}{
{
info: ObjectInfo{Size: 100},
headers: http.Header{},
expErr: nil,
},
{
info: ObjectInfo{Size: 100, UserDefined: map[string]string{crypto.SSESealAlgorithm: crypto.InsecureSealAlgorithm}},
headers: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}},
expErr: nil,
},
{
info: ObjectInfo{Size: 0, UserDefined: map[string]string{crypto.SSESealAlgorithm: crypto.InsecureSealAlgorithm}},
headers: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}},
expErr: nil,
},
{
info: ObjectInfo{Size: 100, UserDefined: map[string]string{crypto.SSECSealedKey: "EAAfAAAAAAD7v1hQq3PFRUHsItalxmrJqrOq6FwnbXNarxOOpb8jTWONPPKyM3Gfjkjyj6NCf+aB/VpHCLCTBA=="}},
headers: http.Header{},
expErr: errEncryptedObject,
},
{
info: ObjectInfo{Size: 100, UserDefined: map[string]string{}},
headers: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}},
expErr: errInvalidEncryptionParameters,
},
{
info: ObjectInfo{Size: 31, UserDefined: map[string]string{crypto.SSESealAlgorithm: crypto.InsecureSealAlgorithm}},
headers: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}},
expErr: errObjectTampered,
},
}
func TestDecryptObjectInfo(t *testing.T) {
for i, test := range decryptObjectInfoTests {
if encrypted, err := DecryptObjectInfo(&test.info, test.headers); err != test.expErr {
t.Errorf("Test %d: Decryption returned wrong error code: got %d , want %d", i, err, test.expErr)
} else if enc := crypto.IsEncrypted(test.info.UserDefined); encrypted && enc != encrypted {
t.Errorf("Test %d: Decryption thinks object is encrypted but it is not", i)
} else if !encrypted && enc != encrypted {
t.Errorf("Test %d: Decryption thinks object is not encrypted but it is", i)
}
}
}
// Tests for issue reproduced when getting the right encrypted
// offset of the object.
func TestGetDecryptedRange_Issue50(t *testing.T) {
rs, err := parseRequestRangeSpec("bytes=594870256-594870263")
if err != nil {
t.Fatal(err)
}
objInfo := ObjectInfo{
Bucket: "bucket",
Name: "object",
Size: 595160760,
UserDefined: map[string]string{
crypto.SSEMultipart: "",
crypto.SSEIV: "HTexa=",
crypto.SSESealAlgorithm: "DAREv2-HMAC-SHA256",
crypto.SSECSealedKey: "IAA8PGAA==",
ReservedMetadataPrefix + "actual-size": "594870264",
"content-type": "application/octet-stream",
"etag": "166b1545b4c1535294ee0686678bea8c-2",
},
Parts: []ObjectPartInfo{
{
Number: 1,
Name: "part.1",
ETag: "etag1",
Size: 297580380,
ActualSize: 297435132,
},
{
Number: 2,
Name: "part.2",
ETag: "etag2",
Size: 297580380,
ActualSize: 297435132,
},
},
}
encOff, encLength, skipLen, seqNumber, partStart, err := objInfo.GetDecryptedRange(rs)
if err != nil {
t.Fatalf("Test: failed %s", err)
}
if encOff != 595127964 {
t.Fatalf("Test: expected %d, got %d", 595127964, encOff)
}
if encLength != 32796 {
t.Fatalf("Test: expected %d, got %d", 32796, encLength)
}
if skipLen != 32756 {
t.Fatalf("Test: expected %d, got %d", 32756, skipLen)
}
if seqNumber != 4538 {
t.Fatalf("Test: expected %d, got %d", 4538, seqNumber)
}
if partStart != 1 {
t.Fatalf("Test: expected %d, got %d", 1, partStart)
}
}
func TestGetDecryptedRange(t *testing.T) {
var (
pkgSz = int64(64) * humanize.KiByte
minPartSz = int64(5) * humanize.MiByte
maxPartSz = int64(5) * humanize.GiByte
getEncSize = func(s int64) int64 {
v, _ := sio.EncryptedSize(uint64(s))
return int64(v)
}
udMap = func(isMulti bool) map[string]string {
m := map[string]string{
crypto.SSESealAlgorithm: crypto.InsecureSealAlgorithm,
crypto.SSEMultipart: "1",
}
if !isMulti {
delete(m, crypto.SSEMultipart)
}
return m
}
)
// Single part object tests
var (
mkSPObj = func(s int64) ObjectInfo {
return ObjectInfo{
Size: getEncSize(s),
UserDefined: udMap(false),
}
}
)
testSP := []struct {
decSz int64
oi ObjectInfo
}{
{0, mkSPObj(0)},
{1, mkSPObj(1)},
{pkgSz - 1, mkSPObj(pkgSz - 1)},
{pkgSz, mkSPObj(pkgSz)},
{2*pkgSz - 1, mkSPObj(2*pkgSz - 1)},
{minPartSz, mkSPObj(minPartSz)},
{maxPartSz, mkSPObj(maxPartSz)},
}
for i, test := range testSP {
{
// nil range
o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(nil)
if err != nil {
t.Errorf("Case %d: unexpected err: %v", i, err)
}
if skip != 0 || sn != 0 || ps != 0 || o != 0 || l != getEncSize(test.decSz) {
t.Errorf("Case %d: test failed: %d %d %d %d %d", i, o, l, skip, sn, ps)
}
}
if test.decSz >= 10 {
// first 10 bytes
o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(&HTTPRangeSpec{false, 0, 9})
if err != nil {
t.Errorf("Case %d: unexpected err: %v", i, err)
}
var rLen = pkgSz + 32
if test.decSz < pkgSz {
rLen = test.decSz + 32
}
if skip != 0 || sn != 0 || ps != 0 || o != 0 || l != rLen {
t.Errorf("Case %d: test failed: %d %d %d %d %d", i, o, l, skip, sn, ps)
}
}
kb32 := int64(32) * humanize.KiByte
if test.decSz >= (64+32)*humanize.KiByte {
// Skip the first 32Kib, and read the next 64Kib
o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(&HTTPRangeSpec{false, kb32, 3*kb32 - 1})
if err != nil {
t.Errorf("Case %d: unexpected err: %v", i, err)
}
var rLen = (pkgSz + 32) * 2
if test.decSz < 2*pkgSz {
rLen = (pkgSz + 32) + (test.decSz - pkgSz + 32)
}
if skip != kb32 || sn != 0 || ps != 0 || o != 0 || l != rLen {
t.Errorf("Case %d: test failed: %d %d %d %d %d", i, o, l, skip, sn, ps)
}
}
if test.decSz >= (64*2+32)*humanize.KiByte {
// Skip the first 96Kib and read the next 64Kib
o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(&HTTPRangeSpec{false, 3 * kb32, 5*kb32 - 1})
if err != nil {
t.Errorf("Case %d: unexpected err: %v", i, err)
}
var rLen = (pkgSz + 32) * 2
if test.decSz-pkgSz < 2*pkgSz {
rLen = (pkgSz + 32) + (test.decSz - pkgSz + 32*2)
}
if skip != kb32 || sn != 1 || ps != 0 || o != pkgSz+32 || l != rLen {
t.Errorf("Case %d: test failed: %d %d %d %d %d", i, o, l, skip, sn, ps)
}
}
}
// Multipart object tests
var (
// make a multipart object-info given part sizes
mkMPObj = func(sizes []int64) ObjectInfo {
r := make([]ObjectPartInfo, len(sizes))
sum := int64(0)
for i, s := range sizes {
r[i].Number = i
r[i].Size = int64(getEncSize(s))
sum += r[i].Size
}
return ObjectInfo{
Size: sum,
UserDefined: udMap(true),
Parts: r,
}
}
// Simple useful utilities
repeat = func(k int64, n int) []int64 {
a := []int64{}
for i := 0; i < n; i++ {
a = append(a, k)
}
return a
}
lsum = func(s []int64) int64 {
sum := int64(0)
for _, i := range s {
if i < 0 {
return -1
}
sum += i
}
return sum
}
esum = func(oi ObjectInfo) int64 {
sum := int64(0)
for _, i := range oi.Parts {
sum += i.Size
}
return sum
}
)
s1 := []int64{5487701, 5487799, 3}
s2 := repeat(5487701, 5)
s3 := repeat(maxPartSz, 10000)
testMPs := []struct {
decSizes []int64
oi ObjectInfo
}{
{s1, mkMPObj(s1)},
{s2, mkMPObj(s2)},
{s3, mkMPObj(s3)},
}
// This function is a reference (re-)implementation of
// decrypted range computation, written solely for the purpose
// of the unit tests.
//
// `s` gives the decrypted part sizes, and the other
// parameters describe the desired read segment. When
// `isFromEnd` is true, `skipLen` argument is ignored.
decryptedRangeRef := func(s []int64, skipLen, readLen int64, isFromEnd bool) (o, l, skip int64, sn uint32, ps int) {
oSize := lsum(s)
if isFromEnd {
skipLen = oSize - readLen
}
if skipLen < 0 || readLen < 0 || oSize < 0 || skipLen+readLen > oSize {
t.Fatalf("Impossible read specified: %d %d %d", skipLen, readLen, oSize)
}
var cumulativeSum, cumulativeEncSum int64
toRead := readLen
readStart := false
for i, v := range s {
partOffset := int64(0)
partDarePkgOffset := int64(0)
if !readStart && cumulativeSum+v > skipLen {
// Read starts at the current part
readStart = true
partOffset = skipLen - cumulativeSum
// All return values except `l` are
// calculated here.
sn = uint32(partOffset / pkgSz)
skip = partOffset % pkgSz
ps = i
o = cumulativeEncSum + int64(sn)*(pkgSz+32)
partDarePkgOffset = partOffset - skip
}
if readStart {
currentPartBytes := v - partOffset
currentPartDareBytes := v - partDarePkgOffset
if currentPartBytes < toRead {
toRead -= currentPartBytes
l += getEncSize(currentPartDareBytes)
} else {
// current part has the last
// byte required
lbPartOffset := partOffset + toRead - 1
// round up the lbPartOffset
// to the end of the
// corresponding DARE package
lbPkgEndOffset := lbPartOffset - (lbPartOffset % pkgSz) + pkgSz
if lbPkgEndOffset > v {
lbPkgEndOffset = v
}
bytesToDrop := v - lbPkgEndOffset
// Last segment to update `l`
l += getEncSize(currentPartDareBytes - bytesToDrop)
break
}
}
cumulativeSum += v
cumulativeEncSum += getEncSize(v)
}
return
}
for i, test := range testMPs {
{
// nil range
o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(nil)
if err != nil {
t.Errorf("Case %d: unexpected err: %v", i, err)
}
if o != 0 || l != esum(test.oi) || skip != 0 || sn != 0 || ps != 0 {
t.Errorf("Case %d: test failed: %d %d %d %d %d", i, o, l, skip, sn, ps)
}
}
// Skip 1Mib and read 1Mib (in the decrypted object)
//
// The check below ensures the object is large enough
// for the read.
if lsum(test.decSizes) >= 2*humanize.MiByte {
skipLen, readLen := int64(1)*humanize.MiByte, int64(1)*humanize.MiByte
o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(&HTTPRangeSpec{false, skipLen, skipLen + readLen - 1})
if err != nil {
t.Errorf("Case %d: unexpected err: %v", i, err)
}
oRef, lRef, skipRef, snRef, psRef := decryptedRangeRef(test.decSizes, skipLen, readLen, false)
if o != oRef || l != lRef || skip != skipRef || sn != snRef || ps != psRef {
t.Errorf("Case %d: test failed: %d %d %d %d %d (Ref: %d %d %d %d %d)",
i, o, l, skip, sn, ps, oRef, lRef, skipRef, snRef, psRef)
}
}
// Read the last 6Mib+1 bytes of the (decrypted)
// object
//
// The check below ensures the object is large enough
// for the read.
readLen := int64(6)*humanize.MiByte + 1
if lsum(test.decSizes) >= readLen {
o, l, skip, sn, ps, err := test.oi.GetDecryptedRange(&HTTPRangeSpec{true, -readLen, -1})
if err != nil {
t.Errorf("Case %d: unexpected err: %v", i, err)
}
oRef, lRef, skipRef, snRef, psRef := decryptedRangeRef(test.decSizes, 0, readLen, true)
if o != oRef || l != lRef || skip != skipRef || sn != snRef || ps != psRef {
t.Errorf("Case %d: test failed: %d %d %d %d %d (Ref: %d %d %d %d %d)",
i, o, l, skip, sn, ps, oRef, lRef, skipRef, snRef, psRef)
}
}
}
}
var getDefaultOptsTests = []struct {
headers http.Header
copySource bool
metadata map[string]string
encryptionType encrypt.Type
err error
}{
{headers: http.Header{crypto.SSECAlgorithm: []string{"AES256"},
crypto.SSECKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
crypto.SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
copySource: false,
metadata: nil,
encryptionType: encrypt.SSEC,
err: nil}, // 0
{headers: http.Header{crypto.SSECAlgorithm: []string{"AES256"},
crypto.SSECKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
crypto.SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
copySource: true,
metadata: nil,
encryptionType: "",
err: nil}, // 1
{headers: http.Header{crypto.SSECAlgorithm: []string{"AES256"},
crypto.SSECKey: []string{"Mz"},
crypto.SSECKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
copySource: false,
metadata: nil,
encryptionType: "",
err: crypto.ErrInvalidCustomerKey}, // 2
{headers: http.Header{crypto.SSEHeader: []string{"AES256"}},
copySource: false,
metadata: nil,
encryptionType: encrypt.S3,
err: nil}, // 3
{headers: http.Header{},
copySource: false,
metadata: map[string]string{crypto.S3SealedKey: base64.StdEncoding.EncodeToString(make([]byte, 64)),
crypto.S3KMSKeyID: "kms-key",
crypto.S3KMSSealedKey: "m-key"},
encryptionType: encrypt.S3,
err: nil}, // 4
{headers: http.Header{},
copySource: true,
metadata: map[string]string{crypto.S3SealedKey: base64.StdEncoding.EncodeToString(make([]byte, 64)),
crypto.S3KMSKeyID: "kms-key",
crypto.S3KMSSealedKey: "m-key"},
encryptionType: "",
err: nil}, // 5
{headers: http.Header{crypto.SSECopyAlgorithm: []string{"AES256"},
crypto.SSECopyKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
crypto.SSECopyKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
copySource: true,
metadata: nil,
encryptionType: encrypt.SSEC,
err: nil}, // 6
{headers: http.Header{crypto.SSECopyAlgorithm: []string{"AES256"},
crypto.SSECopyKey: []string{"MzJieXRlc2xvbmdzZWNyZXRrZXltdXN0cHJvdmlkZWQ="},
crypto.SSECopyKeyMD5: []string{"7PpPLAK26ONlVUGOWlusfg=="}},
copySource: false,
metadata: nil,
encryptionType: "",
err: nil}, // 7
}
func TestGetDefaultOpts(t *testing.T) {
for i, test := range getDefaultOptsTests {
opts, err := getDefaultOpts(test.headers, test.copySource, test.metadata)
if test.err != err {
t.Errorf("Case %d: expected err: %v , actual err: %v", i, test.err, err)
}
if err == nil {
if opts.ServerSideEncryption == nil && test.encryptionType != "" {
t.Errorf("Case %d: expected opts to be of %v encryption type", i, test.encryptionType)
}
if opts.ServerSideEncryption != nil && test.encryptionType != opts.ServerSideEncryption.Type() {
t.Errorf("Case %d: expected opts to have encryption type %v but was %v ", i, test.encryptionType, opts.ServerSideEncryption.Type())
}
}
}
}
| cmd/encryption-v1_test.go | 0 | https://github.com/minio/minio/commit/4f981a0b42dd35ccfca8fb9f21b058206402bcb6 | [
0.0005104067386128008,
0.0001941648661158979,
0.00016489476547576487,
0.00016932853031903505,
0.00006722319812979549
] |
{
"id": 4,
"code_window": [
"metadata:\n",
" # This name uniquely identifies the Deployment\n",
" name: minio\n",
"spec:\n",
" strategy:\n",
" # Specifies the strategy used to replace old Pods by new ones\n",
" # Refer: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy\n",
" type: Recreate\n",
" template:\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" selector:\n",
" matchLabels:\n",
" app: minio # has to match .spec.template.metadata.labels\n"
],
"file_path": "docs/orchestration/kubernetes/minio-standalone-deployment.yaml",
"type": "add",
"edit_start_line_idx": 6
} | apiVersion: extensions/v1beta1
kind: Deployment
metadata:
# This name uniquely identifies the Deployment
name: minio-deployment
spec:
strategy:
type: Recreate
template:
metadata:
labels:
# Label is used as selector in the service.
app: minio
spec:
# Refer to the secret created earlier
volumes:
- name: gcs-credentials
secret:
# Name of the Secret created earlier
secretName: gcs-credentials
containers:
- name: minio
# Pulls the default Minio image from Docker Hub
image: minio/minio:RELEASE.2019-09-26T19-42-35Z
args:
- gateway
- gcs
- gcp_project_id
env:
# MinIO access key and secret key
- name: MINIO_ACCESS_KEY
value: "minio"
- name: MINIO_SECRET_KEY
value: "minio123"
# Google Cloud Service uses this variable
- name: GOOGLE_APPLICATION_CREDENTIALS
value: "/etc/credentials/application_default_credentials.json"
ports:
- containerPort: 9000
# Mount the volume into the pod
volumeMounts:
- name: gcs-credentials
mountPath: "/etc/credentials"
readOnly: true | docs/orchestration/kubernetes/minio-gcs-gateway-deployment.yaml | 1 | https://github.com/minio/minio/commit/4f981a0b42dd35ccfca8fb9f21b058206402bcb6 | [
0.12627804279327393,
0.02542142942547798,
0.000174972417880781,
0.0002220262831542641,
0.05042831599712372
] |
{
"id": 4,
"code_window": [
"metadata:\n",
" # This name uniquely identifies the Deployment\n",
" name: minio\n",
"spec:\n",
" strategy:\n",
" # Specifies the strategy used to replace old Pods by new ones\n",
" # Refer: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy\n",
" type: Recreate\n",
" template:\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" selector:\n",
" matchLabels:\n",
" app: minio # has to match .spec.template.metadata.labels\n"
],
"file_path": "docs/orchestration/kubernetes/minio-standalone-deployment.yaml",
"type": "add",
"edit_start_line_idx": 6
} | ## `minio-go` tests
This directory serves as the location for Mint tests using `minio-go`. Top level `mint.sh` calls `run.sh` to execute tests.
## Adding new tests
New tests are added as functional tests of minio-go. Please check https://github.com/minio/minio-go
## Running tests manually
- Set environment variables `MINT_DATA_DIR`, `MINT_MODE`, `SERVER_ENDPOINT`, `ACCESS_KEY`, `SECRET_KEY`, `SERVER_REGION` and `ENABLE_HTTPS`
- Call `run.sh` with an output log file and an error log file, for example:
```bash
export MINT_DATA_DIR=~/my-mint-dir
export MINT_MODE=core
export SERVER_ENDPOINT="play.minio.io:9000"
export ACCESS_KEY="Q3AM3UQ867SPQQA43P2F"
export SECRET_KEY="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
export ENABLE_HTTPS=1
export SERVER_REGION=us-east-1
./run.sh /tmp/output.log /tmp/error.log
```
| mint/run/core/minio-go/README.md | 0 | https://github.com/minio/minio/commit/4f981a0b42dd35ccfca8fb9f21b058206402bcb6 | [
0.00016825231432449073,
0.00016530805442016572,
0.0001623637945158407,
0.00016530805442016572,
0.0000029442599043250084
] |
{
"id": 4,
"code_window": [
"metadata:\n",
" # This name uniquely identifies the Deployment\n",
" name: minio\n",
"spec:\n",
" strategy:\n",
" # Specifies the strategy used to replace old Pods by new ones\n",
" # Refer: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy\n",
" type: Recreate\n",
" template:\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" selector:\n",
" matchLabels:\n",
" app: minio # has to match .spec.template.metadata.labels\n"
],
"file_path": "docs/orchestration/kubernetes/minio-standalone-deployment.yaml",
"type": "add",
"edit_start_line_idx": 6
} | /*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package condition
import (
"encoding/base64"
"fmt"
"net/http"
"sort"
"github.com/minio/minio-go/v6/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/set"
)
func toBinaryEqualsFuncString(n name, key Key, values set.StringSet) string {
valueStrings := values.ToSlice()
sort.Strings(valueStrings)
return fmt.Sprintf("%v:%v:%v", n, key, valueStrings)
}
// binaryEqualsFunc - String equals function. It checks whether value by Key in given
// values map is in condition values.
// For example,
// - if values = ["mybucket/foo"], at evaluate() it returns whether string
// in value map for Key is in values.
type binaryEqualsFunc struct {
k Key
values set.StringSet
}
// evaluate() - evaluates to check whether value by Key in given values is in
// condition values.
func (f binaryEqualsFunc) evaluate(values map[string][]string) bool {
requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())]
if !ok {
requestValue = values[f.k.Name()]
}
fvalues := f.values.ApplyFunc(substFuncFromValues(values))
return !fvalues.Intersection(set.CreateStringSet(requestValue...)).IsEmpty()
}
// key() - returns condition key which is used by this condition function.
func (f binaryEqualsFunc) key() Key {
return f.k
}
// name() - returns "BinaryEquals" condition name.
func (f binaryEqualsFunc) name() name {
return binaryEquals
}
func (f binaryEqualsFunc) String() string {
return toBinaryEqualsFuncString(binaryEquals, f.k, f.values)
}
// toMap - returns map representation of this function.
func (f binaryEqualsFunc) toMap() map[Key]ValueSet {
if !f.k.IsValid() {
return nil
}
values := NewValueSet()
for _, value := range f.values.ToSlice() {
values.Add(NewStringValue(base64.StdEncoding.EncodeToString([]byte(value))))
}
return map[Key]ValueSet{
f.k: values,
}
}
func validateBinaryEqualsValues(n name, key Key, values set.StringSet) error {
vslice := values.ToSlice()
for _, s := range vslice {
sbytes, err := base64.StdEncoding.DecodeString(s)
if err != nil {
return err
}
values.Remove(s)
s = string(sbytes)
switch key {
case S3XAmzCopySource:
bucket, object := path2BucketAndObject(s)
if object == "" {
return fmt.Errorf("invalid value '%v' for '%v' for %v condition", s, S3XAmzCopySource, n)
}
if err = s3utils.CheckValidBucketName(bucket); err != nil {
return err
}
case S3XAmzServerSideEncryption, S3XAmzServerSideEncryptionCustomerAlgorithm:
if s != "AES256" {
return fmt.Errorf("invalid value '%v' for '%v' for %v condition", s, S3XAmzServerSideEncryption, n)
}
case S3XAmzMetadataDirective:
if s != "COPY" && s != "REPLACE" {
return fmt.Errorf("invalid value '%v' for '%v' for %v condition", s, S3XAmzMetadataDirective, n)
}
}
values.Add(s)
}
return nil
}
// newBinaryEqualsFunc - returns new BinaryEquals function.
func newBinaryEqualsFunc(key Key, values ValueSet) (Function, error) {
valueStrings, err := valuesToStringSlice(binaryEquals, values)
if err != nil {
return nil, err
}
return NewBinaryEqualsFunc(key, valueStrings...)
}
// NewBinaryEqualsFunc - returns new BinaryEquals function.
func NewBinaryEqualsFunc(key Key, values ...string) (Function, error) {
sset := set.CreateStringSet(values...)
if err := validateBinaryEqualsValues(binaryEquals, key, sset); err != nil {
return nil, err
}
return &binaryEqualsFunc{key, sset}, nil
}
| pkg/policy/condition/binaryequalsfunc.go | 0 | https://github.com/minio/minio/commit/4f981a0b42dd35ccfca8fb9f21b058206402bcb6 | [
0.0004957464407198131,
0.00019573650206439197,
0.00016467494424432516,
0.00017327515524812043,
0.00008338991756318137
] |
{
"id": 4,
"code_window": [
"metadata:\n",
" # This name uniquely identifies the Deployment\n",
" name: minio\n",
"spec:\n",
" strategy:\n",
" # Specifies the strategy used to replace old Pods by new ones\n",
" # Refer: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy\n",
" type: Recreate\n",
" template:\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" selector:\n",
" matchLabels:\n",
" app: minio # has to match .spec.template.metadata.labels\n"
],
"file_path": "docs/orchestration/kubernetes/minio-standalone-deployment.yaml",
"type": "add",
"edit_start_line_idx": 6
} | // +build linux
/*
* MinIO Cloud Storage, (C) 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package mountinfo
// mountInfo - This represents a single line in /proc/mounts.
type mountInfo struct {
Device string
Path string
FSType string
Options []string
Freq string
Pass string
}
func (m mountInfo) String() string {
return m.Path
}
// mountInfos - This represents the entire /proc/mounts.
type mountInfos []mountInfo
| pkg/mountinfo/mountinfo.go | 0 | https://github.com/minio/minio/commit/4f981a0b42dd35ccfca8fb9f21b058206402bcb6 | [
0.0001772196701494977,
0.0001694545935606584,
0.00016271235654130578,
0.0001689431956037879,
0.0000055574009820702486
] |
{
"id": 0,
"code_window": [
" # one of always, never, when-maximised\n",
" # this determines whether the git graph is rendered in the commits panel\n",
" showGraph: 'when-maximised'\n",
" skipHookPrefix: WIP\n",
" autoFetch: true\n",
" autoRefresh: true\n",
" branchLogCmd: 'git log --graph --color=always --abbrev-commit --decorate --date=relative --pretty=medium {{branchName}} --'\n",
" allBranchesLogCmd: 'git log --graph --all --color=always --abbrev-commit --decorate --date=relative --pretty=medium'\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" # displays the whole git graph by default in the commits panel\n",
" showWholeGraph: false\n"
],
"file_path": "docs/Config.md",
"type": "add",
"edit_start_line_idx": 77
} | package context
import (
"github.com/jesseduffield/gocui"
"github.com/jesseduffield/lazygit/pkg/commands/models"
"github.com/jesseduffield/lazygit/pkg/gui/types"
)
type LocalCommitsContext struct {
*LocalCommitsViewModel
*ViewportListContextTrait
}
var _ types.IListContext = (*LocalCommitsContext)(nil)
func NewLocalCommitsContext(
getModel func() []*models.Commit,
view *gocui.View,
getDisplayStrings func(startIdx int, length int) [][]string,
onFocus func(...types.OnFocusOpts) error,
onRenderToMain func(...types.OnFocusOpts) error,
onFocusLost func() error,
c *types.HelperCommon,
) *LocalCommitsContext {
viewModel := NewLocalCommitsViewModel(getModel)
return &LocalCommitsContext{
LocalCommitsViewModel: viewModel,
ViewportListContextTrait: &ViewportListContextTrait{
ListContextTrait: &ListContextTrait{
Context: NewSimpleContext(NewBaseContext(NewBaseContextOpts{
ViewName: "commits",
WindowName: "commits",
Key: LOCAL_COMMITS_CONTEXT_KEY,
Kind: types.SIDE_CONTEXT,
Focusable: true,
}), ContextCallbackOpts{
OnFocus: onFocus,
OnFocusLost: onFocusLost,
OnRenderToMain: onRenderToMain,
}),
list: viewModel,
viewTrait: NewViewTrait(view),
getDisplayStrings: getDisplayStrings,
c: c,
},
},
}
}
func (self *LocalCommitsContext) GetSelectedItemId() string {
item := self.GetSelected()
if item == nil {
return ""
}
return item.ID()
}
type LocalCommitsViewModel struct {
*BasicViewModel[*models.Commit]
// If this is true we limit the amount of commits we load, for the sake of keeping things fast.
// If the user attempts to scroll past the end of the list, we will load more commits.
limitCommits bool
// If this is true we'll use git log --all when fetching the commits.
showWholeGitGraph bool
}
func NewLocalCommitsViewModel(getModel func() []*models.Commit) *LocalCommitsViewModel {
self := &LocalCommitsViewModel{
BasicViewModel: NewBasicViewModel(getModel),
limitCommits: true,
}
return self
}
func (self *LocalCommitsContext) CanRebase() bool {
return true
}
func (self *LocalCommitsContext) GetSelectedRef() types.Ref {
commit := self.GetSelected()
if commit == nil {
return nil
}
return commit
}
func (self *LocalCommitsViewModel) SetLimitCommits(value bool) {
self.limitCommits = value
}
func (self *LocalCommitsViewModel) GetLimitCommits() bool {
return self.limitCommits
}
func (self *LocalCommitsViewModel) SetShowWholeGitGraph(value bool) {
self.showWholeGitGraph = value
}
func (self *LocalCommitsViewModel) GetShowWholeGitGraph() bool {
return self.showWholeGitGraph
}
func (self *LocalCommitsViewModel) GetCommits() []*models.Commit {
return self.getModel()
}
| pkg/gui/context/local_commits_context.go | 1 | https://github.com/jesseduffield/lazygit/commit/666180cfd02f7ff69faa7425caa5eef358955827 | [
0.0029935818165540695,
0.0005121869035065174,
0.00016755174146965146,
0.0002091269416268915,
0.0007702581351622939
] |
{
"id": 0,
"code_window": [
" # one of always, never, when-maximised\n",
" # this determines whether the git graph is rendered in the commits panel\n",
" showGraph: 'when-maximised'\n",
" skipHookPrefix: WIP\n",
" autoFetch: true\n",
" autoRefresh: true\n",
" branchLogCmd: 'git log --graph --color=always --abbrev-commit --decorate --date=relative --pretty=medium {{branchName}} --'\n",
" allBranchesLogCmd: 'git log --graph --all --color=always --abbrev-commit --decorate --date=relative --pretty=medium'\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" # displays the whole git graph by default in the commits panel\n",
" showWholeGraph: false\n"
],
"file_path": "docs/Config.md",
"type": "add",
"edit_start_line_idx": 77
} | 0000000000000000000000000000000000000000 5d98350a913b48a35001ff9b54335f065b25fd7c CI <[email protected]> 1648348611 +1100 commit (initial): myfile1
5d98350a913b48a35001ff9b54335f065b25fd7c 5b85aaf0806d1bc5830bb10291727f773c3402dc CI <[email protected]> 1648348611 +1100 commit: myfile2
5b85aaf0806d1bc5830bb10291727f773c3402dc 3e912c74bc7c237df0c521aff7b3f4932d7e8616 CI <[email protected]> 1648348611 +1100 commit: myfile3
3e912c74bc7c237df0c521aff7b3f4932d7e8616 d108cb97213835c25d44e14d167e7c5b48f94ce2 CI <[email protected]> 1648348611 +1100 commit: myfile4
| test/integration/pushWithCredentials/expected/repo/.git_keep/logs/refs/heads/master | 0 | https://github.com/jesseduffield/lazygit/commit/666180cfd02f7ff69faa7425caa5eef358955827 | [
0.00016255542868748307,
0.00016255542868748307,
0.00016255542868748307,
0.00016255542868748307,
0
] |
{
"id": 0,
"code_window": [
" # one of always, never, when-maximised\n",
" # this determines whether the git graph is rendered in the commits panel\n",
" showGraph: 'when-maximised'\n",
" skipHookPrefix: WIP\n",
" autoFetch: true\n",
" autoRefresh: true\n",
" branchLogCmd: 'git log --graph --color=always --abbrev-commit --decorate --date=relative --pretty=medium {{branchName}} --'\n",
" allBranchesLogCmd: 'git log --graph --all --color=always --abbrev-commit --decorate --date=relative --pretty=medium'\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" # displays the whole git graph by default in the commits panel\n",
" showWholeGraph: false\n"
],
"file_path": "docs/Config.md",
"type": "add",
"edit_start_line_idx": 77
} | d0ce4cb10cd926f646a08889b077a6d7eddd3534
| test/integration/rememberCommitMessageAfterFail/expected/repo/.git_keep/refs/heads/master | 0 | https://github.com/jesseduffield/lazygit/commit/666180cfd02f7ff69faa7425caa5eef358955827 | [
0.00019225334108341485,
0.00019225334108341485,
0.00019225334108341485,
0.00019225334108341485,
0
] |
{
"id": 0,
"code_window": [
" # one of always, never, when-maximised\n",
" # this determines whether the git graph is rendered in the commits panel\n",
" showGraph: 'when-maximised'\n",
" skipHookPrefix: WIP\n",
" autoFetch: true\n",
" autoRefresh: true\n",
" branchLogCmd: 'git log --graph --color=always --abbrev-commit --decorate --date=relative --pretty=medium {{branchName}} --'\n",
" allBranchesLogCmd: 'git log --graph --all --color=always --abbrev-commit --decorate --date=relative --pretty=medium'\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" # displays the whole git graph by default in the commits panel\n",
" showWholeGraph: false\n"
],
"file_path": "docs/Config.md",
"type": "add",
"edit_start_line_idx": 77
} | test2
| test/integration/commit/expected/repo/myfile2 | 0 | https://github.com/jesseduffield/lazygit/commit/666180cfd02f7ff69faa7425caa5eef358955827 | [
0.00017498859961051494,
0.00017498859961051494,
0.00017498859961051494,
0.00017498859961051494,
0
] |
{
"id": 1,
"code_window": [
"type LogConfig struct {\n",
"\tOrder string `yaml:\"order\"` // one of date-order, author-date-order, topo-order\n",
"\tShowGraph string `yaml:\"showGraph\"` // one of always, never, when-maximised\n",
"}\n",
"\n",
"type CommitPrefixConfig struct {\n",
"\tPattern string `yaml:\"pattern\"`\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tShowWholeGraph bool `yaml:\"showWholeGraph\"`\n"
],
"file_path": "pkg/config/user_config.go",
"type": "add",
"edit_start_line_idx": 108
} | package context
import (
"github.com/jesseduffield/gocui"
"github.com/jesseduffield/lazygit/pkg/commands/models"
"github.com/jesseduffield/lazygit/pkg/gui/types"
)
type LocalCommitsContext struct {
*LocalCommitsViewModel
*ViewportListContextTrait
}
var _ types.IListContext = (*LocalCommitsContext)(nil)
func NewLocalCommitsContext(
getModel func() []*models.Commit,
view *gocui.View,
getDisplayStrings func(startIdx int, length int) [][]string,
onFocus func(...types.OnFocusOpts) error,
onRenderToMain func(...types.OnFocusOpts) error,
onFocusLost func() error,
c *types.HelperCommon,
) *LocalCommitsContext {
viewModel := NewLocalCommitsViewModel(getModel)
return &LocalCommitsContext{
LocalCommitsViewModel: viewModel,
ViewportListContextTrait: &ViewportListContextTrait{
ListContextTrait: &ListContextTrait{
Context: NewSimpleContext(NewBaseContext(NewBaseContextOpts{
ViewName: "commits",
WindowName: "commits",
Key: LOCAL_COMMITS_CONTEXT_KEY,
Kind: types.SIDE_CONTEXT,
Focusable: true,
}), ContextCallbackOpts{
OnFocus: onFocus,
OnFocusLost: onFocusLost,
OnRenderToMain: onRenderToMain,
}),
list: viewModel,
viewTrait: NewViewTrait(view),
getDisplayStrings: getDisplayStrings,
c: c,
},
},
}
}
func (self *LocalCommitsContext) GetSelectedItemId() string {
item := self.GetSelected()
if item == nil {
return ""
}
return item.ID()
}
type LocalCommitsViewModel struct {
*BasicViewModel[*models.Commit]
// If this is true we limit the amount of commits we load, for the sake of keeping things fast.
// If the user attempts to scroll past the end of the list, we will load more commits.
limitCommits bool
// If this is true we'll use git log --all when fetching the commits.
showWholeGitGraph bool
}
func NewLocalCommitsViewModel(getModel func() []*models.Commit) *LocalCommitsViewModel {
self := &LocalCommitsViewModel{
BasicViewModel: NewBasicViewModel(getModel),
limitCommits: true,
}
return self
}
func (self *LocalCommitsContext) CanRebase() bool {
return true
}
func (self *LocalCommitsContext) GetSelectedRef() types.Ref {
commit := self.GetSelected()
if commit == nil {
return nil
}
return commit
}
func (self *LocalCommitsViewModel) SetLimitCommits(value bool) {
self.limitCommits = value
}
func (self *LocalCommitsViewModel) GetLimitCommits() bool {
return self.limitCommits
}
func (self *LocalCommitsViewModel) SetShowWholeGitGraph(value bool) {
self.showWholeGitGraph = value
}
func (self *LocalCommitsViewModel) GetShowWholeGitGraph() bool {
return self.showWholeGitGraph
}
func (self *LocalCommitsViewModel) GetCommits() []*models.Commit {
return self.getModel()
}
| pkg/gui/context/local_commits_context.go | 1 | https://github.com/jesseduffield/lazygit/commit/666180cfd02f7ff69faa7425caa5eef358955827 | [
0.0036271612625569105,
0.0007061948417685926,
0.00017235234554391354,
0.00024122680770233274,
0.001002797158434987
] |
{
"id": 1,
"code_window": [
"type LogConfig struct {\n",
"\tOrder string `yaml:\"order\"` // one of date-order, author-date-order, topo-order\n",
"\tShowGraph string `yaml:\"showGraph\"` // one of always, never, when-maximised\n",
"}\n",
"\n",
"type CommitPrefixConfig struct {\n",
"\tPattern string `yaml:\"pattern\"`\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tShowWholeGraph bool `yaml:\"showWholeGraph\"`\n"
],
"file_path": "pkg/config/user_config.go",
"type": "add",
"edit_start_line_idx": 108
} | # Change from termbox to tcell
Original GOCUI was written on top of [termbox](https://github.com/nsf/termbox-go) package. This document describes changes which were done to be able to use to [tcell/v2](https://github.com/gdamore/tcell) package.
## Attribute color
Attribute type represents a terminal attribute like color and font effects. Color and font effects can be combined using bitwise OR (`|`).
In `termbox`, colors were represented by the range 1 to 256. `0` was the default color, which uses the terminal default setting.
In `tcell`, colors can be represented in 24 bits, and all of them start from 0. Valid colors have a special flag which gives them a real value starting from 4294967296. `0` is the default, similar to `termbox`.
The change to support all these colors was made in a way that the original colors from 1 to 256 are backward compatible, and if a user has a color specified as
`Attribute(ansicolor+1)` without the valid color flag, it will be translated to a `tcell` color by subtracting 1 and making the color valid by adding the flag. This should ensure backward compatibility.
All the color constants are the same, with different underlying values. From a user's perspective this should be fine unless some arithmetic is done with them. For example, `ColorBlack` was `1` in the original version but is `4294967296` in the new version.
GOCUI provides a few helper functions which could be used to get the real color value or to create a color attribute.
- `(a Attribute).Hex()` - returns `int32` value of the color represented as `Red << 16 | Green << 8 | Blue`
- `(a Attribute).RGB()` - returns 3 `int32` values for red, green and blue color.
- `GetColor(string)` - creates `Attribute` from color passed as a string. This can be hex value or color name (W3C name).
- `Get256Color(int32)` - creates `Attribute` from color number (ANSI colors).
- `GetRGBColor(int32)` - creates `Attribute` from color number created the same way as `Hex()` function returns.
- `NewRGBColor(int32, int32, int32)` - creates `Attribute` from color numbers for red, green and blue values.
## Attribute font effect
There were 3 attributes for font effects: `AttrBold`, `AttrUnderline` and `AttrReverse`.
`tcell` supports more attributes, so they were added. All of these attributes have different values than before. However, they can be used in the same way as before.
All the font effect attributes:
- `AttrBold`
- `AttrBlink`
- `AttrReverse`
- `AttrUnderline`
- `AttrDim`
- `AttrItalic`
- `AttrStrikeThrough`
## OutputMode
`OutputMode` in `termbox` was used to translate colors into the correct range. So for example in `OutputGrayscale` you had colors from 1 - 24 all representing gray colors in range 232 - 255, and white and black color.
`tcell` colors are 24bit and they are translated by the library into the color which can be read by terminal.
The original translation from `termbox` was included in GOCUI to be backward compatible. This is enabled in all the original modes: `OutputNormal`, `Output216`, `OutputGrayscale` and `Output256`.
`OutputTrue` is a new mode. It is recomended, because in this mode GOCUI doesn't do any kind of translation of the colors and pass them directly to `tcell`. If user wants to use true color in terminal and this mode doesn't work, it might be because of the terminal setup. `tcell` has a documentation what needs to be done, but in short `COLORTERM=truecolor` environment variable should help (see [_examples/colorstrue.go](./_examples/colorstrue.go)). Other way would be to have `TERM` environment variable having value with suffix `-truecolor`. To disable true color set `TCELL_TRUECOLOR=disable`.
## Keybinding
`termbox` had different way of handling input from terminal than `tcell`. This leads to some adjustement on how the keys are represented.
In general, all the keys in GOCUI should be presented from before, but the underlying values might be different. This could lead to some problems if a user uses different parser to create the `Key` for the keybinding. If using GOCUI parser, everything should be ok.
Mouse is handled differently in `tcell`, but translation was done to keep it in the same way as it was before. However this was harder to test due to different behaviour across the platforms, so if anything is missing or not working, please report.
| vendor/github.com/jesseduffield/gocui/CHANGES_tcell.md | 0 | https://github.com/jesseduffield/lazygit/commit/666180cfd02f7ff69faa7425caa5eef358955827 | [
0.00017575579113326967,
0.00017002905951812863,
0.0001638315588934347,
0.00017026712885126472,
0.00000420040441895253
] |
{
"id": 1,
"code_window": [
"type LogConfig struct {\n",
"\tOrder string `yaml:\"order\"` // one of date-order, author-date-order, topo-order\n",
"\tShowGraph string `yaml:\"showGraph\"` // one of always, never, when-maximised\n",
"}\n",
"\n",
"type CommitPrefixConfig struct {\n",
"\tPattern string `yaml:\"pattern\"`\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tShowWholeGraph bool `yaml:\"showWholeGraph\"`\n"
],
"file_path": "pkg/config/user_config.go",
"type": "add",
"edit_start_line_idx": 108
} | package presentation
import (
"os"
"strings"
"testing"
"github.com/gookit/color"
"github.com/jesseduffield/generics/set"
"github.com/jesseduffield/lazygit/pkg/commands/git_commands"
"github.com/jesseduffield/lazygit/pkg/commands/models"
"github.com/jesseduffield/lazygit/pkg/utils"
"github.com/stretchr/testify/assert"
"github.com/xo/terminfo"
)
func init() {
color.ForceSetColorLevel(terminfo.ColorLevelNone)
}
func formatExpected(expected string) string {
return strings.TrimSpace(strings.ReplaceAll(expected, "\t", ""))
}
func TestGetCommitListDisplayStrings(t *testing.T) {
scenarios := []struct {
testName string
commits []*models.Commit
fullDescription bool
cherryPickedCommitShaSet *set.Set[string]
diffName string
timeFormat string
parseEmoji bool
selectedCommitSha string
startIdx int
length int
showGraph bool
bisectInfo *git_commands.BisectInfo
expected string
focus bool
}{
{
testName: "no commits",
commits: []*models.Commit{},
startIdx: 0,
length: 1,
showGraph: false,
bisectInfo: git_commands.NewNullBisectInfo(),
cherryPickedCommitShaSet: set.New[string](),
expected: "",
},
{
testName: "some commits",
commits: []*models.Commit{
{Name: "commit1", Sha: "sha1"},
{Name: "commit2", Sha: "sha2"},
},
startIdx: 0,
length: 2,
showGraph: false,
bisectInfo: git_commands.NewNullBisectInfo(),
cherryPickedCommitShaSet: set.New[string](),
expected: formatExpected(`
sha1 commit1
sha2 commit2
`),
},
{
testName: "showing graph",
commits: []*models.Commit{
{Name: "commit1", Sha: "sha1", Parents: []string{"sha2", "sha3"}},
{Name: "commit2", Sha: "sha2", Parents: []string{"sha3"}},
{Name: "commit3", Sha: "sha3", Parents: []string{"sha4"}},
{Name: "commit4", Sha: "sha4", Parents: []string{"sha5"}},
{Name: "commit5", Sha: "sha5", Parents: []string{"sha7"}},
},
startIdx: 0,
length: 5,
showGraph: true,
bisectInfo: git_commands.NewNullBisectInfo(),
cherryPickedCommitShaSet: set.New[string](),
expected: formatExpected(`
sha1 ⏣─╮ commit1
sha2 ◯ │ commit2
sha3 ◯─╯ commit3
sha4 ◯ commit4
sha5 ◯ commit5
`),
},
{
testName: "showing graph, including rebase commits",
commits: []*models.Commit{
{Name: "commit1", Sha: "sha1", Parents: []string{"sha2", "sha3"}, Action: "pick"},
{Name: "commit2", Sha: "sha2", Parents: []string{"sha3"}, Action: "pick"},
{Name: "commit3", Sha: "sha3", Parents: []string{"sha4"}},
{Name: "commit4", Sha: "sha4", Parents: []string{"sha5"}},
{Name: "commit5", Sha: "sha5", Parents: []string{"sha7"}},
},
startIdx: 0,
length: 5,
showGraph: true,
bisectInfo: git_commands.NewNullBisectInfo(),
cherryPickedCommitShaSet: set.New[string](),
expected: formatExpected(`
sha1 pick commit1
sha2 pick commit2
sha3 ◯ commit3
sha4 ◯ commit4
sha5 ◯ commit5
`),
},
{
testName: "showing graph, including rebase commits, with offset",
commits: []*models.Commit{
{Name: "commit1", Sha: "sha1", Parents: []string{"sha2", "sha3"}, Action: "pick"},
{Name: "commit2", Sha: "sha2", Parents: []string{"sha3"}, Action: "pick"},
{Name: "commit3", Sha: "sha3", Parents: []string{"sha4"}},
{Name: "commit4", Sha: "sha4", Parents: []string{"sha5"}},
{Name: "commit5", Sha: "sha5", Parents: []string{"sha7"}},
},
startIdx: 1,
length: 10,
showGraph: true,
bisectInfo: git_commands.NewNullBisectInfo(),
cherryPickedCommitShaSet: set.New[string](),
expected: formatExpected(`
sha2 pick commit2
sha3 ◯ commit3
sha4 ◯ commit4
sha5 ◯ commit5
`),
},
{
testName: "startIdx is passed TODO commits",
commits: []*models.Commit{
{Name: "commit1", Sha: "sha1", Parents: []string{"sha2", "sha3"}, Action: "pick"},
{Name: "commit2", Sha: "sha2", Parents: []string{"sha3"}, Action: "pick"},
{Name: "commit3", Sha: "sha3", Parents: []string{"sha4"}},
{Name: "commit4", Sha: "sha4", Parents: []string{"sha5"}},
{Name: "commit5", Sha: "sha5", Parents: []string{"sha7"}},
},
startIdx: 3,
length: 2,
showGraph: true,
bisectInfo: git_commands.NewNullBisectInfo(),
cherryPickedCommitShaSet: set.New[string](),
expected: formatExpected(`
sha4 ◯ commit4
sha5 ◯ commit5
`),
},
{
testName: "only showing TODO commits",
commits: []*models.Commit{
{Name: "commit1", Sha: "sha1", Parents: []string{"sha2", "sha3"}, Action: "pick"},
{Name: "commit2", Sha: "sha2", Parents: []string{"sha3"}, Action: "pick"},
{Name: "commit3", Sha: "sha3", Parents: []string{"sha4"}},
{Name: "commit4", Sha: "sha4", Parents: []string{"sha5"}},
{Name: "commit5", Sha: "sha5", Parents: []string{"sha7"}},
},
startIdx: 0,
length: 2,
showGraph: true,
bisectInfo: git_commands.NewNullBisectInfo(),
cherryPickedCommitShaSet: set.New[string](),
expected: formatExpected(`
sha1 pick commit1
sha2 pick commit2
`),
},
{
testName: "no TODO commits, towards bottom",
commits: []*models.Commit{
{Name: "commit1", Sha: "sha1", Parents: []string{"sha2", "sha3"}},
{Name: "commit2", Sha: "sha2", Parents: []string{"sha3"}},
{Name: "commit3", Sha: "sha3", Parents: []string{"sha4"}},
{Name: "commit4", Sha: "sha4", Parents: []string{"sha5"}},
{Name: "commit5", Sha: "sha5", Parents: []string{"sha7"}},
},
startIdx: 4,
length: 2,
showGraph: true,
bisectInfo: git_commands.NewNullBisectInfo(),
cherryPickedCommitShaSet: set.New[string](),
expected: formatExpected(`
sha5 ◯ commit5
`),
},
{
testName: "only TODO commits except last",
commits: []*models.Commit{
{Name: "commit1", Sha: "sha1", Parents: []string{"sha2", "sha3"}, Action: "pick"},
{Name: "commit2", Sha: "sha2", Parents: []string{"sha3"}, Action: "pick"},
{Name: "commit3", Sha: "sha3", Parents: []string{"sha4"}, Action: "pick"},
{Name: "commit4", Sha: "sha4", Parents: []string{"sha5"}, Action: "pick"},
{Name: "commit5", Sha: "sha5", Parents: []string{"sha7"}},
},
startIdx: 0,
length: 2,
showGraph: true,
bisectInfo: git_commands.NewNullBisectInfo(),
cherryPickedCommitShaSet: set.New[string](),
expected: formatExpected(`
sha1 pick commit1
sha2 pick commit2
`),
},
{
testName: "custom time format",
commits: []*models.Commit{
{Name: "commit1", Sha: "sha1", UnixTimestamp: 1652443200, AuthorName: "Jesse Duffield"},
{Name: "commit2", Sha: "sha2", UnixTimestamp: 1652529600, AuthorName: "Jesse Duffield"},
},
fullDescription: true,
timeFormat: "2006-01-02 15:04:05",
startIdx: 0,
length: 2,
showGraph: false,
bisectInfo: git_commands.NewNullBisectInfo(),
cherryPickedCommitShaSet: set.New[string](),
expected: formatExpected(`
sha1 2022-05-13 12:00:00 Jesse Duffield commit1
sha2 2022-05-14 12:00:00 Jesse Duffield commit2
`),
},
}
os.Setenv("TZ", "UTC")
focusing := false
for _, scenario := range scenarios {
if scenario.focus {
focusing = true
}
}
for _, s := range scenarios {
s := s
if !focusing || s.focus {
t.Run(s.testName, func(t *testing.T) {
result := GetCommitListDisplayStrings(
s.commits,
s.fullDescription,
s.cherryPickedCommitShaSet,
s.diffName,
s.timeFormat,
s.parseEmoji,
s.selectedCommitSha,
s.startIdx,
s.length,
s.showGraph,
s.bisectInfo,
)
renderedResult := utils.RenderDisplayStrings(result)
t.Logf("\n%s", renderedResult)
assert.EqualValues(t, s.expected, renderedResult)
})
}
}
}
| pkg/gui/presentation/commits_test.go | 0 | https://github.com/jesseduffield/lazygit/commit/666180cfd02f7ff69faa7425caa5eef358955827 | [
0.007913683541119099,
0.0006865162285976112,
0.00016582486568950117,
0.00022539033670909703,
0.001472946023568511
] |
{
"id": 1,
"code_window": [
"type LogConfig struct {\n",
"\tOrder string `yaml:\"order\"` // one of date-order, author-date-order, topo-order\n",
"\tShowGraph string `yaml:\"showGraph\"` // one of always, never, when-maximised\n",
"}\n",
"\n",
"type CommitPrefixConfig struct {\n",
"\tPattern string `yaml:\"pattern\"`\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tShowWholeGraph bool `yaml:\"showWholeGraph\"`\n"
],
"file_path": "pkg/config/user_config.go",
"type": "add",
"edit_start_line_idx": 108
} | {
"description": "This test verifies that merge conflicts resolved externally are recognised by lazygit and the user is prompted to continue",
"speed": 10
}
| test/integration/mergeConflictsResolvedExternally/test.json | 0 | https://github.com/jesseduffield/lazygit/commit/666180cfd02f7ff69faa7425caa5eef358955827 | [
0.00017432545428164303,
0.00017432545428164303,
0.00017432545428164303,
0.00017432545428164303,
0
] |