| hunk (dict) | file (string, length 0-11.8M) | file_path (string, length 2-234) | label (int64, 0-1) | commit_url (string, length 74-103) | dependency_score (sequence, length 5) |
---|---|---|---|---|---|
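The header above describes rows pairing a code-edit hunk with a candidate file, a relevance label, the originating commit URL, and five dependency scores. A minimal Go sketch of the implied record shape follows; the `Hunk` field names are inferred from the JSON objects shown in the rows below and may not match the dataset's official schema.

```go
package records

// Hunk mirrors the JSON object stored in the "hunk" column. Field names are
// inferred from the records in this dump, not from published documentation.
type Hunk struct {
	ID               int      `json:"id"`
	CodeWindow       []string `json:"code_window"`
	Labels           []string `json:"labels"`
	AfterEdit        []string `json:"after_edit"`
	FilePath         string   `json:"file_path"`
	Type             string   `json:"type"`
	EditStartLineIdx int      `json:"edit_start_line_idx"`
}

// Record mirrors one row of the table: an edit hunk, the full candidate file,
// and metadata about the originating commit.
type Record struct {
	Hunk            Hunk      // "hunk" column (dict)
	File            string    // "file" column: full file contents
	FilePath        string    // "file_path" column
	Label           int       // "label" column: 0 or 1
	CommitURL       string    // "commit_url" column
	DependencyScore []float64 // "dependency_score" column: 5 floats per row
}
```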
{
"id": 13,
"code_window": [
"\t\t}\n",
"\t\tif done != test.done || msg != test.msg {\n",
"\t\t\tt.Errorf(\"deployment with generation %d, %d replicas specified, and status:\\n%+v\\nreturned:\\n%q, %t\\nwant:\\n%q, %t\",\n",
"\t\t\t\ttest.generation,\n",
"\t\t\t\ttest.specReplicas,\n",
"\t\t\t\ttest.status,\n",
"\t\t\t\tmsg,\n",
"\t\t\t\tdone,\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tt.Errorf(\"DeploymentStatusViewer.Status() for deployment with generation %d, %d replicas specified, and status %+v returned %q, %t, want %q, %t\",\n"
],
"file_path": "pkg/kubectl/rollout_status_test.go",
"type": "replace",
"edit_start_line_idx": 160
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubectl
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion"
"k8s.io/kubernetes/pkg/controller/deployment/util"
)
// StatusViewer provides an interface for resources that have rollout status.
type StatusViewer interface {
Status(namespace, name string, revision int64) (string, bool, error)
}
func StatusViewerFor(kind schema.GroupKind, c internalclientset.Interface) (StatusViewer, error) {
switch kind {
case extensions.Kind("Deployment"), apps.Kind("Deployment"):
return &DeploymentStatusViewer{c.Extensions()}, nil
case extensions.Kind("DaemonSet"):
return &DaemonSetStatusViewer{c.Extensions()}, nil
}
return nil, fmt.Errorf("no status viewer has been implemented for %v", kind)
}
type DeploymentStatusViewer struct {
c extensionsclient.DeploymentsGetter
}
type DaemonSetStatusViewer struct {
c extensionsclient.DaemonSetsGetter
}
// Status returns a message describing deployment status, and a bool value indicating if the status is considered done
func (s *DeploymentStatusViewer) Status(namespace, name string, revision int64) (string, bool, error) {
deployment, err := s.c.Deployments(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return "", false, err
}
if revision > 0 {
deploymentRev, err := util.Revision(deployment)
if err != nil {
return "", false, fmt.Errorf("cannot get the revision of deployment %q: %v", deployment.Name, err)
}
if revision != deploymentRev {
return "", false, fmt.Errorf("desired revision (%d) is different from the running revision (%d)", revision, deploymentRev)
}
}
if deployment.Generation <= deployment.Status.ObservedGeneration {
cond := util.GetDeploymentConditionInternal(deployment.Status, extensions.DeploymentProgressing)
if cond != nil && cond.Reason == util.TimedOutReason {
return "", false, fmt.Errorf("deployment %q exceeded its progress deadline", name)
}
if deployment.Status.UpdatedReplicas < deployment.Spec.Replicas {
return fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas have been updated...\n", deployment.Status.UpdatedReplicas, deployment.Spec.Replicas), false, nil
}
if deployment.Status.Replicas > deployment.Status.UpdatedReplicas {
return fmt.Sprintf("Waiting for rollout to finish: %d old replicas are pending termination...\n", deployment.Status.Replicas-deployment.Status.UpdatedReplicas), false, nil
}
minRequired := deployment.Spec.Replicas - util.MaxUnavailableInternal(*deployment)
if deployment.Status.AvailableReplicas < minRequired {
return fmt.Sprintf("Waiting for rollout to finish: %d of %d updated replicas are available (minimum required: %d)...\n", deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas, minRequired), false, nil
}
return fmt.Sprintf("deployment %q successfully rolled out\n", name), true, nil
}
return fmt.Sprintf("Waiting for deployment spec update to be observed...\n"), false, nil
}
// Status returns a message describing daemon set status, and a bool value indicating if the status is considered done
func (s *DaemonSetStatusViewer) Status(namespace, name string, revision int64) (string, bool, error) {
	// ignoring revision, as DaemonSets do not have history yet
daemon, err := s.c.DaemonSets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return "", false, err
}
if daemon.Spec.UpdateStrategy.Type != extensions.RollingUpdateDaemonSetStrategyType {
return "", true, fmt.Errorf("Status is available only for RollingUpdate strategy type")
}
if daemon.Generation <= daemon.Status.ObservedGeneration {
if daemon.Status.UpdatedNumberScheduled < daemon.Status.DesiredNumberScheduled {
return fmt.Sprintf("Waiting for rollout to finish: %d out of %d new pods have been updated...\n", daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled), false, nil
}
maxUnavailable, _ := intstrutil.GetValueFromIntOrPercent(&daemon.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, int(daemon.Status.DesiredNumberScheduled), true)
minRequired := daemon.Status.DesiredNumberScheduled - int32(maxUnavailable)
if daemon.Status.NumberAvailable < minRequired {
return fmt.Sprintf("Waiting for rollout to finish: %d of %d updated pods are available (minimum required: %d)...\n", daemon.Status.NumberAvailable, daemon.Status.DesiredNumberScheduled, minRequired), false, nil
}
return fmt.Sprintf("daemon set %q successfully rolled out\n", name), true, nil
}
return fmt.Sprintf("Waiting for daemon set spec update to be observed...\n"), false, nil
}
| pkg/kubectl/rollout_status.go | 1 | https://github.com/kubernetes/kubernetes/commit/1923cc60c95943543f036a1c9633f3c4219c917f | [
0.002101072110235691,
0.0004827352240681648,
0.00016821862664073706,
0.0002861689426936209,
0.0005226670764386654
] |
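If the `labels` array marks which `code_window` lines are kept and which are replaced by the `after_edit` lines, applying a hunk might look roughly like the sketch below. This interpretation is an assumption based on the record above, not a documented contract of the dataset.

```go
package main

import (
	"fmt"
	"strings"
)

// applyHunk rebuilds the edited window: lines labeled "keep" pass through
// unchanged, and the after_edit lines are substituted at the first "replace"
// position. This is one plausible reading of the record format, not a
// reference implementation.
func applyHunk(codeWindow, labels, afterEdit []string) ([]string, error) {
	if len(codeWindow) != len(labels) {
		return nil, fmt.Errorf("code_window has %d lines but labels has %d entries",
			len(codeWindow), len(labels))
	}
	var out []string
	replaced := false
	for i, label := range labels {
		switch label {
		case "keep":
			out = append(out, codeWindow[i])
		case "replace":
			if !replaced {
				out = append(out, afterEdit...)
				replaced = true
			}
		default:
			return nil, fmt.Errorf("unknown label %q at index %d", label, i)
		}
	}
	return out, nil
}

func main() {
	// Hypothetical two-line window with one replaced line.
	window := []string{"\t\tif done != test.done {\n", "\t\t\tt.Errorf(\"old message\",\n"}
	labels := []string{"keep", "replace"}
	after := []string{"\t\t\tt.Errorf(\"new message\",\n"}
	edited, err := applyHunk(window, labels, after)
	if err != nil {
		panic(err)
	}
	fmt.Print(strings.Join(edited, ""))
}
```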
{
"id": 13,
"code_window": [
"\t\t}\n",
"\t\tif done != test.done || msg != test.msg {\n",
"\t\t\tt.Errorf(\"deployment with generation %d, %d replicas specified, and status:\\n%+v\\nreturned:\\n%q, %t\\nwant:\\n%q, %t\",\n",
"\t\t\t\ttest.generation,\n",
"\t\t\t\ttest.specReplicas,\n",
"\t\t\t\ttest.status,\n",
"\t\t\t\tmsg,\n",
"\t\t\t\tdone,\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tt.Errorf(\"DeploymentStatusViewer.Status() for deployment with generation %d, %d replicas specified, and status %+v returned %q, %t, want %q, %t\",\n"
],
"file_path": "pkg/kubectl/rollout_status_test.go",
"type": "replace",
"edit_start_line_idx": 160
} | /*
Copyright (c) 2015-2016 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package object
import (
"fmt"
"io"
"math/rand"
"os"
"path"
"strings"
"context"
"net/http"
"net/url"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/session"
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/soap"
"github.com/vmware/govmomi/vim25/types"
)
// DatastoreNoSuchDirectoryError is returned when a directory could not be found.
type DatastoreNoSuchDirectoryError struct {
verb string
subject string
}
func (e DatastoreNoSuchDirectoryError) Error() string {
return fmt.Sprintf("cannot %s '%s': No such directory", e.verb, e.subject)
}
// DatastoreNoSuchFileError is returned when a file could not be found.
type DatastoreNoSuchFileError struct {
verb string
subject string
}
func (e DatastoreNoSuchFileError) Error() string {
return fmt.Sprintf("cannot %s '%s': No such file", e.verb, e.subject)
}
type Datastore struct {
Common
DatacenterPath string
}
func NewDatastore(c *vim25.Client, ref types.ManagedObjectReference) *Datastore {
return &Datastore{
Common: NewCommon(c, ref),
}
}
func (d Datastore) Path(path string) string {
return (&DatastorePath{
Datastore: d.Name(),
Path: path,
}).String()
}
// NewURL constructs a url.URL with the given file path for datastore access over HTTP.
func (d Datastore) NewURL(path string) *url.URL {
u := d.c.URL()
return &url.URL{
Scheme: u.Scheme,
Host: u.Host,
Path: fmt.Sprintf("/folder/%s", path),
RawQuery: url.Values{
"dcPath": []string{d.DatacenterPath},
"dsName": []string{d.Name()},
}.Encode(),
}
}
// URL is deprecated, use NewURL instead.
func (d Datastore) URL(ctx context.Context, dc *Datacenter, path string) (*url.URL, error) {
return d.NewURL(path), nil
}
func (d Datastore) Browser(ctx context.Context) (*HostDatastoreBrowser, error) {
var do mo.Datastore
err := d.Properties(ctx, d.Reference(), []string{"browser"}, &do)
if err != nil {
return nil, err
}
return NewHostDatastoreBrowser(d.c, do.Browser), nil
}
func (d Datastore) useServiceTicket() bool {
// If connected to workstation, service ticketing not supported
// If connected to ESX, service ticketing not needed
if !d.c.IsVC() {
return false
}
key := "GOVMOMI_USE_SERVICE_TICKET"
val := d.c.URL().Query().Get(key)
if val == "" {
val = os.Getenv(key)
}
if val == "1" || val == "true" {
return true
}
return false
}
func (d Datastore) useServiceTicketHostName(name string) bool {
// No need if talking directly to ESX.
if !d.c.IsVC() {
return false
}
// If version happens to be < 5.1
if name == "" {
return false
}
// If the HostSystem is using DHCP on a network without dynamic DNS,
// HostSystem.Config.Network.DnsConfig.HostName is set to "localhost" by default.
// This resolves to "localhost.localdomain" by default via /etc/hosts on ESX.
// In that case, we will stick with the HostSystem.Name which is the IP address that
// was used to connect the host to VC.
if name == "localhost.localdomain" {
return false
}
	// It is still possible to have a HostName that doesn't resolve via DNS,
// so we default to false.
key := "GOVMOMI_USE_SERVICE_TICKET_HOSTNAME"
val := d.c.URL().Query().Get(key)
if val == "" {
val = os.Getenv(key)
}
if val == "1" || val == "true" {
return true
}
return false
}
type datastoreServiceTicketHostKey struct{}
// HostContext returns a Context where the given host will be used for datastore HTTP access
// via the ServiceTicket method.
func (d Datastore) HostContext(ctx context.Context, host *HostSystem) context.Context {
return context.WithValue(ctx, datastoreServiceTicketHostKey{}, host)
}
// ServiceTicket obtains a ticket via AcquireGenericServiceTicket and returns it as an http.Cookie, along with the
// url.URL that can be used together with the ticket cookie to access the given path. A host is chosen at random
// unless the given Context was created with a specific host via the HostContext method.
func (d Datastore) ServiceTicket(ctx context.Context, path string, method string) (*url.URL, *http.Cookie, error) {
u := d.NewURL(path)
host, ok := ctx.Value(datastoreServiceTicketHostKey{}).(*HostSystem)
if !ok {
if !d.useServiceTicket() {
return u, nil, nil
}
hosts, err := d.AttachedHosts(ctx)
if err != nil {
return nil, nil, err
}
if len(hosts) == 0 {
// Fallback to letting vCenter choose a host
return u, nil, nil
}
// Pick a random attached host
host = hosts[rand.Intn(len(hosts))]
}
ips, err := host.ManagementIPs(ctx)
if err != nil {
return nil, nil, err
}
if len(ips) > 0 {
// prefer a ManagementIP
u.Host = ips[0].String()
} else {
// fallback to inventory name
u.Host, err = host.ObjectName(ctx)
if err != nil {
return nil, nil, err
}
}
// VC datacenter path will not be valid against ESX
q := u.Query()
delete(q, "dcPath")
u.RawQuery = q.Encode()
spec := types.SessionManagerHttpServiceRequestSpec{
Url: u.String(),
// See SessionManagerHttpServiceRequestSpecMethod enum
Method: fmt.Sprintf("http%s%s", method[0:1], strings.ToLower(method[1:])),
}
sm := session.NewManager(d.Client())
ticket, err := sm.AcquireGenericServiceTicket(ctx, &spec)
if err != nil {
return nil, nil, err
}
cookie := &http.Cookie{
Name: "vmware_cgi_ticket",
Value: ticket.Id,
}
if d.useServiceTicketHostName(ticket.HostName) {
u.Host = ticket.HostName
}
d.Client().SetThumbprint(u.Host, ticket.SslThumbprint)
return u, cookie, nil
}
func (d Datastore) uploadTicket(ctx context.Context, path string, param *soap.Upload) (*url.URL, *soap.Upload, error) {
p := soap.DefaultUpload
if param != nil {
p = *param // copy
}
u, ticket, err := d.ServiceTicket(ctx, path, p.Method)
if err != nil {
return nil, nil, err
}
p.Ticket = ticket
return u, &p, nil
}
func (d Datastore) downloadTicket(ctx context.Context, path string, param *soap.Download) (*url.URL, *soap.Download, error) {
p := soap.DefaultDownload
if param != nil {
p = *param // copy
}
u, ticket, err := d.ServiceTicket(ctx, path, p.Method)
if err != nil {
return nil, nil, err
}
p.Ticket = ticket
return u, &p, nil
}
// Upload via soap.Upload with an http service ticket
func (d Datastore) Upload(ctx context.Context, f io.Reader, path string, param *soap.Upload) error {
u, p, err := d.uploadTicket(ctx, path, param)
if err != nil {
return err
}
return d.Client().Upload(f, u, p)
}
// UploadFile via soap.Upload with an http service ticket
func (d Datastore) UploadFile(ctx context.Context, file string, path string, param *soap.Upload) error {
u, p, err := d.uploadTicket(ctx, path, param)
if err != nil {
return err
}
return d.Client().UploadFile(file, u, p)
}
// Download via soap.Download with an http service ticket
func (d Datastore) Download(ctx context.Context, path string, param *soap.Download) (io.ReadCloser, int64, error) {
u, p, err := d.downloadTicket(ctx, path, param)
if err != nil {
return nil, 0, err
}
return d.Client().Download(u, p)
}
// DownloadFile via soap.Download with an http service ticket
func (d Datastore) DownloadFile(ctx context.Context, path string, file string, param *soap.Download) error {
u, p, err := d.downloadTicket(ctx, path, param)
if err != nil {
return err
}
return d.Client().DownloadFile(file, u, p)
}
// AttachedHosts returns hosts that have this Datastore attached, accessible and writable.
func (d Datastore) AttachedHosts(ctx context.Context) ([]*HostSystem, error) {
var ds mo.Datastore
var hosts []*HostSystem
pc := property.DefaultCollector(d.Client())
err := pc.RetrieveOne(ctx, d.Reference(), []string{"host"}, &ds)
if err != nil {
return nil, err
}
mounts := make(map[types.ManagedObjectReference]types.DatastoreHostMount)
var refs []types.ManagedObjectReference
for _, host := range ds.Host {
refs = append(refs, host.Key)
mounts[host.Key] = host
}
var hs []mo.HostSystem
err = pc.Retrieve(ctx, refs, []string{"runtime.connectionState", "runtime.powerState"}, &hs)
if err != nil {
return nil, err
}
for _, host := range hs {
if host.Runtime.ConnectionState == types.HostSystemConnectionStateConnected &&
host.Runtime.PowerState == types.HostSystemPowerStatePoweredOn {
mount := mounts[host.Reference()]
info := mount.MountInfo
if *info.Mounted && *info.Accessible && info.AccessMode == string(types.HostMountModeReadWrite) {
hosts = append(hosts, NewHostSystem(d.Client(), mount.Key))
}
}
}
return hosts, nil
}
// AttachedClusterHosts returns hosts that have this Datastore attached, accessible and writable and are members of the given cluster.
func (d Datastore) AttachedClusterHosts(ctx context.Context, cluster *ComputeResource) ([]*HostSystem, error) {
var hosts []*HostSystem
clusterHosts, err := cluster.Hosts(ctx)
if err != nil {
return nil, err
}
attachedHosts, err := d.AttachedHosts(ctx)
if err != nil {
return nil, err
}
refs := make(map[types.ManagedObjectReference]bool)
for _, host := range attachedHosts {
refs[host.Reference()] = true
}
for _, host := range clusterHosts {
if refs[host.Reference()] {
hosts = append(hosts, host)
}
}
return hosts, nil
}
func (d Datastore) Stat(ctx context.Context, file string) (types.BaseFileInfo, error) {
b, err := d.Browser(ctx)
if err != nil {
return nil, err
}
spec := types.HostDatastoreBrowserSearchSpec{
Details: &types.FileQueryFlags{
FileType: true,
FileSize: true,
Modification: true,
FileOwner: types.NewBool(true),
},
MatchPattern: []string{path.Base(file)},
}
dsPath := d.Path(path.Dir(file))
task, err := b.SearchDatastore(ctx, dsPath, &spec)
if err != nil {
return nil, err
}
info, err := task.WaitForResult(ctx, nil)
if err != nil {
if info == nil || info.Error != nil {
_, ok := info.Error.Fault.(*types.FileNotFound)
if ok {
// FileNotFound means the base path doesn't exist.
return nil, DatastoreNoSuchDirectoryError{"stat", dsPath}
}
}
return nil, err
}
res := info.Result.(types.HostDatastoreBrowserSearchResults)
if len(res.File) == 0 {
// File doesn't exist
return nil, DatastoreNoSuchFileError{"stat", d.Path(file)}
}
return res.File[0], nil
}
// Type returns the type of file system volume.
func (d Datastore) Type(ctx context.Context) (types.HostFileSystemVolumeFileSystemType, error) {
var mds mo.Datastore
if err := d.Properties(ctx, d.Reference(), []string{"summary.type"}, &mds); err != nil {
return types.HostFileSystemVolumeFileSystemType(""), err
}
return types.HostFileSystemVolumeFileSystemType(mds.Summary.Type), nil
}
| vendor/github.com/vmware/govmomi/object/datastore.go | 0 | https://github.com/kubernetes/kubernetes/commit/1923cc60c95943543f036a1c9633f3c4219c917f | [
0.0006099830497987568,
0.0001867369719548151,
0.00016057092580012977,
0.00017276988364756107,
0.00007265498425113037
] |
{
"id": 13,
"code_window": [
"\t\t}\n",
"\t\tif done != test.done || msg != test.msg {\n",
"\t\t\tt.Errorf(\"deployment with generation %d, %d replicas specified, and status:\\n%+v\\nreturned:\\n%q, %t\\nwant:\\n%q, %t\",\n",
"\t\t\t\ttest.generation,\n",
"\t\t\t\ttest.specReplicas,\n",
"\t\t\t\ttest.status,\n",
"\t\t\t\tmsg,\n",
"\t\t\t\tdone,\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tt.Errorf(\"DeploymentStatusViewer.Status() for deployment with generation %d, %d replicas specified, and status %+v returned %q, %t, want %q, %t\",\n"
],
"file_path": "pkg/kubectl/rollout_status_test.go",
"type": "replace",
"edit_start_line_idx": 160
} | // THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package route53
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/signer/v4"
"github.com/aws/aws-sdk-go/private/protocol/restxml"
)
// Route53 is a client for Route 53.
// The service client's operations are safe to be used concurrently.
// It is not safe to mutate any of the client's properties though.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/route53-2013-04-01
type Route53 struct {
*client.Client
}
// Used for custom client initialization logic
var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
// Service information constants
const (
ServiceName = "route53" // Service endpoint prefix API calls made to.
EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
)
// New creates a new instance of the Route53 client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// // Create a Route53 client from just a session.
// svc := route53.New(mySession)
//
// // Create a Route53 client with additional configuration
// svc := route53.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *Route53 {
c := p.ClientConfig(EndpointsID, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Route53 {
svc := &Route53{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
SigningName: signingName,
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "2013-04-01",
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
// Run custom client initialization if present
if initClient != nil {
initClient(svc.Client)
}
return svc
}
// newRequest creates a new request for a Route53 operation and runs any
// custom request initialization.
func (c *Route53) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
// Run custom request initialization if present
if initRequest != nil {
initRequest(req)
}
return req
}
| vendor/github.com/aws/aws-sdk-go/service/route53/service.go | 0 | https://github.com/kubernetes/kubernetes/commit/1923cc60c95943543f036a1c9633f3c4219c917f | [
0.00017392216250300407,
0.00016917529865168035,
0.00016315279935952276,
0.00016927893739193678,
0.0000039331644074991345
] |
{
"id": 13,
"code_window": [
"\t\t}\n",
"\t\tif done != test.done || msg != test.msg {\n",
"\t\t\tt.Errorf(\"deployment with generation %d, %d replicas specified, and status:\\n%+v\\nreturned:\\n%q, %t\\nwant:\\n%q, %t\",\n",
"\t\t\t\ttest.generation,\n",
"\t\t\t\ttest.specReplicas,\n",
"\t\t\t\ttest.status,\n",
"\t\t\t\tmsg,\n",
"\t\t\t\tdone,\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tt.Errorf(\"DeploymentStatusViewer.Status() for deployment with generation %d, %d replicas specified, and status %+v returned %q, %t, want %q, %t\",\n"
],
"file_path": "pkg/kubectl/rollout_status_test.go",
"type": "replace",
"edit_start_line_idx": 160
} | package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["docker.go"],
tags = ["automanaged"],
deps = ["//vendor/github.com/docker/engine-api/types:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
| vendor/github.com/google/cadvisor/utils/docker/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/1923cc60c95943543f036a1c9633f3c4219c917f | [
0.0001759308361215517,
0.00017491652397438884,
0.00017336483870167285,
0.00017545389709994197,
0.0000011143497431476135
] |
{
"id": 1,
"code_window": [
"\t\t}\n",
"\t\tif err := job.NoTxn().FractionProgressed(ctx, jobs.FractionUpdater(1.1)); !testutils.IsError(err, \"outside allowable range\") {\n",
"\t\t\tt.Fatalf(\"expected 'outside allowable range' error, but got %v\", err)\n",
"\t\t}\n",
"\t\tif err := job.NoTxn().Update(ctx, func(_ isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error {\n",
"\t\t\treturn jobs.UpdateHighwaterProgressed(hlc.Timestamp{WallTime: -1}, md, ju)\n",
"\t\t}); !testutils.IsError(err, \"outside allowable range\") {\n",
"\t\t\tt.Fatalf(\"expected 'outside allowable range' error, but got %v\", err)\n",
"\t\t}\n",
"\t})\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\treturn ju.UpdateHighwaterProgressed(hlc.Timestamp{WallTime: -1}, md)\n"
],
"file_path": "pkg/jobs/jobs_test.go",
"type": "replace",
"edit_start_line_idx": 1021
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package jobs
import (
"bytes"
"context"
"fmt"
"strings"
"time"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/sql/isql"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/sqlliveness"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/errors"
)
// UpdateFn is the callback passed to Job.Update. It is called from the context
// of a transaction and is passed the current metadata for the job. The callback
// can modify metadata using the JobUpdater and the changes will be persisted
// within the same transaction.
//
// The function is free to modify contents of JobMetadata in place (but the
// changes will be ignored unless JobUpdater is used).
type UpdateFn func(txn isql.Txn, md JobMetadata, ju *JobUpdater) error
type Updater struct {
j *Job
txn isql.Txn
}
func (j *Job) NoTxn() Updater {
return Updater{j: j}
}
func (j *Job) WithTxn(txn isql.Txn) Updater {
return Updater{j: j, txn: txn}
}
func (u Updater) update(ctx context.Context, updateFn UpdateFn) (retErr error) {
if u.txn == nil {
return u.j.registry.db.Txn(ctx, func(
ctx context.Context, txn isql.Txn,
) error {
u.txn = txn
return u.update(ctx, updateFn)
})
}
ctx, sp := tracing.ChildSpan(ctx, "update-job")
defer sp.Finish()
var payload *jobspb.Payload
var progress *jobspb.Progress
var status Status
var runStats *RunStats
j := u.j
defer func() {
if retErr != nil {
retErr = errors.Wrapf(retErr, "job %d", j.id)
return
}
j.mu.Lock()
defer j.mu.Unlock()
if payload != nil {
j.mu.payload = *payload
}
if progress != nil {
j.mu.progress = *progress
}
if runStats != nil {
j.mu.runStats = runStats
}
if status != "" {
j.mu.status = status
}
}()
const loadJobQuery = `
WITH
latestpayload AS (
SELECT job_id, value
FROM system.job_info AS payload
WHERE info_key = 'legacy_payload' AND job_id = $1
ORDER BY written DESC LIMIT 1
),
latestprogress AS (
SELECT job_id, value
FROM system.job_info AS progress
WHERE info_key = 'legacy_progress' AND job_id = $1
ORDER BY written DESC LIMIT 1
)
SELECT status, payload.value AS payload, progress.value AS progress,
claim_session_id, COALESCE(last_run, created), COALESCE(num_runs, 0)
FROM system.jobs AS j
INNER JOIN latestpayload AS payload ON j.id = payload.job_id
LEFT JOIN latestprogress AS progress ON j.id = progress.job_id
WHERE id = $1
`
row, err := u.txn.QueryRowEx(
ctx, "select-job", u.txn.KV(),
sessiondata.NodeUserSessionDataOverride,
loadJobQuery, j.ID(),
)
if err != nil {
return err
}
if row == nil {
return errors.Errorf("not found in system.jobs table")
}
if status, err = unmarshalStatus(row[0]); err != nil {
return err
}
if payload, err = UnmarshalPayload(row[1]); err != nil {
return err
}
if progress, err = UnmarshalProgress(row[2]); err != nil {
return err
}
if j.session != nil {
if row[3] == tree.DNull {
return errors.Errorf(
"with status %q: expected session %q but found NULL",
status, j.session.ID())
}
storedSession := []byte(*row[3].(*tree.DBytes))
if !bytes.Equal(storedSession, j.session.ID().UnsafeBytes()) {
return errors.Errorf(
"with status %q: expected session %q but found %q",
status, j.session.ID(), sqlliveness.SessionID(storedSession))
}
} else {
log.VInfof(ctx, 1, "job %d: update called with no session ID", j.ID())
}
lastRun, ok := row[4].(*tree.DTimestamp)
if !ok {
return errors.AssertionFailedf("expected timestamp last_run, but got %T", lastRun)
}
numRuns, ok := row[5].(*tree.DInt)
if !ok {
return errors.AssertionFailedf("expected int num_runs, but got %T", numRuns)
}
md := JobMetadata{
ID: j.ID(),
Status: status,
Payload: payload,
Progress: progress,
RunStats: &RunStats{
NumRuns: int(*numRuns),
LastRun: lastRun.Time,
},
}
var ju JobUpdater
if err := updateFn(u.txn, md, &ju); err != nil {
return err
}
if j.registry.knobs.BeforeUpdate != nil {
if err := j.registry.knobs.BeforeUpdate(md, ju.md); err != nil {
return err
}
}
if !ju.hasUpdates() {
return nil
}
// Build a statement of the following form, depending on which properties
// need updating:
//
// UPDATE system.jobs
// SET
// [status = $2,]
// [payload = $y,]
// [progress = $z]
// WHERE
// id = $1
var setters []string
params := []interface{}{j.ID()} // $1 is always the job ID.
addSetter := func(column string, value interface{}) {
params = append(params, value)
setters = append(setters, fmt.Sprintf("%s = $%d", column, len(params)))
}
if ju.md.Status != "" {
addSetter("status", ju.md.Status)
}
if ju.md.RunStats != nil {
runStats = ju.md.RunStats
addSetter("last_run", ju.md.RunStats.LastRun)
addSetter("num_runs", ju.md.RunStats.NumRuns)
}
var payloadBytes []byte
if ju.md.Payload != nil {
payload = ju.md.Payload
var err error
payloadBytes, err = protoutil.Marshal(payload)
if err != nil {
return err
}
}
var progressBytes []byte
if ju.md.Progress != nil {
progress = ju.md.Progress
progress.ModifiedMicros = timeutil.ToUnixMicros(u.now())
var err error
progressBytes, err = protoutil.Marshal(progress)
if err != nil {
return err
}
}
if len(setters) != 0 {
updateStmt := fmt.Sprintf(
"UPDATE system.jobs SET %s WHERE id = $1",
strings.Join(setters, ", "),
)
n, err := u.txn.ExecEx(
ctx, "job-update", u.txn.KV(),
sessiondata.NodeUserSessionDataOverride,
updateStmt, params...,
)
if err != nil {
return err
}
if n != 1 {
return errors.Errorf(
"expected exactly one row affected, but %d rows affected by job update", n,
)
}
}
// Insert the job payload and progress into the system.jobs_info table.
infoStorage := j.InfoStorage(u.txn)
if payloadBytes != nil {
if err := infoStorage.WriteLegacyPayload(ctx, payloadBytes); err != nil {
return err
}
}
if progressBytes != nil {
if err := infoStorage.WriteLegacyProgress(ctx, progressBytes); err != nil {
return err
}
}
return nil
}
// RunStats consists of job-run statistics: num of runs and last-run timestamp.
type RunStats struct {
LastRun time.Time
NumRuns int
}
// JobMetadata groups the job metadata values passed to UpdateFn.
type JobMetadata struct {
ID jobspb.JobID
Status Status
Payload *jobspb.Payload
Progress *jobspb.Progress
RunStats *RunStats
}
// CheckRunningOrReverting returns an InvalidStatusError if md.Status is not
// StatusRunning or StatusReverting.
func (md *JobMetadata) CheckRunningOrReverting() error {
if md.Status != StatusRunning && md.Status != StatusReverting {
return &InvalidStatusError{md.ID, md.Status, "update progress on", md.Payload.Error}
}
return nil
}
// JobUpdater accumulates changes to job metadata that are to be persisted.
type JobUpdater struct {
md JobMetadata
}
// UpdateStatus sets a new status (to be persisted).
func (ju *JobUpdater) UpdateStatus(status Status) {
ju.md.Status = status
}
// UpdatePayload sets a new Payload (to be persisted).
//
// WARNING: the payload can be large (resulting in a large KV for each version);
// it shouldn't be updated frequently.
func (ju *JobUpdater) UpdatePayload(payload *jobspb.Payload) {
ju.md.Payload = payload
}
// UpdateProgress sets a new Progress (to be persisted).
func (ju *JobUpdater) UpdateProgress(progress *jobspb.Progress) {
ju.md.Progress = progress
}
func (ju *JobUpdater) hasUpdates() bool {
return ju.md != JobMetadata{}
}
// UpdateRunStats is used to update the exponential-backoff parameters last_run and
// num_runs in system.jobs table.
func (ju *JobUpdater) UpdateRunStats(numRuns int, lastRun time.Time) {
ju.md.RunStats = &RunStats{
NumRuns: numRuns,
LastRun: lastRun,
}
}
// UpdateHighwaterProgressed updates job updater progress with the new high water mark.
func UpdateHighwaterProgressed(highWater hlc.Timestamp, md JobMetadata, ju *JobUpdater) error {
if err := md.CheckRunningOrReverting(); err != nil {
return err
}
if highWater.Less(hlc.Timestamp{}) {
return errors.Errorf("high-water %s is outside allowable range > 0.0", highWater)
}
md.Progress.Progress = &jobspb.Progress_HighWater{
HighWater: &highWater,
}
ju.UpdateProgress(md.Progress)
return nil
}
// Update is used to read the metadata for a job and potentially update it.
//
// The updateFn is called in the context of a transaction and is passed the
// current metadata for the job. It can choose to update parts of the metadata
// using the JobUpdater, causing them to be updated within the same transaction.
//
// Sample usage:
//
// err := j.Update(ctx, func(_ *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error {
// if md.Status != StatusRunning {
// return errors.New("job no longer running")
// }
// ju.UpdateStatus(StatusPaused)
// // <modify md.Payload>
// ju.UpdatePayload(md.Payload)
// }
//
// Note that there are various convenience wrappers (like FractionProgressed)
// defined in jobs.go.
func (u Updater) Update(ctx context.Context, updateFn UpdateFn) error {
return u.update(ctx, updateFn)
}
func (u Updater) now() time.Time {
return u.j.registry.clock.Now().GoTime()
}
| pkg/jobs/update.go | 1 | https://github.com/cockroachdb/cockroach/commit/66345a5db9de739faf4edbbaaae4c606daaec03f | [
0.00363012682646513,
0.0008507987367920578,
0.0001562792167533189,
0.00021113621187396348,
0.0009351781918667257
] |
{
"id": 1,
"code_window": [
"\t\t}\n",
"\t\tif err := job.NoTxn().FractionProgressed(ctx, jobs.FractionUpdater(1.1)); !testutils.IsError(err, \"outside allowable range\") {\n",
"\t\t\tt.Fatalf(\"expected 'outside allowable range' error, but got %v\", err)\n",
"\t\t}\n",
"\t\tif err := job.NoTxn().Update(ctx, func(_ isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error {\n",
"\t\t\treturn jobs.UpdateHighwaterProgressed(hlc.Timestamp{WallTime: -1}, md, ju)\n",
"\t\t}); !testutils.IsError(err, \"outside allowable range\") {\n",
"\t\t\tt.Fatalf(\"expected 'outside allowable range' error, but got %v\", err)\n",
"\t\t}\n",
"\t})\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\treturn ju.UpdateHighwaterProgressed(hlc.Timestamp{WallTime: -1}, md)\n"
],
"file_path": "pkg/jobs/jobs_test.go",
"type": "replace",
"edit_start_line_idx": 1021
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/clusterunique"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
"github.com/cockroachdb/cockroach/pkg/sql/parser/statements"
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/errors"
)
// CreateTestTableDescriptor converts a SQL string to a table for test purposes.
// Will fail on complex tables where that operation requires e.g. looking up
// other tables.
func CreateTestTableDescriptor(
ctx context.Context,
parentID, id descpb.ID,
schema string,
privileges *catpb.PrivilegeDescriptor,
txn *kv.Txn,
collection *descs.Collection,
) (*tabledesc.Mutable, error) {
st := cluster.MakeTestingClusterSettings()
stmt, err := parser.ParseOne(schema)
if err != nil {
return nil, err
}
semaCtx := tree.MakeSemaContext()
evalCtx := eval.MakeTestingEvalContext(st)
sessionData := &sessiondata.SessionData{
LocalOnlySessionData: sessiondatapb.LocalOnlySessionData{
EnableUniqueWithoutIndexConstraints: true,
},
}
switch n := stmt.AST.(type) {
case *tree.CreateTable:
db := dbdesc.NewInitial(parentID, "test", username.RootUserName())
desc, err := NewTableDesc(
ctx,
nil, /* txn */
NewSkippingCacheSchemaResolver(collection, sessiondata.NewStack(sessionData), txn, nil),
st,
n,
db,
schemadesc.GetPublicSchema(),
id,
nil, /* regionConfig */
hlc.Timestamp{}, /* creationTime */
privileges,
make(map[descpb.ID]*tabledesc.Mutable),
&semaCtx,
&evalCtx,
sessionData,
tree.PersistencePermanent,
)
return desc, err
case *tree.CreateSequence:
desc, err := NewSequenceTableDesc(
ctx,
nil, /* planner */
st,
n.Name.Table(),
n.Options,
parentID, keys.PublicSchemaID, id,
hlc.Timestamp{}, /* creationTime */
privileges,
tree.PersistencePermanent,
false, /* isMultiRegion */
)
return desc, err
default:
return nil, errors.Errorf("unexpected AST %T", stmt.AST)
}
}
// StmtBufReader is an exported interface for reading a StmtBuf.
// Normally only the write interface of the buffer is exported, as it is used by
// the pgwire package.
type StmtBufReader struct {
buf *StmtBuf
}
// MakeStmtBufReader creates a StmtBufReader.
func MakeStmtBufReader(buf *StmtBuf) StmtBufReader {
return StmtBufReader{buf: buf}
}
// CurCmd returns the current command in the buffer.
func (r StmtBufReader) CurCmd() (Command, error) {
cmd, _ /* pos */, err := r.buf.CurCmd()
return cmd, err
}
// AdvanceOne moves the cursor one position over.
func (r *StmtBufReader) AdvanceOne() {
r.buf.AdvanceOne()
}
// Exec is a test utility function that takes a localPlanner (of type
// interface{} so that external packages can call NewInternalPlanner and pass
// the result) and executes a sql statement through the DistSQLPlanner.
func (dsp *DistSQLPlanner) Exec(
ctx context.Context,
localPlanner interface{},
stmt statements.Statement[tree.Statement],
distribute bool,
) error {
p := localPlanner.(*planner)
p.stmt = makeStatement(stmt, clusterunique.ID{} /* queryID */)
if err := p.makeOptimizerPlan(ctx); err != nil {
return err
}
defer p.curPlan.close(ctx)
rw := NewCallbackResultWriter(func(ctx context.Context, row tree.Datums) error {
return nil
})
execCfg := p.ExecCfg()
recv := MakeDistSQLReceiver(
ctx,
rw,
stmt.AST.StatementReturnType(),
execCfg.RangeDescriptorCache,
p.txn,
execCfg.Clock,
p.ExtendedEvalContext().Tracing,
)
defer recv.Release()
distributionType := DistributionType(DistributionTypeNone)
if distribute {
distributionType = DistributionTypeSystemTenantOnly
}
evalCtx := p.ExtendedEvalContext()
planCtx := execCfg.DistSQLPlanner.NewPlanningCtx(ctx, evalCtx, p, p.txn,
distributionType)
planCtx.stmtType = recv.stmtType
dsp.PlanAndRun(ctx, evalCtx, planCtx, p.txn, p.curPlan.main, recv, nil /* finishedSetupFn */)
return rw.Err()
}
// ExecLocalAll is basically a conn_executor free version of execWithDistSQLEngine
// hard coded for non-distributed statements (currently used by copy testing).
func (dsp *DistSQLPlanner) ExecLocalAll(
ctx context.Context, execCfg ExecutorConfig, p *planner, res RestrictedCommandResult,
) error {
defer p.curPlan.close(ctx)
recv := MakeDistSQLReceiver(
ctx,
res,
p.stmt.AST.StatementReturnType(),
execCfg.RangeDescriptorCache,
p.txn,
execCfg.Clock,
p.ExtendedEvalContext().Tracing,
)
defer recv.Release()
distributionType := DistributionType(DistributionTypeNone)
evalCtx := p.ExtendedEvalContext()
planCtx := execCfg.DistSQLPlanner.NewPlanningCtx(ctx, evalCtx, p, p.txn,
distributionType)
planCtx.stmtType = recv.stmtType
var factoryEvalCtx = extendedEvalContext{Tracing: &SessionTracing{}}
evalCtxFactory := func(bool) *extendedEvalContext {
factoryEvalCtx.Context = evalCtx.Context
factoryEvalCtx.Placeholders = &p.semaCtx.Placeholders
factoryEvalCtx.Annotations = &p.semaCtx.Annotations
return &factoryEvalCtx
}
return dsp.PlanAndRunAll(ctx, evalCtx, planCtx, p, recv, evalCtxFactory)
}
| pkg/sql/testutils.go | 0 | https://github.com/cockroachdb/cockroach/commit/66345a5db9de739faf4edbbaaae4c606daaec03f | [
0.00032487165299244225,
0.00017682823818176985,
0.00015983829507604241,
0.0001696226536296308,
0.00003446239497861825
] |
{
"id": 1,
"code_window": [
"\t\t}\n",
"\t\tif err := job.NoTxn().FractionProgressed(ctx, jobs.FractionUpdater(1.1)); !testutils.IsError(err, \"outside allowable range\") {\n",
"\t\t\tt.Fatalf(\"expected 'outside allowable range' error, but got %v\", err)\n",
"\t\t}\n",
"\t\tif err := job.NoTxn().Update(ctx, func(_ isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error {\n",
"\t\t\treturn jobs.UpdateHighwaterProgressed(hlc.Timestamp{WallTime: -1}, md, ju)\n",
"\t\t}); !testutils.IsError(err, \"outside allowable range\") {\n",
"\t\t\tt.Fatalf(\"expected 'outside allowable range' error, but got %v\", err)\n",
"\t\t}\n",
"\t})\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\treturn ju.UpdateHighwaterProgressed(hlc.Timestamp{WallTime: -1}, md)\n"
],
"file_path": "pkg/jobs/jobs_test.go",
"type": "replace",
"edit_start_line_idx": 1021
} | load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "quotapool",
srcs = [
"config.go",
"int_rate.go",
"intpool.go",
"notify_queue.go",
"quotapool.go",
],
importpath = "github.com/cockroachdb/cockroach/pkg/util/quotapool",
visibility = ["//visibility:public"],
deps = [
"//pkg/util/log",
"//pkg/util/syncutil",
"//pkg/util/timeutil",
"@com_github_cockroachdb_errors//:errors",
"@com_github_cockroachdb_redact//:redact",
"@com_github_cockroachdb_tokenbucket//:tokenbucket",
],
)
go_test(
name = "quotapool_test",
size = "small",
srcs = [
"bench_test.go",
"example_test.go",
"int_rate_test.go",
"intpool_test.go",
"node_size_test.go",
"notify_queue_test.go",
],
embed = [":quotapool"],
deps = [
"//pkg/testutils",
"//pkg/util/ctxgroup",
"//pkg/util/leaktest",
"//pkg/util/timeutil",
"@com_github_cockroachdb_errors//:errors",
"@com_github_stretchr_testify//assert",
"@com_github_stretchr_testify//require",
"@org_golang_x_sync//errgroup",
],
)
| pkg/util/quotapool/BUILD.bazel | 0 | https://github.com/cockroachdb/cockroach/commit/66345a5db9de739faf4edbbaaae4c606daaec03f | [
0.0001773571129888296,
0.0001743389293551445,
0.00017092932830564678,
0.00017437015776522458,
0.0000020954112187610008
] |
{
"id": 1,
"code_window": [
"\t\t}\n",
"\t\tif err := job.NoTxn().FractionProgressed(ctx, jobs.FractionUpdater(1.1)); !testutils.IsError(err, \"outside allowable range\") {\n",
"\t\t\tt.Fatalf(\"expected 'outside allowable range' error, but got %v\", err)\n",
"\t\t}\n",
"\t\tif err := job.NoTxn().Update(ctx, func(_ isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error {\n",
"\t\t\treturn jobs.UpdateHighwaterProgressed(hlc.Timestamp{WallTime: -1}, md, ju)\n",
"\t\t}); !testutils.IsError(err, \"outside allowable range\") {\n",
"\t\t\tt.Fatalf(\"expected 'outside allowable range' error, but got %v\", err)\n",
"\t\t}\n",
"\t})\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\treturn ju.UpdateHighwaterProgressed(hlc.Timestamp{WallTime: -1}, md)\n"
],
"file_path": "pkg/jobs/jobs_test.go",
"type": "replace",
"edit_start_line_idx": 1021
} | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package kvserver
import (
"context"
"math"
"time"
"unsafe"
"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb"
"github.com/cockroachdb/cockroach/pkg/util/grunning"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/errors"
"go.etcd.io/raft/v3/raftpb"
)
var (
logRaftRecvQueueFullEvery = log.Every(1 * time.Second)
logRaftSendQueueFullEvery = log.Every(1 * time.Second)
)
type raftRequestInfo struct {
req *kvserverpb.RaftMessageRequest
size int64 // size of req in bytes
respStream RaftMessageResponseStream
}
type raftReceiveQueue struct {
mu struct { // not to be locked directly
destroyed bool
syncutil.Mutex
infos []raftRequestInfo
}
maxLen int
acc mon.BoundAccount
}
// Len returns the number of requests in the queue.
func (q *raftReceiveQueue) Len() int {
q.mu.Lock()
defer q.mu.Unlock()
return len(q.mu.infos)
}
// Drain moves the stored requests out of the queue, returning them to
// the caller. Returns true if the returned slice was not empty.
func (q *raftReceiveQueue) Drain() ([]raftRequestInfo, bool) {
q.mu.Lock()
defer q.mu.Unlock()
return q.drainLocked()
}
func (q *raftReceiveQueue) drainLocked() ([]raftRequestInfo, bool) {
if len(q.mu.infos) == 0 {
return nil, false
}
infos := q.mu.infos
q.mu.infos = nil
q.acc.Clear(context.Background())
return infos, true
}
func (q *raftReceiveQueue) Delete() {
q.mu.Lock()
defer q.mu.Unlock()
q.drainLocked()
if err := q.acc.ResizeTo(context.Background(), 0); err != nil {
panic(err) // ResizeTo(., 0) always returns nil
}
q.mu.destroyed = true
}
// Recycle makes a slice that the caller knows will no longer be accessed
// available for reuse.
func (q *raftReceiveQueue) Recycle(processed []raftRequestInfo) {
if cap(processed) > 4 {
return // cap recycled slice lengths
}
q.mu.Lock()
defer q.mu.Unlock()
if q.mu.infos == nil {
for i := range processed {
processed[i] = raftRequestInfo{}
}
q.mu.infos = processed[:0]
}
}
func (q *raftReceiveQueue) Append(
req *kvserverpb.RaftMessageRequest, s RaftMessageResponseStream,
) (shouldQueue bool, size int64, appended bool) {
size = int64(req.Size())
q.mu.Lock()
defer q.mu.Unlock()
if q.mu.destroyed || len(q.mu.infos) >= q.maxLen {
return false, size, false
}
if q.acc.Grow(context.Background(), size) != nil {
return false, size, false
}
q.mu.infos = append(q.mu.infos, raftRequestInfo{
req: req,
respStream: s,
size: size,
})
// The operation that enqueues the first message will
// be put in charge of triggering a drain of the queue.
return len(q.mu.infos) == 1, size, true
}
type raftReceiveQueues struct {
mon *mon.BytesMonitor
m syncutil.IntMap // RangeID -> *raftReceiveQueue
}
func (qs *raftReceiveQueues) Load(rangeID roachpb.RangeID) (*raftReceiveQueue, bool) {
value, ok := qs.m.Load(int64(rangeID))
return (*raftReceiveQueue)(value), ok
}
func (qs *raftReceiveQueues) LoadOrCreate(
rangeID roachpb.RangeID, maxLen int,
) (_ *raftReceiveQueue, loaded bool) {
if q, ok := qs.Load(rangeID); ok {
return q, ok // fast path
}
q := &raftReceiveQueue{maxLen: maxLen}
q.acc.Init(context.Background(), qs.mon)
value, loaded := qs.m.LoadOrStore(int64(rangeID), unsafe.Pointer(q))
return (*raftReceiveQueue)(value), loaded
}
// Delete drains the queue and marks it as deleted. Future Appends
// will result in appended=false.
func (qs *raftReceiveQueues) Delete(rangeID roachpb.RangeID) {
if q, ok := qs.Load(rangeID); ok {
q.Delete()
qs.m.Delete(int64(rangeID))
}
}
// HandleDelegatedSnapshot reads the incoming delegated snapshot message and
// throttles sending snapshots before passing the request to the sender replica.
func (s *Store) HandleDelegatedSnapshot(
ctx context.Context, req *kvserverpb.DelegateSendSnapshotRequest,
) *kvserverpb.DelegateSnapshotResponse {
ctx = s.AnnotateCtx(ctx)
if fn := s.cfg.TestingKnobs.SendSnapshot; fn != nil {
fn(req)
}
sp := tracing.SpanFromContext(ctx)
// This can happen if the delegate doesn't know about the range yet. Return an
// error immediately.
sender, err := s.GetReplica(req.RangeID)
if err != nil {
return &kvserverpb.DelegateSnapshotResponse{
Status: kvserverpb.DelegateSnapshotResponse_ERROR,
EncodedError: errors.EncodeError(context.Background(), err),
CollectedSpans: sp.GetConfiguredRecording(),
}
}
// Pass the request to the sender replica.
msgAppResp, err := sender.followerSendSnapshot(ctx, req.RecipientReplica, req)
if err != nil {
// If an error occurred during snapshot sending, send an error response.
return &kvserverpb.DelegateSnapshotResponse{
Status: kvserverpb.DelegateSnapshotResponse_ERROR,
EncodedError: errors.EncodeError(context.Background(), err),
CollectedSpans: sp.GetConfiguredRecording(),
}
}
return &kvserverpb.DelegateSnapshotResponse{
Status: kvserverpb.DelegateSnapshotResponse_APPLIED,
CollectedSpans: sp.GetConfiguredRecording(),
MsgAppResp: msgAppResp,
}
}
// HandleSnapshot reads an incoming streaming snapshot and applies it if
// possible.
func (s *Store) HandleSnapshot(
ctx context.Context, header *kvserverpb.SnapshotRequest_Header, stream SnapshotResponseStream,
) error {
if fn := s.cfg.TestingKnobs.HandleSnapshotDone; fn != nil {
defer fn()
}
ctx = s.AnnotateCtx(ctx)
const name = "storage.Store: handle snapshot"
return s.stopper.RunTaskWithErr(ctx, name, func(ctx context.Context) error {
s.metrics.RaftRcvdMessages[raftpb.MsgSnap].Inc(1)
err := s.receiveSnapshot(ctx, header, stream)
if err != nil && ctx.Err() != nil {
// Log trace of incoming snapshot on context cancellation (e.g.
// times out or caller goes away).
log.Infof(ctx, "incoming snapshot stream failed with error: %v\ntrace:\n%v",
err, tracing.SpanFromContext(ctx).GetConfiguredRecording())
}
return err
})
}
func (s *Store) uncoalesceBeats(
ctx context.Context,
beats []kvserverpb.RaftHeartbeat,
fromReplica, toReplica roachpb.ReplicaDescriptor,
msgT raftpb.MessageType,
respStream RaftMessageResponseStream,
) {
if len(beats) == 0 {
return
}
if log.V(4) {
log.Infof(ctx, "uncoalescing %d beats of type %v: %+v", len(beats), msgT, beats)
}
beatReqs := make([]kvserverpb.RaftMessageRequest, len(beats))
batch := s.scheduler.NewEnqueueBatch()
defer batch.Close()
for i, beat := range beats {
msg := raftpb.Message{
Type: msgT,
From: uint64(beat.FromReplicaID),
To: uint64(beat.ToReplicaID),
Term: uint64(beat.Term),
Commit: uint64(beat.Commit),
}
beatReqs[i] = kvserverpb.RaftMessageRequest{
RangeID: beat.RangeID,
FromReplica: roachpb.ReplicaDescriptor{
NodeID: fromReplica.NodeID,
StoreID: fromReplica.StoreID,
ReplicaID: beat.FromReplicaID,
},
ToReplica: roachpb.ReplicaDescriptor{
NodeID: toReplica.NodeID,
StoreID: toReplica.StoreID,
ReplicaID: beat.ToReplicaID,
},
Message: msg,
Quiesce: beat.Quiesce,
LaggingFollowersOnQuiesce: beat.LaggingFollowersOnQuiesce,
}
if log.V(4) {
log.Infof(ctx, "uncoalesced beat: %+v", beatReqs[i])
}
enqueue := s.HandleRaftUncoalescedRequest(ctx, &beatReqs[i], respStream)
if enqueue {
batch.Add(beat.RangeID)
}
}
s.scheduler.EnqueueRaftRequests(batch)
}
// HandleRaftRequest dispatches a raft message to the appropriate Replica. It
// requires that s.mu is not held.
func (s *Store) HandleRaftRequest(
ctx context.Context, req *kvserverpb.RaftMessageRequest, respStream RaftMessageResponseStream,
) *kvpb.Error {
comparisonResult := s.getLocalityComparison(ctx, req.FromReplica.NodeID, req.ToReplica.NodeID)
s.metrics.updateCrossLocalityMetricsOnIncomingRaftMsg(comparisonResult, int64(req.Size()))
// NB: unlike the other two IncomingRaftMessageHandler methods implemented by
// Store, this one doesn't need to directly run through a Stopper task because
// it delegates all work through a raftScheduler, whose workers' lifetimes are
// already tied to the Store's Stopper.
if len(req.Heartbeats)+len(req.HeartbeatResps) > 0 {
if req.RangeID != 0 {
log.Fatalf(ctx, "coalesced heartbeats must have rangeID == 0")
}
s.uncoalesceBeats(ctx, req.Heartbeats, req.FromReplica, req.ToReplica, raftpb.MsgHeartbeat, respStream)
s.uncoalesceBeats(ctx, req.HeartbeatResps, req.FromReplica, req.ToReplica, raftpb.MsgHeartbeatResp, respStream)
return nil
}
enqueue := s.HandleRaftUncoalescedRequest(ctx, req, respStream)
if enqueue {
s.scheduler.EnqueueRaftRequest(req.RangeID)
}
return nil
}
// HandleRaftUncoalescedRequest dispatches a raft message to the appropriate
// Replica. The method returns whether the Range needs to be enqueued in the
// Raft scheduler. It requires that s.mu is not held.
func (s *Store) HandleRaftUncoalescedRequest(
ctx context.Context, req *kvserverpb.RaftMessageRequest, respStream RaftMessageResponseStream,
) (enqueue bool) {
if len(req.Heartbeats)+len(req.HeartbeatResps) > 0 {
log.Fatalf(ctx, "HandleRaftUncoalescedRequest cannot be given coalesced heartbeats or heartbeat responses, received %s", req)
}
// HandleRaftRequest is called on locally uncoalesced heartbeats (which are
// not sent over the network if the environment variable is set) so do not
// count them.
s.metrics.RaftRcvdMessages[req.Message.Type].Inc(1)
// NB: add a buffer for extra messages, to allow heartbeats getting through
// even if MsgApp quota is maxed out by the sender.
q, _ := s.raftRecvQueues.LoadOrCreate(req.RangeID,
s.cfg.RaftMaxInflightMsgs+replicaQueueExtraSize)
enqueue, size, appended := q.Append(req, respStream)
if !appended {
// TODO(peter): Return an error indicating the request was dropped. Note
// that dropping the request is safe. Raft will retry.
s.metrics.RaftRcvdDropped.Inc(1)
s.metrics.RaftRcvdDroppedBytes.Inc(size)
if logRaftRecvQueueFullEvery.ShouldLog() {
log.Warningf(ctx, "raft receive queue for r%d is full", req.RangeID)
}
return false
}
return enqueue
}
// HandleRaftRequestSent is called to capture outgoing Raft messages just prior
// to their transmission to the raftSendQueue. Note that the message might not
// be successfully queued if it gets dropped by SendAsync due to a full outgoing
// queue. Currently, this is only used for metrics update which is why it only
// takes specific properties of the request as arguments.
func (s *Store) HandleRaftRequestSent(
ctx context.Context, fromNodeID roachpb.NodeID, toNodeID roachpb.NodeID, msgSize int64,
) {
comparisonResult := s.getLocalityComparison(ctx, fromNodeID, toNodeID)
s.metrics.updateCrossLocalityMetricsOnOutgoingRaftMsg(comparisonResult, msgSize)
}
// withReplicaForRequest calls the supplied function with the (lazily
// initialized) Replica specified in the request. The replica passed to
// the function will have its Replica.raftMu locked.
func (s *Store) withReplicaForRequest(
ctx context.Context,
req *kvserverpb.RaftMessageRequest,
f func(context.Context, *Replica) *kvpb.Error,
) *kvpb.Error {
// Lazily create the replica.
r, _, err := s.getOrCreateReplica(
ctx,
req.RangeID,
req.ToReplica.ReplicaID,
&req.FromReplica,
)
if err != nil {
return kvpb.NewError(err)
}
defer r.raftMu.Unlock()
r.setLastReplicaDescriptors(req)
return f(ctx, r)
}
// processRaftRequestWithReplica processes the (non-snapshot) Raft request on
// the specified replica. Notably, it does not handle updates to the Raft Ready
// state; callers will probably want to handle this themselves at some point.
func (s *Store) processRaftRequestWithReplica(
ctx context.Context, r *Replica, req *kvserverpb.RaftMessageRequest,
) *kvpb.Error {
// Record the CPU time processing the request for this replica. This is
// recorded regardless of errors that are encountered.
defer r.MeasureRaftCPUNanos(grunning.Time())
if verboseRaftLoggingEnabled() {
log.Infof(ctx, "incoming raft message:\n%s", raftDescribeMessage(req.Message, raftEntryFormatter))
}
if req.Message.Type == raftpb.MsgSnap {
log.Fatalf(ctx, "unexpected snapshot: %+v", req)
}
if req.Quiesce {
if req.Message.Type != raftpb.MsgHeartbeat {
log.Fatalf(ctx, "unexpected quiesce: %+v", req)
}
if r.maybeQuiesceOnNotify(
ctx,
req.Message,
laggingReplicaSet(req.LaggingFollowersOnQuiesce),
) {
return nil
}
}
if req.ToReplica.ReplicaID == 0 {
log.VEventf(ctx, 1, "refusing incoming Raft message %s from %+v to %+v",
req.Message.Type, req.FromReplica, req.ToReplica)
return kvpb.NewErrorf(
"cannot recreate replica that is not a member of its range (StoreID %s not found in r%d)",
r.store.StoreID(), req.RangeID,
)
}
drop := maybeDropMsgApp(ctx, (*replicaMsgAppDropper)(r), &req.Message, req.RangeStartKey)
if !drop {
if err := r.stepRaftGroup(req); err != nil {
return kvpb.NewError(err)
}
}
return nil
}
// processRaftSnapshotRequest processes the incoming snapshot Raft request on
// the request's specified replica. The function makes sure to handle any
// updated Raft Ready state. It also adds and later removes the (potentially)
// necessary placeholder to protect against concurrent access to the keyspace
// encompassed by the snapshot but not yet guarded by the replica.
//
// If (and only if) no error is returned, the placeholder (if any) in inSnap
// will have been removed.
func (s *Store) processRaftSnapshotRequest(
ctx context.Context, snapHeader *kvserverpb.SnapshotRequest_Header, inSnap IncomingSnapshot,
) (*raftpb.Message, *kvpb.Error) {
var msgAppResp *raftpb.Message
pErr := s.withReplicaForRequest(ctx, &snapHeader.RaftMessageRequest, func(
ctx context.Context, r *Replica,
) (pErr *kvpb.Error) {
ctx = r.AnnotateCtx(ctx)
if snapHeader.RaftMessageRequest.Message.Type != raftpb.MsgSnap {
log.Fatalf(ctx, "expected snapshot: %+v", snapHeader.RaftMessageRequest)
}
typ := removePlaceholderFailed
defer func() {
// In the typical case, handleRaftReadyRaftMuLocked calls through to
			// applySnapshot, which applies the snapshot and also converts the
			// placeholder entry (if any) to the now-initialized replica. However, we
// may also error out below, or raft may also ignore the snapshot, and so
// the placeholder would remain.
//
// NB: it's unclear in which case we could actually get raft to ignore a
// snapshot attached to a placeholder. A placeholder existing implies that
// the snapshot is targeting an uninitialized replica. The only known reason
// for raft to ignore a snapshot is if it doesn't move the applied index
// forward, but an uninitialized replica's applied index is zero (and a
// snapshot's is at least raftInitialLogIndex).
if inSnap.placeholder != nil {
if _, err := s.removePlaceholder(ctx, inSnap.placeholder, typ); err != nil {
log.Fatalf(ctx, "unable to remove placeholder: %s", err)
}
}
}()
if snapHeader.RaftMessageRequest.Message.From == snapHeader.RaftMessageRequest.Message.To {
// This is a special case exercised during recovery from loss of quorum.
// In this case, a forged snapshot will be sent to the replica and will
// hit this code path (if we make up a non-existent follower, Raft will
// drop the message, hence we are forced to make the receiver the sender).
//
// Unfortunately, at the time of writing, Raft assumes that a snapshot
// is always received from the leader (of the given term), which plays
// poorly with these forged snapshots. However, a zero sender works just
// fine as the value zero represents "no known leader".
//
// We prefer not to introduce a zero origin of the message as throughout
// our code we rely on it being present. Instead, we reset the origin
// that raft looks at just before handing the message off.
snapHeader.RaftMessageRequest.Message.From = 0
}
// NB: we cannot get errRemoved here because we're promised by
// withReplicaForRequest that this replica is not currently being removed
// and we've been holding the raftMu the entire time.
if err := r.stepRaftGroup(&snapHeader.RaftMessageRequest); err != nil {
return kvpb.NewError(err)
}
// We've handed the snapshot to Raft, which will typically apply it (in
// which case the placeholder, if any, is removed by the time
		// handleRaftReadyRaftMuLocked returns). We handle the other case in a
// defer() above. Note that we could infer when the placeholder should still
// be there based on `stats.snap.applied` but it is a questionable use of
// stats and more susceptible to bugs.
typ = removePlaceholderDropped
stats, err := r.handleRaftReadyRaftMuLocked(ctx, inSnap)
maybeFatalOnRaftReadyErr(ctx, err)
if !stats.snap.applied {
// This line would be hit if a snapshot was sent when it isn't necessary
// (i.e. follower was able to catch up via the log in the interim) or when
// multiple snapshots raced (as is possible when raft leadership changes
// and both the old and new leaders send snapshots).
log.Infof(ctx, "ignored stale snapshot at index %d", snapHeader.RaftMessageRequest.Message.Snapshot.Metadata.Index)
s.metrics.RangeSnapshotRecvUnusable.Inc(1)
}
// If the snapshot was applied and acked with an MsgAppResp, return that
// message up the stack. We're using msgAppRespCh as a shortcut to avoid
// plumbing return parameters through an additional few layers of raft
// handling.
//
// NB: in practice there's always an MsgAppResp here, but it is better not
// to rely on what is essentially discretionary raft behavior.
select {
case msg := <-inSnap.msgAppRespCh:
msgAppResp = &msg
default:
}
return nil
})
if pErr != nil {
return nil, pErr
}
return msgAppResp, nil
}
// HandleRaftResponse implements the IncomingRaftMessageHandler interface. Per
// the interface specification, an error is returned if and only if the
// underlying Raft connection should be closed. It requires that s.mu is not
// held.
func (s *Store) HandleRaftResponse(
ctx context.Context, resp *kvserverpb.RaftMessageResponse,
) error {
ctx = s.AnnotateCtx(ctx)
const name = "storage.Store: handle raft response"
return s.stopper.RunTaskWithErr(ctx, name, func(ctx context.Context) error {
repl, replErr := s.GetReplica(resp.RangeID)
if replErr == nil {
// Best-effort context annotation of replica.
ctx = repl.AnnotateCtx(ctx)
}
switch val := resp.Union.GetValue().(type) {
case *kvpb.Error:
switch tErr := val.GetDetail().(type) {
case *kvpb.ReplicaTooOldError:
if replErr != nil {
// RangeNotFoundErrors are expected here; nothing else is.
if !errors.HasType(replErr, (*kvpb.RangeNotFoundError)(nil)) {
log.Errorf(ctx, "%v", replErr)
}
return nil
}
// Grab the raftMu in addition to the replica mu because
// cancelFailedProposalsLocked below requires it.
repl.raftMu.Lock()
defer repl.raftMu.Unlock()
repl.mu.Lock()
// If the replica ID in the error does not match then we know
// that the replica has been removed and re-added quickly. In
// that case, we don't want to add it to the replicaGCQueue.
// If the replica is not alive then we also should ignore this error.
if tErr.ReplicaID != repl.replicaID ||
!repl.mu.destroyStatus.IsAlive() ||
// Ignore if we want to test the replicaGC queue.
s.TestingKnobs().DisableEagerReplicaRemoval {
repl.mu.Unlock()
return nil
}
// The replica will be garbage collected soon (we are sure
// since our replicaID is definitely too old), but in the meantime we
// already want to bounce all traffic from it. Note that the replica
// could be re-added with a higher replicaID, but we want to clear the
// replica's data before that happens.
if log.V(1) {
log.Infof(ctx, "setting local replica to destroyed due to ReplicaTooOld error")
}
repl.mu.Unlock()
nextReplicaID := tErr.ReplicaID + 1
return s.removeReplicaRaftMuLocked(ctx, repl, nextReplicaID, RemoveOptions{
DestroyData: true,
})
case *kvpb.RaftGroupDeletedError:
if replErr != nil {
// RangeNotFoundErrors are expected here; nothing else is.
if !errors.HasType(replErr, (*kvpb.RangeNotFoundError)(nil)) {
log.Errorf(ctx, "%v", replErr)
}
return nil
}
// If the replica is talking to a replica that's been deleted, it must be
// out of date. While this may just mean it's slightly behind, it can
// also mean that it is so far behind it no longer knows where any of the
// other replicas are (#23994). Add it to the replica GC queue to do a
// proper check.
s.replicaGCQueue.AddAsync(ctx, repl, replicaGCPriorityDefault)
case *kvpb.StoreNotFoundError:
log.Warningf(ctx, "raft error: node %d claims to not contain store %d for replica %s: %s",
resp.FromReplica.NodeID, resp.FromReplica.StoreID, resp.FromReplica, val)
return val.GetDetail() // close Raft connection
default:
log.Warningf(ctx, "got error from r%d, replica %s: %s",
resp.RangeID, resp.FromReplica, val)
}
default:
log.Warningf(ctx, "got unknown raft response type %T from replica %s: %s", val, resp.FromReplica, val)
}
return nil
})
}
// enqueueRaftUpdateCheck asynchronously registers the given range ID to be
// checked for raft updates when the processRaft goroutine is idle.
func (s *Store) enqueueRaftUpdateCheck(rangeID roachpb.RangeID) {
s.scheduler.EnqueueRaftReady(rangeID)
}
// TODO(tbg): rename this to processRecvQueue.
func (s *Store) processRequestQueue(ctx context.Context, rangeID roachpb.RangeID) bool {
q, ok := s.raftRecvQueues.Load(rangeID)
if !ok {
return false
}
infos, ok := q.Drain()
if !ok {
return false
}
defer q.Recycle(infos)
var hadError bool
for i := range infos {
info := &infos[i]
if pErr := s.withReplicaForRequest(
ctx, info.req, func(_ context.Context, r *Replica) *kvpb.Error {
return s.processRaftRequestWithReplica(r.raftCtx, r, info.req)
},
); pErr != nil {
hadError = true
if err := info.respStream.Send(newRaftMessageResponse(info.req, pErr)); err != nil {
// Seems excessive to log this on every occurrence as the other side
// might have closed.
log.VEventf(ctx, 1, "error sending error: %s", err)
}
}
s.metrics.RaftRcvdSteppedBytes.Inc(info.size)
infos[i] = raftRequestInfo{}
}
if hadError {
// If we're unable to process a request, consider dropping the request queue
// to free up space in the map.
// This is relevant if requests failed because the target replica could not
// be created (for example due to the Raft tombstone). The particular code
// here takes into account that we don't want to drop the queue if there
// are other messages waiting on it, or if the target replica exists. Raft
// tolerates the occasional dropped message, but our unit tests are less
// forgiving.
//
// See https://github.com/cockroachdb/cockroach/issues/30951#issuecomment-428010411.
//
// TODO(tbg): for adding actual memory accounting, we need more clarity about
// the contract. For example, it would be a problem if the queue got deleted
	// (as a result of the replica getting deleted) but then got recreated errantly.
// In that case, we would "permanently" leak an allocation, which over time could
// eat up the budget. We must ensure, essentially, that we create a queue only
// when the replica is alive (according to its destroyStatus) and ensure it is
// destroyed once that changes.
if _, exists := s.mu.replicasByRangeID.Load(rangeID); !exists && q.Len() == 0 {
s.raftRecvQueues.Delete(rangeID)
}
}
// NB: Even if we had errors and the corresponding replica no longer
// exists, returning true here won't cause a new, uninitialized replica
// to be created in processReady().
return true // ready
}
func (s *Store) processReady(rangeID roachpb.RangeID) {
r, ok := s.mu.replicasByRangeID.Load(rangeID)
if !ok {
return
}
// Record the CPU time processing the request for this replica. This is
// recorded regardless of errors that are encountered.
defer r.MeasureRaftCPUNanos(grunning.Time())
ctx := r.raftCtx
stats, err := r.handleRaftReady(ctx, noSnap)
maybeFatalOnRaftReadyErr(ctx, err)
elapsed := stats.tEnd.Sub(stats.tBegin)
s.metrics.RaftWorkingDurationNanos.Inc(elapsed.Nanoseconds())
s.metrics.RaftHandleReadyLatency.RecordValue(elapsed.Nanoseconds())
// Warn if Raft processing took too long. We use the same duration as we
// use for warning about excessive raft mutex lock hold times. Long
// processing time means we'll have starved local replicas of ticks and
// remote replicas will likely start campaigning.
if elapsed >= defaultReplicaRaftMuWarnThreshold {
log.Infof(ctx, "%s; node might be overloaded", stats)
}
}
func (s *Store) processTick(_ context.Context, rangeID roachpb.RangeID) bool {
r, ok := s.mu.replicasByRangeID.Load(rangeID)
if !ok {
return false
}
livenessMap, _ := s.livenessMap.Load().(livenesspb.IsLiveMap)
ioThresholds := s.ioThresholds.Current()
// Record the CPU time processing the request for this replica. This is
// recorded regardless of errors that are encountered.
defer r.MeasureRaftCPUNanos(grunning.Time())
start := timeutil.Now()
ctx := r.raftCtx
exists, err := r.tick(ctx, livenessMap, ioThresholds)
if err != nil {
log.Errorf(ctx, "%v", err)
}
s.metrics.RaftTickingDurationNanos.Inc(timeutil.Since(start).Nanoseconds())
return exists // ready
}
// nodeIsLiveCallback is invoked when a node transitions from non-live to live.
// Iterate through all replicas and find any which belong to ranges containing
// the implicated node. Unquiesce if currently quiesced and the node's replica
// is not up-to-date.
//
// See the comment in shouldFollowerQuiesceOnNotify for details on how these two
// functions combine to provide the guarantee that:
//
//	If a quorum of replicas in a Raft group is alive and at least
// one of these replicas is up-to-date, the Raft group will catch
// up any of the live, lagging replicas.
//
// Note that this mechanism can race with concurrent invocations of processTick,
// which may have a copy of the previous livenessMap where the now-live node is
// down. Those instances should be rare, however, and we expect the newly live
// node to eventually unquiesce the range.
func (s *Store) nodeIsLiveCallback(l livenesspb.Liveness) {
s.updateLivenessMap()
s.mu.replicasByRangeID.Range(func(r *Replica) {
r.mu.RLock()
quiescent := r.mu.quiescent
lagging := r.mu.laggingFollowersOnQuiesce
r.mu.RUnlock()
if quiescent && lagging.MemberStale(l) {
r.maybeUnquiesce(false /* wakeLeader */, false /* mayCampaign */) // already leader
}
})
}
func (s *Store) processRaft(ctx context.Context) {
s.scheduler.Start(s.stopper)
// Wait for the scheduler worker goroutines to finish.
if err := s.stopper.RunAsyncTask(ctx, "sched-wait", s.scheduler.Wait); err != nil {
s.scheduler.Wait(ctx)
}
_ = s.stopper.RunAsyncTask(ctx, "sched-tick-loop", s.raftTickLoop)
_ = s.stopper.RunAsyncTask(ctx, "coalesced-hb-loop", s.coalescedHeartbeatsLoop)
s.stopper.AddCloser(stop.CloserFn(func() {
s.cfg.Transport.StopIncomingRaftMessages(s.StoreID())
s.cfg.Transport.StopOutgoingMessage(s.StoreID())
}))
s.syncWaiter.Start(ctx, s.stopper)
// We'll want to cancel all in-flight proposals. Proposals embed tracing
// spans in them, and we don't want to be leaking any.
s.stopper.AddCloser(stop.CloserFn(func() {
s.VisitReplicas(func(r *Replica) (more bool) {
r.mu.Lock()
r.mu.proposalBuf.FlushLockedWithoutProposing(ctx)
for k, prop := range r.mu.proposals {
delete(r.mu.proposals, k)
prop.finishApplication(
context.Background(),
makeProposalResultErr(
kvpb.NewAmbiguousResultErrorf("store is stopping")))
}
r.mu.Unlock()
return true
})
}))
}
func (s *Store) raftTickLoop(ctx context.Context) {
ticker := time.NewTicker(s.cfg.RaftTickInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
// Update the liveness map.
if s.cfg.NodeLiveness != nil {
s.updateLivenessMap()
}
s.updateIOThresholdMap()
s.unquiescedReplicas.Lock()
// Why do we bother to ever queue a Replica on the Raft scheduler for
// tick processing? Couldn't we just call Replica.tick() here? Yes, but
// then a single bad/slow Replica can disrupt tick processing for every
// Replica on the store which cascades into Raft elections and more
// disruption.
batch := s.scheduler.NewEnqueueBatch()
for rangeID := range s.unquiescedReplicas.m {
batch.Add(rangeID)
}
s.unquiescedReplicas.Unlock()
s.scheduler.EnqueueRaftTicks(batch)
batch.Close()
s.metrics.RaftTicks.Inc(1)
case <-s.stopper.ShouldQuiesce():
return
}
}
}
func (s *Store) updateIOThresholdMap() {
ioThresholdMap := map[roachpb.StoreID]*admissionpb.IOThreshold{}
for _, sd := range s.cfg.StorePool.GetStores() {
ioThreshold := sd.Capacity.IOThreshold // need a copy
ioThresholdMap[sd.StoreID] = &ioThreshold
}
threshold := pauseReplicationIOThreshold.Get(&s.cfg.Settings.SV)
if threshold <= 0 {
threshold = math.MaxFloat64
}
old, cur := s.ioThresholds.Replace(ioThresholdMap, threshold)
// Log whenever the set of overloaded stores changes.
shouldLog := log.V(1) || old.seq != cur.seq
if shouldLog {
log.Infof(
s.AnnotateCtx(context.Background()), "pausable stores: %+v", cur)
}
}
func (s *Store) updateLivenessMap() {
nextMap := s.cfg.NodeLiveness.GetIsLiveMap()
for nodeID, entry := range nextMap {
if entry.IsLive {
continue
}
// Liveness claims that this node is down, but ConnHealth gets the last say
		// because we'd rather quiesce a range too little than too often. Note
// that this policy is different from the one governing the releasing of
// proposal quota; see comments over there.
//
// NB: This has false negatives when we haven't attempted to connect to the
// node yet, where it will return rpc.ErrNotHeartbeated regardless of
// whether the node is up or not. Once connected, the RPC circuit breakers
// will continually probe the connection. The check can also have false
// positives if the node goes down after populating the map, but that
// matters even less.
entry.IsLive = s.cfg.NodeDialer.ConnHealth(nodeID, rpc.SystemClass) == nil
nextMap[nodeID] = entry
}
s.livenessMap.Store(nextMap)
}
// Since coalescing heartbeats adds latency to heartbeat messages, it is
// beneficial to have it run on a faster cycle than once per tick, so that
// the delay does not impact latency-sensitive features such as quiescence.
func (s *Store) coalescedHeartbeatsLoop(ctx context.Context) {
ticker := time.NewTicker(s.cfg.CoalescedHeartbeatsInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
s.sendQueuedHeartbeats(ctx)
case <-s.stopper.ShouldQuiesce():
return
}
}
}
// sendQueuedHeartbeatsToNode requires that the s.coalescedMu lock is held. It
// returns the number of heartbeats that were sent.
func (s *Store) sendQueuedHeartbeatsToNode(
ctx context.Context, beats, resps []kvserverpb.RaftHeartbeat, to roachpb.StoreIdent,
) int {
var msgType raftpb.MessageType
if len(beats) == 0 && len(resps) == 0 {
return 0
} else if len(resps) == 0 {
msgType = raftpb.MsgHeartbeat
} else if len(beats) == 0 {
msgType = raftpb.MsgHeartbeatResp
} else {
log.Fatal(ctx, "cannot coalesce both heartbeats and responses")
}
chReq := newRaftMessageRequest()
*chReq = kvserverpb.RaftMessageRequest{
RangeID: 0,
ToReplica: roachpb.ReplicaDescriptor{
NodeID: to.NodeID,
StoreID: to.StoreID,
ReplicaID: 0,
},
FromReplica: roachpb.ReplicaDescriptor{
NodeID: s.Ident.NodeID,
StoreID: s.Ident.StoreID,
},
Message: raftpb.Message{
Type: msgType,
},
Heartbeats: beats,
HeartbeatResps: resps,
}
if log.V(4) {
log.Infof(ctx, "sending raft request (coalesced) %+v", chReq)
}
if !s.cfg.Transport.SendAsync(chReq, rpc.SystemClass) {
for _, beat := range beats {
if repl, ok := s.mu.replicasByRangeID.Load(beat.RangeID); ok {
repl.addUnreachableRemoteReplica(beat.ToReplicaID)
}
}
for _, resp := range resps {
if repl, ok := s.mu.replicasByRangeID.Load(resp.RangeID); ok {
repl.addUnreachableRemoteReplica(resp.ToReplicaID)
}
}
return 0
}
return len(beats) + len(resps)
}
func (s *Store) sendQueuedHeartbeats(ctx context.Context) {
s.coalescedMu.Lock()
heartbeats := s.coalescedMu.heartbeats
heartbeatResponses := s.coalescedMu.heartbeatResponses
s.coalescedMu.heartbeats = map[roachpb.StoreIdent][]kvserverpb.RaftHeartbeat{}
s.coalescedMu.heartbeatResponses = map[roachpb.StoreIdent][]kvserverpb.RaftHeartbeat{}
s.coalescedMu.Unlock()
var beatsSent int
for to, beats := range heartbeats {
beatsSent += s.sendQueuedHeartbeatsToNode(ctx, beats, nil, to)
}
for to, resps := range heartbeatResponses {
beatsSent += s.sendQueuedHeartbeatsToNode(ctx, nil, resps, to)
}
s.metrics.RaftCoalescedHeartbeatsPending.Update(int64(beatsSent))
}
func (s *Store) updateCapacityGauges(ctx context.Context) error {
desc, err := s.Descriptor(ctx, false /* useCached */)
if err != nil {
return err
}
s.metrics.Capacity.Update(desc.Capacity.Capacity)
s.metrics.Available.Update(desc.Capacity.Available)
s.metrics.Used.Update(desc.Capacity.Used)
return nil
}
| pkg/kv/kvserver/store_raft.go | 0 | https://github.com/cockroachdb/cockroach/commit/66345a5db9de739faf4edbbaaae4c606daaec03f | [
0.0013638357631862164,
0.00018878096307162195,
0.00015969235391821712,
0.00017091147310566157,
0.0001362780312774703
] |
{
"id": 0,
"code_window": [
"\tplannercore \"github.com/pingcap/tidb/planner/core\"\n",
"\t\"github.com/pingcap/tidb/privilege\"\n",
"\t\"github.com/pingcap/tidb/sessionctx\"\n",
"\t\"github.com/pingcap/tidb/sessionctx/stmtctx\"\n",
"\t\"github.com/pingcap/tidb/sessionctx/variable\"\n",
"\ttikvstore \"github.com/pingcap/tidb/store/tikv/kv\"\n",
"\ttikvutil \"github.com/pingcap/tidb/store/tikv/util\"\n",
"\t\"github.com/pingcap/tidb/table\"\n",
"\t\"github.com/pingcap/tidb/table/tables\"\n",
"\t\"github.com/pingcap/tidb/tablecodec\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/store/tikv\"\n"
],
"file_path": "executor/executor.go",
"type": "add",
"edit_start_line_idx": 49
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"context"
"fmt"
"math"
"runtime"
"runtime/trace"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/cznic/mathutil"
"github.com/opentracing/opentracing-go"
"github.com/pingcap/errors"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/auth"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/domain/infosync"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/planner"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/privilege"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
tikvstore "github.com/pingcap/tidb/store/tikv/kv"
tikvutil "github.com/pingcap/tidb/store/tikv/util"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/admin"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/disk"
"github.com/pingcap/tidb/util/execdetails"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/memory"
"go.uber.org/zap"
)
var (
_ Executor = &baseExecutor{}
_ Executor = &CheckTableExec{}
_ Executor = &HashAggExec{}
_ Executor = &HashJoinExec{}
_ Executor = &IndexLookUpExecutor{}
_ Executor = &IndexReaderExecutor{}
_ Executor = &LimitExec{}
_ Executor = &MaxOneRowExec{}
_ Executor = &MergeJoinExec{}
_ Executor = &ProjectionExec{}
_ Executor = &SelectionExec{}
_ Executor = &SelectLockExec{}
_ Executor = &ShowNextRowIDExec{}
_ Executor = &ShowDDLExec{}
_ Executor = &ShowDDLJobsExec{}
_ Executor = &ShowDDLJobQueriesExec{}
_ Executor = &SortExec{}
_ Executor = &StreamAggExec{}
_ Executor = &TableDualExec{}
_ Executor = &TableReaderExecutor{}
_ Executor = &TableScanExec{}
_ Executor = &TopNExec{}
_ Executor = &UnionExec{}
// GlobalMemoryUsageTracker is the ancestor of all the Executors' memory tracker and GlobalMemory Tracker
GlobalMemoryUsageTracker *memory.Tracker
// GlobalDiskUsageTracker is the ancestor of all the Executors' disk tracker
GlobalDiskUsageTracker *disk.Tracker
)
type baseExecutor struct {
ctx sessionctx.Context
id int
schema *expression.Schema // output schema
initCap int
maxChunkSize int
children []Executor
retFieldTypes []*types.FieldType
runtimeStats *execdetails.BasicRuntimeStats
}
const (
// globalPanicStorageExceed represents the panic message when out of storage quota.
globalPanicStorageExceed string = "Out Of Global Storage Quota!"
// globalPanicMemoryExceed represents the panic message when out of memory limit.
globalPanicMemoryExceed string = "Out Of Global Memory Limit!"
)
// globalPanicOnExceed panics when GlobalDiskUsageTracker storage usage exceeds storage quota.
type globalPanicOnExceed struct {
memory.BaseOOMAction
mutex sync.Mutex // For synchronization.
}
func init() {
action := &globalPanicOnExceed{}
GlobalMemoryUsageTracker = memory.NewGlobalTracker(memory.LabelForGlobalMemory, -1)
GlobalMemoryUsageTracker.SetActionOnExceed(action)
GlobalDiskUsageTracker = disk.NewGlobalTrcaker(memory.LabelForGlobalStorage, -1)
GlobalDiskUsageTracker.SetActionOnExceed(action)
}
// SetLogHook sets a hook for PanicOnExceed.
func (a *globalPanicOnExceed) SetLogHook(hook func(uint64)) {}
// Action panics when storage usage exceeds storage quota.
func (a *globalPanicOnExceed) Action(t *memory.Tracker) {
a.mutex.Lock()
defer a.mutex.Unlock()
msg := ""
switch t.Label() {
case memory.LabelForGlobalStorage:
msg = globalPanicStorageExceed
case memory.LabelForGlobalMemory:
msg = globalPanicMemoryExceed
default:
msg = "Out of Unknown Resource Quota!"
}
panic(msg)
}
// GetPriority get the priority of the Action
func (a *globalPanicOnExceed) GetPriority() int64 {
return memory.DefPanicPriority
}
// base returns the baseExecutor of an executor, don't override this method!
func (e *baseExecutor) base() *baseExecutor {
return e
}
// Open initializes children recursively and "childrenResults" according to children's schemas.
func (e *baseExecutor) Open(ctx context.Context) error {
for _, child := range e.children {
err := child.Open(ctx)
if err != nil {
return err
}
}
return nil
}
// Close closes all executors and release all resources.
func (e *baseExecutor) Close() error {
var firstErr error
for _, src := range e.children {
if err := src.Close(); err != nil && firstErr == nil {
firstErr = err
}
}
return firstErr
}
// Schema returns the current baseExecutor's schema. If it is nil, then create and return a new one.
func (e *baseExecutor) Schema() *expression.Schema {
if e.schema == nil {
return expression.NewSchema()
}
return e.schema
}
// newFirstChunk creates a new chunk to buffer current executor's result.
func newFirstChunk(e Executor) *chunk.Chunk {
base := e.base()
return chunk.New(base.retFieldTypes, base.initCap, base.maxChunkSize)
}
// newList creates a new List to buffer current executor's result.
func newList(e Executor) *chunk.List {
base := e.base()
return chunk.NewList(base.retFieldTypes, base.initCap, base.maxChunkSize)
}
// retTypes returns all output column types.
func retTypes(e Executor) []*types.FieldType {
base := e.base()
return base.retFieldTypes
}
// Next fills multiple rows into a chunk.
func (e *baseExecutor) Next(ctx context.Context, req *chunk.Chunk) error {
return nil
}
func (e *baseExecutor) updateDeltaForTableID(id int64) {
txnCtx := e.ctx.GetSessionVars().TxnCtx
txnCtx.UpdateDeltaForTable(id, 0, 0, map[int64]int64{})
}
func newBaseExecutor(ctx sessionctx.Context, schema *expression.Schema, id int, children ...Executor) baseExecutor {
e := baseExecutor{
children: children,
ctx: ctx,
id: id,
schema: schema,
initCap: ctx.GetSessionVars().InitChunkSize,
maxChunkSize: ctx.GetSessionVars().MaxChunkSize,
}
if ctx.GetSessionVars().StmtCtx.RuntimeStatsColl != nil {
if e.id > 0 {
e.runtimeStats = &execdetails.BasicRuntimeStats{}
e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(id, e.runtimeStats)
}
}
if schema != nil {
cols := schema.Columns
e.retFieldTypes = make([]*types.FieldType, len(cols))
for i := range cols {
e.retFieldTypes[i] = cols[i].RetType
}
}
return e
}
// Executor is the physical implementation of an algebra operator.
//
// In TiDB, all algebra operators are implemented as iterators, i.e., they
// support a simple Open-Next-Close protocol. See this paper for more details:
//
// "Volcano-An Extensible and Parallel Query Evaluation System"
//
// Different from Volcano's execution model, a "Next" function call in TiDB will
// return a batch of rows, rather than a single row as in Volcano.
// NOTE: Executors must call "chk.Reset()" before appending their results to it.
type Executor interface {
base() *baseExecutor
Open(context.Context) error
Next(ctx context.Context, req *chunk.Chunk) error
Close() error
Schema() *expression.Schema
}
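// For illustration only, a minimal driver loop over an Executor (mirroring the
// pattern used by plannercore.EvalSubqueryFirstRow in init() below) looks
// roughly like this:
//
//	if err := exec.Open(ctx); err != nil {
//		return err
//	}
//	defer terror.Call(exec.Close)
//	chk := newFirstChunk(exec)
//	for {
//		if err := Next(ctx, exec, chk); err != nil {
//			return err
//		}
//		if chk.NumRows() == 0 {
//			break // the executor is exhausted
//		}
//		// consume the rows in chk, then reuse it for the next batch
//	}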
// Next is a wrapper function on e.Next(); it handles some common code.
func Next(ctx context.Context, e Executor, req *chunk.Chunk) error {
base := e.base()
if base.runtimeStats != nil {
start := time.Now()
defer func() { base.runtimeStats.Record(time.Since(start), req.NumRows()) }()
}
sessVars := base.ctx.GetSessionVars()
if atomic.LoadUint32(&sessVars.Killed) == 1 {
return ErrQueryInterrupted
}
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span1 := span.Tracer().StartSpan(fmt.Sprintf("%T.Next", e), opentracing.ChildOf(span.Context()))
defer span1.Finish()
ctx = opentracing.ContextWithSpan(ctx, span1)
}
if trace.IsEnabled() {
defer trace.StartRegion(ctx, fmt.Sprintf("%T.Next", e)).End()
}
err := e.Next(ctx, req)
if err != nil {
return err
}
// recheck whether the session/query is killed during the Next()
if atomic.LoadUint32(&sessVars.Killed) == 1 {
err = ErrQueryInterrupted
}
return err
}
// CancelDDLJobsExec represents a cancel DDL jobs executor.
type CancelDDLJobsExec struct {
baseExecutor
cursor int
jobIDs []int64
errs []error
}
// Next implements the Executor Next interface.
func (e *CancelDDLJobsExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.GrowAndReset(e.maxChunkSize)
if e.cursor >= len(e.jobIDs) {
return nil
}
numCurBatch := mathutil.Min(req.Capacity(), len(e.jobIDs)-e.cursor)
for i := e.cursor; i < e.cursor+numCurBatch; i++ {
req.AppendString(0, fmt.Sprintf("%d", e.jobIDs[i]))
if e.errs[i] != nil {
req.AppendString(1, fmt.Sprintf("error: %v", e.errs[i]))
} else {
req.AppendString(1, "successful")
}
}
e.cursor += numCurBatch
return nil
}
// ShowNextRowIDExec represents a show the next row ID executor.
type ShowNextRowIDExec struct {
baseExecutor
tblName *ast.TableName
done bool
}
// Next implements the Executor Next interface.
func (e *ShowNextRowIDExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.Reset()
if e.done {
return nil
}
is := domain.GetDomain(e.ctx).InfoSchema()
tbl, err := is.TableByName(e.tblName.Schema, e.tblName.Name)
if err != nil {
return err
}
tblMeta := tbl.Meta()
allocators := tbl.Allocators(e.ctx)
for _, alloc := range allocators {
nextGlobalID, err := alloc.NextGlobalAutoID(tblMeta.ID)
if err != nil {
return err
}
var colName, idType string
switch alloc.GetType() {
case autoid.RowIDAllocType, autoid.AutoIncrementType:
idType = "AUTO_INCREMENT"
if col := tblMeta.GetAutoIncrementColInfo(); col != nil {
colName = col.Name.O
} else {
colName = model.ExtraHandleName.O
}
case autoid.AutoRandomType:
idType = "AUTO_RANDOM"
colName = tblMeta.GetPkName().O
case autoid.SequenceType:
idType = "SEQUENCE"
colName = ""
default:
return autoid.ErrInvalidAllocatorType.GenWithStackByArgs()
}
req.AppendString(0, e.tblName.Schema.O)
req.AppendString(1, e.tblName.Name.O)
req.AppendString(2, colName)
req.AppendInt64(3, nextGlobalID)
req.AppendString(4, idType)
}
e.done = true
return nil
}
// ShowDDLExec represents a show DDL executor.
type ShowDDLExec struct {
baseExecutor
ddlOwnerID string
selfID string
ddlInfo *admin.DDLInfo
done bool
}
// Next implements the Executor Next interface.
func (e *ShowDDLExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.Reset()
if e.done {
return nil
}
ddlJobs := ""
query := ""
l := len(e.ddlInfo.Jobs)
for i, job := range e.ddlInfo.Jobs {
ddlJobs += job.String()
query += job.Query
if i != l-1 {
ddlJobs += "\n"
query += "\n"
}
}
serverInfo, err := infosync.GetServerInfoByID(ctx, e.ddlOwnerID)
if err != nil {
return err
}
serverAddress := serverInfo.IP + ":" +
strconv.FormatUint(uint64(serverInfo.Port), 10)
req.AppendInt64(0, e.ddlInfo.SchemaVer)
req.AppendString(1, e.ddlOwnerID)
req.AppendString(2, serverAddress)
req.AppendString(3, ddlJobs)
req.AppendString(4, e.selfID)
req.AppendString(5, query)
e.done = true
return nil
}
// ShowDDLJobsExec represent a show DDL jobs executor.
type ShowDDLJobsExec struct {
baseExecutor
DDLJobRetriever
jobNumber int
is infoschema.InfoSchema
}
// DDLJobRetriever retrieves the DDL jobs.
// nolint:structcheck
type DDLJobRetriever struct {
runningJobs []*model.Job
historyJobIter *meta.LastJobIterator
cursor int
is infoschema.InfoSchema
activeRoles []*auth.RoleIdentity
cacheJobs []*model.Job
}
func (e *DDLJobRetriever) initial(txn kv.Transaction) error {
jobs, err := admin.GetDDLJobs(txn)
if err != nil {
return err
}
m := meta.NewMeta(txn)
e.historyJobIter, err = m.GetLastHistoryDDLJobsIterator()
if err != nil {
return err
}
e.runningJobs = jobs
e.cursor = 0
return nil
}
func (e *DDLJobRetriever) appendJobToChunk(req *chunk.Chunk, job *model.Job, checker privilege.Manager) {
schemaName := job.SchemaName
tableName := ""
finishTS := uint64(0)
if job.BinlogInfo != nil {
finishTS = job.BinlogInfo.FinishedTS
if job.BinlogInfo.TableInfo != nil {
tableName = job.BinlogInfo.TableInfo.Name.L
}
if len(schemaName) == 0 && job.BinlogInfo.DBInfo != nil {
schemaName = job.BinlogInfo.DBInfo.Name.L
}
}
	// For compatibility, the old version of DDL Job didn't store the schema name and table name.
if len(schemaName) == 0 {
schemaName = getSchemaName(e.is, job.SchemaID)
}
if len(tableName) == 0 {
tableName = getTableName(e.is, job.TableID)
}
startTime := ts2Time(job.StartTS)
finishTime := ts2Time(finishTS)
// Check the privilege.
if checker != nil && !checker.RequestVerification(e.activeRoles, strings.ToLower(schemaName), strings.ToLower(tableName), "", mysql.AllPrivMask) {
return
}
req.AppendInt64(0, job.ID)
req.AppendString(1, schemaName)
req.AppendString(2, tableName)
req.AppendString(3, job.Type.String())
req.AppendString(4, job.SchemaState.String())
req.AppendInt64(5, job.SchemaID)
req.AppendInt64(6, job.TableID)
req.AppendInt64(7, job.RowCount)
req.AppendTime(8, startTime)
if finishTS > 0 {
req.AppendTime(9, finishTime)
} else {
req.AppendNull(9)
}
req.AppendString(10, job.State.String())
}
func ts2Time(timestamp uint64) types.Time {
duration := time.Duration(math.Pow10(9-int(types.DefaultFsp))) * time.Nanosecond
t := model.TSConvert2Time(timestamp)
t.Truncate(duration)
return types.NewTime(types.FromGoTime(t), mysql.TypeDatetime, types.DefaultFsp)
}
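// ts2Time example (illustrative, assuming types.DefaultFsp is 0): the computed
// duration is math.Pow10(9-0) nanoseconds, i.e. one second, so start/finish
// timestamps are truncated to whole seconds before being rendered as DATETIME.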
// ShowDDLJobQueriesExec represents a show DDL job queries executor.
// The job IDs given by the 'admin show ddl job queries' statement are
// only searched in the latest 10 history jobs.
type ShowDDLJobQueriesExec struct {
baseExecutor
cursor int
jobs []*model.Job
jobIDs []int64
}
// Open implements the Executor Open interface.
func (e *ShowDDLJobQueriesExec) Open(ctx context.Context) error {
if err := e.baseExecutor.Open(ctx); err != nil {
return err
}
txn, err := e.ctx.Txn(true)
if err != nil {
return err
}
jobs, err := admin.GetDDLJobs(txn)
if err != nil {
return err
}
historyJobs, err := admin.GetHistoryDDLJobs(txn, admin.DefNumHistoryJobs)
if err != nil {
return err
}
e.jobs = append(e.jobs, jobs...)
e.jobs = append(e.jobs, historyJobs...)
return nil
}
// Next implements the Executor Next interface.
func (e *ShowDDLJobQueriesExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.GrowAndReset(e.maxChunkSize)
if e.cursor >= len(e.jobs) {
return nil
}
if len(e.jobIDs) >= len(e.jobs) {
return nil
}
numCurBatch := mathutil.Min(req.Capacity(), len(e.jobs)-e.cursor)
for _, id := range e.jobIDs {
for i := e.cursor; i < e.cursor+numCurBatch; i++ {
if id == e.jobs[i].ID {
req.AppendString(0, e.jobs[i].Query)
}
}
}
e.cursor += numCurBatch
return nil
}
// Open implements the Executor Open interface.
func (e *ShowDDLJobsExec) Open(ctx context.Context) error {
if err := e.baseExecutor.Open(ctx); err != nil {
return err
}
txn, err := e.ctx.Txn(true)
if err != nil {
return err
}
e.DDLJobRetriever.is = e.is
if e.jobNumber == 0 {
e.jobNumber = admin.DefNumHistoryJobs
}
err = e.DDLJobRetriever.initial(txn)
if err != nil {
return err
}
return nil
}
// Next implements the Executor Next interface.
func (e *ShowDDLJobsExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.GrowAndReset(e.maxChunkSize)
if (e.cursor - len(e.runningJobs)) >= e.jobNumber {
return nil
}
count := 0
// Append running ddl jobs.
if e.cursor < len(e.runningJobs) {
numCurBatch := mathutil.Min(req.Capacity(), len(e.runningJobs)-e.cursor)
for i := e.cursor; i < e.cursor+numCurBatch; i++ {
e.appendJobToChunk(req, e.runningJobs[i], nil)
}
e.cursor += numCurBatch
count += numCurBatch
}
// Append history ddl jobs.
var err error
if count < req.Capacity() {
num := req.Capacity() - count
remainNum := e.jobNumber - (e.cursor - len(e.runningJobs))
num = mathutil.Min(num, remainNum)
e.cacheJobs, err = e.historyJobIter.GetLastJobs(num, e.cacheJobs)
if err != nil {
return err
}
for _, job := range e.cacheJobs {
e.appendJobToChunk(req, job, nil)
}
e.cursor += len(e.cacheJobs)
}
return nil
}
func getSchemaName(is infoschema.InfoSchema, id int64) string {
var schemaName string
DBInfo, ok := is.SchemaByID(id)
if ok {
schemaName = DBInfo.Name.O
return schemaName
}
return schemaName
}
func getTableName(is infoschema.InfoSchema, id int64) string {
var tableName string
table, ok := is.TableByID(id)
if ok {
tableName = table.Meta().Name.O
return tableName
}
return tableName
}
// CheckTableExec represents a check table executor.
// It is built from the "admin check table" statement, and it checks if the
// index matches the records in the table.
type CheckTableExec struct {
baseExecutor
dbName string
table table.Table
indexInfos []*model.IndexInfo
srcs []*IndexLookUpExecutor
done bool
is infoschema.InfoSchema
exitCh chan struct{}
retCh chan error
checkIndex bool
}
// Open implements the Executor Open interface.
func (e *CheckTableExec) Open(ctx context.Context) error {
if err := e.baseExecutor.Open(ctx); err != nil {
return err
}
for _, src := range e.srcs {
if err := src.Open(ctx); err != nil {
return errors.Trace(err)
}
}
e.done = false
return nil
}
// Close implements the Executor Close interface.
func (e *CheckTableExec) Close() error {
var firstErr error
for _, src := range e.srcs {
if err := src.Close(); err != nil && firstErr == nil {
firstErr = err
}
}
return firstErr
}
func (e *CheckTableExec) checkTableIndexHandle(ctx context.Context, idxInfo *model.IndexInfo) error {
	// For a partitioned table, there will be multiple indexLookUpReaders for the same index on different partitions.
for _, src := range e.srcs {
if src.index.Name.L == idxInfo.Name.L {
err := e.checkIndexHandle(ctx, src)
if err != nil {
return err
}
}
}
return nil
}
func (e *CheckTableExec) checkIndexHandle(ctx context.Context, src *IndexLookUpExecutor) error {
cols := src.schema.Columns
retFieldTypes := make([]*types.FieldType, len(cols))
for i := range cols {
retFieldTypes[i] = cols[i].RetType
}
chk := chunk.New(retFieldTypes, e.initCap, e.maxChunkSize)
var err error
for {
err = Next(ctx, src, chk)
if err != nil {
break
}
if chk.NumRows() == 0 {
break
}
select {
case <-e.exitCh:
return nil
default:
}
}
e.retCh <- errors.Trace(err)
return errors.Trace(err)
}
func (e *CheckTableExec) handlePanic(r interface{}) {
if r != nil {
e.retCh <- errors.Errorf("%v", r)
}
}
// Next implements the Executor Next interface.
func (e *CheckTableExec) Next(ctx context.Context, req *chunk.Chunk) error {
if e.done || len(e.srcs) == 0 {
return nil
}
defer func() { e.done = true }()
idxNames := make([]string, 0, len(e.indexInfos))
for _, idx := range e.indexInfos {
idxNames = append(idxNames, idx.Name.O)
}
greater, idxOffset, err := admin.CheckIndicesCount(e.ctx, e.dbName, e.table.Meta().Name.O, idxNames)
if err != nil {
		// For the admin check index statement, for speed and compatibility, we don't do the checks below.
if e.checkIndex {
return errors.Trace(err)
}
if greater == admin.IdxCntGreater {
err = e.checkTableIndexHandle(ctx, e.indexInfos[idxOffset])
} else if greater == admin.TblCntGreater {
err = e.checkTableRecord(idxOffset)
}
if err != nil && admin.ErrDataInConsistent.Equal(err) {
return ErrAdminCheckTable.GenWithStack("%v err:%v", e.table.Meta().Name, err)
}
return errors.Trace(err)
}
// The number of table rows is equal to the number of index rows.
// TODO: Make the value of concurrency adjustable. And we can consider the number of records.
concurrency := 3
wg := sync.WaitGroup{}
for i := range e.srcs {
wg.Add(1)
go func(num int) {
defer wg.Done()
util.WithRecovery(func() {
err1 := e.checkIndexHandle(ctx, e.srcs[num])
if err1 != nil {
logutil.Logger(ctx).Info("check index handle failed", zap.Error(err1))
}
}, e.handlePanic)
}(i)
if (i+1)%concurrency == 0 {
wg.Wait()
}
}
for i := 0; i < len(e.srcs); i++ {
err = <-e.retCh
if err != nil {
return errors.Trace(err)
}
}
return nil
}
func (e *CheckTableExec) checkTableRecord(idxOffset int) error {
idxInfo := e.indexInfos[idxOffset]
txn, err := e.ctx.Txn(true)
if err != nil {
return err
}
if e.table.Meta().GetPartitionInfo() == nil {
idx := tables.NewIndex(e.table.Meta().ID, e.table.Meta(), idxInfo)
return admin.CheckRecordAndIndex(e.ctx, txn, e.table, idx)
}
info := e.table.Meta().GetPartitionInfo()
for _, def := range info.Definitions {
pid := def.ID
partition := e.table.(table.PartitionedTable).GetPartition(pid)
idx := tables.NewIndex(def.ID, e.table.Meta(), idxInfo)
if err := admin.CheckRecordAndIndex(e.ctx, txn, partition, idx); err != nil {
return errors.Trace(err)
}
}
return nil
}
// ShowSlowExec represents the executor of showing the slow queries.
// It is built from the "admin show slow" statement:
// admin show slow top [internal | all] N
// admin show slow recent N
type ShowSlowExec struct {
baseExecutor
ShowSlow *ast.ShowSlow
result []*domain.SlowQueryInfo
cursor int
}
// Open implements the Executor Open interface.
func (e *ShowSlowExec) Open(ctx context.Context) error {
if err := e.baseExecutor.Open(ctx); err != nil {
return err
}
dom := domain.GetDomain(e.ctx)
e.result = dom.ShowSlowQuery(e.ShowSlow)
return nil
}
// Next implements the Executor Next interface.
func (e *ShowSlowExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.Reset()
if e.cursor >= len(e.result) {
return nil
}
for e.cursor < len(e.result) && req.NumRows() < e.maxChunkSize {
slow := e.result[e.cursor]
req.AppendString(0, slow.SQL)
req.AppendTime(1, types.NewTime(types.FromGoTime(slow.Start), mysql.TypeTimestamp, types.MaxFsp))
req.AppendDuration(2, types.Duration{Duration: slow.Duration, Fsp: types.MaxFsp})
req.AppendString(3, slow.Detail.String())
if slow.Succ {
req.AppendInt64(4, 1)
} else {
req.AppendInt64(4, 0)
}
req.AppendUint64(5, slow.ConnID)
req.AppendUint64(6, slow.TxnTS)
req.AppendString(7, slow.User)
req.AppendString(8, slow.DB)
req.AppendString(9, slow.TableIDs)
req.AppendString(10, slow.IndexNames)
if slow.Internal {
req.AppendInt64(11, 1)
} else {
req.AppendInt64(11, 0)
}
req.AppendString(12, slow.Digest)
e.cursor++
}
return nil
}
// SelectLockExec represents a select lock executor.
// It is built from the "SELECT .. FOR UPDATE" or the "SELECT .. LOCK IN SHARE MODE" statement.
// For "SELECT .. FOR UPDATE" statement, it locks every row key from source Executor.
// After the execution, the keys are buffered in transaction, and will be sent to KV
// when doing commit. If there is any key already locked by another transaction,
// the transaction will rollback and retry.
type SelectLockExec struct {
baseExecutor
Lock *ast.SelectLockInfo
keys []kv.Key
tblID2Handle map[int64][]plannercore.HandleCols
partitionedTable []table.PartitionedTable
// tblID2Table is cached to reduce cost.
tblID2Table map[int64]table.PartitionedTable
}
// Open implements the Executor Open interface.
func (e *SelectLockExec) Open(ctx context.Context) error {
if err := e.baseExecutor.Open(ctx); err != nil {
return err
}
if len(e.tblID2Handle) > 0 && len(e.partitionedTable) > 0 {
e.tblID2Table = make(map[int64]table.PartitionedTable, len(e.partitionedTable))
for id := range e.tblID2Handle {
for _, p := range e.partitionedTable {
if id == p.Meta().ID {
e.tblID2Table[id] = p
}
}
}
}
return nil
}
// Next implements the Executor Next interface.
func (e *SelectLockExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.GrowAndReset(e.maxChunkSize)
err := Next(ctx, e.children[0], req)
if err != nil {
return err
}
// If there's no handle or it's not a `SELECT FOR UPDATE` statement.
if len(e.tblID2Handle) == 0 || (!plannercore.IsSelectForUpdateLockType(e.Lock.LockType)) {
return nil
}
if req.NumRows() > 0 {
iter := chunk.NewIterator4Chunk(req)
for row := iter.Begin(); row != iter.End(); row = iter.Next() {
for id, cols := range e.tblID2Handle {
physicalID := id
if pt, ok := e.tblID2Table[id]; ok {
// On a partitioned table, we have to use physical ID to encode the lock key!
p, err := pt.GetPartitionByRow(e.ctx, row.GetDatumRow(e.base().retFieldTypes))
if err != nil {
return err
}
physicalID = p.GetPhysicalID()
}
for _, col := range cols {
handle, err := col.BuildHandle(row)
if err != nil {
return err
}
e.keys = append(e.keys, tablecodec.EncodeRowKeyWithHandle(physicalID, handle))
}
}
}
return nil
}
lockWaitTime := e.ctx.GetSessionVars().LockWaitTimeout
if e.Lock.LockType == ast.SelectLockForUpdateNoWait {
lockWaitTime = kv.LockNoWait
} else if e.Lock.LockType == ast.SelectLockForUpdateWaitN {
lockWaitTime = int64(e.Lock.WaitSec) * 1000
}
if len(e.tblID2Handle) > 0 {
for id := range e.tblID2Handle {
e.updateDeltaForTableID(id)
}
}
if len(e.partitionedTable) > 0 {
for _, p := range e.partitionedTable {
pid := p.Meta().ID
e.updateDeltaForTableID(pid)
}
}
return doLockKeys(ctx, e.ctx, newLockCtx(e.ctx.GetSessionVars(), lockWaitTime), e.keys...)
}
func newLockCtx(seVars *variable.SessionVars, lockWaitTime int64) *tikvstore.LockCtx {
return &tikvstore.LockCtx{
Killed: &seVars.Killed,
ForUpdateTS: seVars.TxnCtx.GetForUpdateTS(),
LockWaitTime: lockWaitTime,
WaitStartTime: seVars.StmtCtx.GetLockWaitStartTime(),
PessimisticLockWaited: &seVars.StmtCtx.PessimisticLockWaited,
LockKeysDuration: &seVars.StmtCtx.LockKeysDuration,
LockKeysCount: &seVars.StmtCtx.LockKeysCount,
LockExpired: &seVars.TxnCtx.LockExpire,
}
}
// doLockKeys is the main entry for pessimistic lock keys
// waitTime means how long the lock operation will wait (in milliseconds) if the target key is
// already locked by others; it is used for the (select for update nowait) situation,
// except that 0 means alwaysWait and 1 means nowait.
func doLockKeys(ctx context.Context, se sessionctx.Context, lockCtx *tikvstore.LockCtx, keys ...kv.Key) error {
sctx := se.GetSessionVars().StmtCtx
if !sctx.InUpdateStmt && !sctx.InDeleteStmt {
atomic.StoreUint32(&se.GetSessionVars().TxnCtx.ForUpdate, 1)
}
// Lock keys only once when finished fetching all results.
txn, err := se.Txn(true)
if err != nil {
return err
}
var lockKeyStats *tikvutil.LockKeysDetails
ctx = context.WithValue(ctx, tikvutil.LockKeysDetailCtxKey, &lockKeyStats)
err = txn.LockKeys(tikvutil.SetSessionID(ctx, se.GetSessionVars().ConnectionID), lockCtx, keys...)
if lockKeyStats != nil {
sctx.MergeLockKeysExecDetails(lockKeyStats)
}
return err
}
// LimitExec represents limit executor
// It ignores 'Offset' rows from src, then returns 'Count' rows at maximum.
type LimitExec struct {
baseExecutor
begin uint64
end uint64
cursor uint64
// meetFirstBatch represents whether we have met the first valid Chunk from child.
meetFirstBatch bool
childResult *chunk.Chunk
	// columnIdxsUsedByChild keeps the column indexes of the child executor that are used for inline projection
columnIdxsUsedByChild []int
}
// Next implements the Executor Next interface.
func (e *LimitExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.Reset()
if e.cursor >= e.end {
return nil
}
for !e.meetFirstBatch {
// transfer req's requiredRows to childResult and then adjust it in childResult
e.childResult = e.childResult.SetRequiredRows(req.RequiredRows(), e.maxChunkSize)
err := Next(ctx, e.children[0], e.adjustRequiredRows(e.childResult))
if err != nil {
return err
}
batchSize := uint64(e.childResult.NumRows())
// no more data.
if batchSize == 0 {
return nil
}
if newCursor := e.cursor + batchSize; newCursor >= e.begin {
e.meetFirstBatch = true
begin, end := e.begin-e.cursor, batchSize
if newCursor > e.end {
end = e.end - e.cursor
}
e.cursor += end
if begin == end {
break
}
if e.columnIdxsUsedByChild != nil {
req.Append(e.childResult.Prune(e.columnIdxsUsedByChild), int(begin), int(end))
} else {
req.Append(e.childResult, int(begin), int(end))
}
return nil
}
e.cursor += batchSize
}
e.childResult.Reset()
e.childResult = e.childResult.SetRequiredRows(req.RequiredRows(), e.maxChunkSize)
e.adjustRequiredRows(e.childResult)
err := Next(ctx, e.children[0], e.childResult)
if err != nil {
return err
}
batchSize := uint64(e.childResult.NumRows())
// no more data.
if batchSize == 0 {
return nil
}
if e.cursor+batchSize > e.end {
e.childResult.TruncateTo(int(e.end - e.cursor))
batchSize = e.end - e.cursor
}
e.cursor += batchSize
if e.columnIdxsUsedByChild != nil {
for i, childIdx := range e.columnIdxsUsedByChild {
if err = req.SwapColumn(i, e.childResult, childIdx); err != nil {
return err
}
}
} else {
req.SwapColumns(e.childResult)
}
return nil
}
// Open implements the Executor Open interface.
func (e *LimitExec) Open(ctx context.Context) error {
if err := e.baseExecutor.Open(ctx); err != nil {
return err
}
e.childResult = newFirstChunk(e.children[0])
e.cursor = 0
e.meetFirstBatch = e.begin == 0
return nil
}
// Close implements the Executor Close interface.
func (e *LimitExec) Close() error {
e.childResult = nil
return e.baseExecutor.Close()
}
func (e *LimitExec) adjustRequiredRows(chk *chunk.Chunk) *chunk.Chunk {
	// the maximum number of rows the LimitExec still needs to read
limitTotal := int(e.end - e.cursor)
var limitRequired int
if e.cursor < e.begin {
		// if cursor is less than begin, it has to read (begin-cursor) rows to ignore
// and then read chk.RequiredRows() rows to return,
// so the limit is (begin-cursor)+chk.RequiredRows().
limitRequired = int(e.begin) - int(e.cursor) + chk.RequiredRows()
} else {
// if cursor is equal or larger than begin, just read chk.RequiredRows() rows to return.
limitRequired = chk.RequiredRows()
}
return chk.SetRequiredRows(mathutil.Min(limitTotal, limitRequired), e.maxChunkSize)
}
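// Worked example for adjustRequiredRows (illustrative numbers): with begin=5,
// end=105, cursor=0 and chk.RequiredRows()=32, limitTotal is 105 and
// limitRequired is 5-0+32=37, so the child is asked for min(105, 37)=37 rows:
// 5 rows to skip plus 32 rows to return.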
func init() {
// While doing optimization in the plan package, we need to execute uncorrelated subquery,
// but the plan package cannot import the executor package because of the dependency cycle.
// So we assign a function implemented in the executor package to the plan package to avoid the dependency cycle.
plannercore.EvalSubqueryFirstRow = func(ctx context.Context, p plannercore.PhysicalPlan, is infoschema.InfoSchema, sctx sessionctx.Context) ([]types.Datum, error) {
defer func(begin time.Time) {
s := sctx.GetSessionVars()
s.RewritePhaseInfo.PreprocessSubQueries++
s.RewritePhaseInfo.DurationPreprocessSubQuery += time.Since(begin)
}(time.Now())
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span1 := span.Tracer().StartSpan("executor.EvalSubQuery", opentracing.ChildOf(span.Context()))
defer span1.Finish()
ctx = opentracing.ContextWithSpan(ctx, span1)
}
e := &executorBuilder{is: is, ctx: sctx}
exec := e.build(p)
if e.err != nil {
return nil, e.err
}
err := exec.Open(ctx)
defer terror.Call(exec.Close)
if err != nil {
return nil, err
}
chk := newFirstChunk(exec)
err = Next(ctx, exec, chk)
if err != nil {
return nil, err
}
if chk.NumRows() == 0 {
return nil, nil
}
row := chk.GetRow(0).GetDatumRow(retTypes(exec))
return row, err
}
}
// TableDualExec represents a dual table executor.
type TableDualExec struct {
baseExecutor
// numDualRows can only be 0 or 1.
numDualRows int
numReturned int
}
// Open implements the Executor Open interface.
func (e *TableDualExec) Open(ctx context.Context) error {
e.numReturned = 0
return nil
}
// Next implements the Executor Next interface.
func (e *TableDualExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.Reset()
if e.numReturned >= e.numDualRows {
return nil
}
if e.Schema().Len() == 0 {
req.SetNumVirtualRows(1)
} else {
for i := range e.Schema().Columns {
req.AppendNull(i)
}
}
e.numReturned = e.numDualRows
return nil
}
// SelectionExec represents a filter executor.
type SelectionExec struct {
baseExecutor
batched bool
filters []expression.Expression
selected []bool
inputIter *chunk.Iterator4Chunk
inputRow chunk.Row
childResult *chunk.Chunk
memTracker *memory.Tracker
}
// Open implements the Executor Open interface.
func (e *SelectionExec) Open(ctx context.Context) error {
if err := e.baseExecutor.Open(ctx); err != nil {
return err
}
return e.open(ctx)
}
func (e *SelectionExec) open(ctx context.Context) error {
e.memTracker = memory.NewTracker(e.id, -1)
e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker)
e.childResult = newFirstChunk(e.children[0])
e.memTracker.Consume(e.childResult.MemoryUsage())
e.batched = expression.Vectorizable(e.filters)
if e.batched {
e.selected = make([]bool, 0, chunk.InitialCapacity)
}
e.inputIter = chunk.NewIterator4Chunk(e.childResult)
e.inputRow = e.inputIter.End()
return nil
}
// Close implements plannercore.Plan Close interface.
func (e *SelectionExec) Close() error {
e.memTracker.Consume(-e.childResult.MemoryUsage())
e.childResult = nil
e.selected = nil
return e.baseExecutor.Close()
}
// Next implements the Executor Next interface.
func (e *SelectionExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.GrowAndReset(e.maxChunkSize)
if !e.batched {
return e.unBatchedNext(ctx, req)
}
for {
for ; e.inputRow != e.inputIter.End(); e.inputRow = e.inputIter.Next() {
if !e.selected[e.inputRow.Idx()] {
continue
}
if req.IsFull() {
return nil
}
req.AppendRow(e.inputRow)
}
mSize := e.childResult.MemoryUsage()
err := Next(ctx, e.children[0], e.childResult)
e.memTracker.Consume(e.childResult.MemoryUsage() - mSize)
if err != nil {
return err
}
// no more data.
if e.childResult.NumRows() == 0 {
return nil
}
e.selected, err = expression.VectorizedFilter(e.ctx, e.filters, e.inputIter, e.selected)
if err != nil {
return err
}
e.inputRow = e.inputIter.Begin()
}
}
// unBatchedNext filters input rows one by one and returns once an input row is selected.
// For sql with "SETVAR" in filter and "GETVAR" in projection, for example: "SELECT @a FROM t WHERE (@a := 2) > 0",
// we have to set batch size to 1 to do the evaluation of filter and projection.
func (e *SelectionExec) unBatchedNext(ctx context.Context, chk *chunk.Chunk) error {
for {
for ; e.inputRow != e.inputIter.End(); e.inputRow = e.inputIter.Next() {
selected, _, err := expression.EvalBool(e.ctx, e.filters, e.inputRow)
if err != nil {
return err
}
if selected {
chk.AppendRow(e.inputRow)
e.inputRow = e.inputIter.Next()
return nil
}
}
mSize := e.childResult.MemoryUsage()
err := Next(ctx, e.children[0], e.childResult)
e.memTracker.Consume(e.childResult.MemoryUsage() - mSize)
if err != nil {
return err
}
e.inputRow = e.inputIter.Begin()
// no more data.
if e.childResult.NumRows() == 0 {
return nil
}
}
}
// TableScanExec is a table scan executor without result fields.
type TableScanExec struct {
baseExecutor
t table.Table
columns []*model.ColumnInfo
virtualTableChunkList *chunk.List
virtualTableChunkIdx int
}
// Next implements the Executor Next interface.
func (e *TableScanExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.GrowAndReset(e.maxChunkSize)
return e.nextChunk4InfoSchema(ctx, req)
}
func (e *TableScanExec) nextChunk4InfoSchema(ctx context.Context, chk *chunk.Chunk) error {
chk.GrowAndReset(e.maxChunkSize)
if e.virtualTableChunkList == nil {
e.virtualTableChunkList = chunk.NewList(retTypes(e), e.initCap, e.maxChunkSize)
columns := make([]*table.Column, e.schema.Len())
for i, colInfo := range e.columns {
columns[i] = table.ToColumn(colInfo)
}
mutableRow := chunk.MutRowFromTypes(retTypes(e))
type tableIter interface {
IterRecords(sessionctx.Context, []*table.Column, table.RecordIterFunc) error
}
err := (e.t.(tableIter)).IterRecords(e.ctx, columns, func(_ kv.Handle, rec []types.Datum, cols []*table.Column) (bool, error) {
mutableRow.SetDatums(rec...)
e.virtualTableChunkList.AppendRow(mutableRow.ToRow())
return true, nil
})
if err != nil {
return err
}
}
// no more data.
if e.virtualTableChunkIdx >= e.virtualTableChunkList.NumChunks() {
return nil
}
virtualTableChunk := e.virtualTableChunkList.GetChunk(e.virtualTableChunkIdx)
e.virtualTableChunkIdx++
chk.SwapColumns(virtualTableChunk)
return nil
}
// Open implements the Executor Open interface.
func (e *TableScanExec) Open(ctx context.Context) error {
e.virtualTableChunkList = nil
return nil
}
// MaxOneRowExec checks if the number of rows that a query returns is at most one.
// It's built from subquery expression.
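// As an illustrative note (not part of the original source): a scalar subquery such as
// "SELECT (SELECT b FROM t) FROM s" is expected to produce at most one row; Next below
// returns ErrSubqueryMoreThan1Row if the child yields more than one row, and appends a
// NULL row if the child yields none.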
type MaxOneRowExec struct {
baseExecutor
evaluated bool
}
// Open implements the Executor Open interface.
func (e *MaxOneRowExec) Open(ctx context.Context) error {
if err := e.baseExecutor.Open(ctx); err != nil {
return err
}
e.evaluated = false
return nil
}
// Next implements the Executor Next interface.
func (e *MaxOneRowExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.Reset()
if e.evaluated {
return nil
}
e.evaluated = true
err := Next(ctx, e.children[0], req)
if err != nil {
return err
}
if num := req.NumRows(); num == 0 {
for i := range e.schema.Columns {
req.AppendNull(i)
}
return nil
} else if num != 1 {
return ErrSubqueryMoreThan1Row
}
childChunk := newFirstChunk(e.children[0])
err = Next(ctx, e.children[0], childChunk)
if err != nil {
return err
}
if childChunk.NumRows() != 0 {
return ErrSubqueryMoreThan1Row
}
return nil
}
// UnionExec pulls all its children's results and returns them to its parent directly.
// A "resultPuller" is started for every child to pull results from that child and push them to the "resultPool"; the
// "Chunk" it uses is obtained from the corresponding "resourcePool". All resultPullers run concurrently.
// +----------------+
// +---> resourcePool 1 ---> | resultPuller 1 |-----+
// | +----------------+ |
// | |
// | +----------------+ v
// +---> resourcePool 2 ---> | resultPuller 2 |-----> resultPool ---+
// | +----------------+ ^ |
// | ...... | |
// | +----------------+ | |
// +---> resourcePool n ---> | resultPuller n |-----+ |
// | +----------------+ |
// | |
// | +-------------+ |
// |--------------------------| main thread | <---------------------+
// +-------------+
type UnionExec struct {
baseExecutor
concurrency int
childIDChan chan int
stopFetchData atomic.Value
finished chan struct{}
resourcePools []chan *chunk.Chunk
resultPool chan *unionWorkerResult
results []*chunk.Chunk
wg sync.WaitGroup
initialized bool
}
// unionWorkerResult stores the result of a union worker.
// A "resultPuller" is started for every child to pull results from that child; unionWorkerResult stores each pulled result.
// "src" is used for Chunk reuse: after pulling a result from "resultPool", the main thread must push a valid unused Chunk to
// "src" so that the corresponding "resultPuller" can continue to work.
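// As a rough sketch (not part of the original source), the hand-off on the main-thread
// side looks roughly like:
//
//	result := <-e.resultPool    // take a worker's result
//	req.SwapColumns(result.chk) // hand the rows to the parent executor
//	result.src <- result.chk    // return the chunk so the puller can reuse it
//
// which mirrors what UnionExec.Next does below.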
type unionWorkerResult struct {
chk *chunk.Chunk
err error
src chan<- *chunk.Chunk
}
func (e *UnionExec) waitAllFinished() {
e.wg.Wait()
close(e.resultPool)
}
// Open implements the Executor Open interface.
func (e *UnionExec) Open(ctx context.Context) error {
if err := e.baseExecutor.Open(ctx); err != nil {
return err
}
e.stopFetchData.Store(false)
e.initialized = false
e.finished = make(chan struct{})
return nil
}
func (e *UnionExec) initialize(ctx context.Context) {
if e.concurrency > len(e.children) {
e.concurrency = len(e.children)
}
for i := 0; i < e.concurrency; i++ {
e.results = append(e.results, newFirstChunk(e.children[0]))
}
e.resultPool = make(chan *unionWorkerResult, e.concurrency)
e.resourcePools = make([]chan *chunk.Chunk, e.concurrency)
e.childIDChan = make(chan int, len(e.children))
for i := 0; i < e.concurrency; i++ {
e.resourcePools[i] = make(chan *chunk.Chunk, 1)
e.resourcePools[i] <- e.results[i]
e.wg.Add(1)
go e.resultPuller(ctx, i)
}
for i := 0; i < len(e.children); i++ {
e.childIDChan <- i
}
close(e.childIDChan)
go e.waitAllFinished()
}
func (e *UnionExec) resultPuller(ctx context.Context, workerID int) {
result := &unionWorkerResult{
err: nil,
chk: nil,
src: e.resourcePools[workerID],
}
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
stackSize := runtime.Stack(buf, false)
buf = buf[:stackSize]
logutil.Logger(ctx).Error("resultPuller panicked", zap.String("stack", string(buf)))
result.err = errors.Errorf("%v", r)
e.resultPool <- result
e.stopFetchData.Store(true)
}
e.wg.Done()
}()
for childID := range e.childIDChan {
for {
if e.stopFetchData.Load().(bool) {
return
}
select {
case <-e.finished:
return
case result.chk = <-e.resourcePools[workerID]:
}
result.err = Next(ctx, e.children[childID], result.chk)
if result.err == nil && result.chk.NumRows() == 0 {
e.resourcePools[workerID] <- result.chk
break
}
e.resultPool <- result
if result.err != nil {
e.stopFetchData.Store(true)
return
}
}
}
}
// Next implements the Executor Next interface.
func (e *UnionExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.GrowAndReset(e.maxChunkSize)
if !e.initialized {
e.initialize(ctx)
e.initialized = true
}
result, ok := <-e.resultPool
if !ok {
return nil
}
if result.err != nil {
return errors.Trace(result.err)
}
if result.chk.NumCols() != req.NumCols() {
return errors.Errorf("Internal error: UnionExec chunk column count mismatch, req: %d, result: %d",
req.NumCols(), result.chk.NumCols())
}
req.SwapColumns(result.chk)
result.src <- result.chk
return nil
}
// Close implements the Executor Close interface.
func (e *UnionExec) Close() error {
if e.finished != nil {
close(e.finished)
}
e.results = nil
if e.resultPool != nil {
for range e.resultPool {
}
}
e.resourcePools = nil
if e.childIDChan != nil {
for range e.childIDChan {
}
}
return e.baseExecutor.Close()
}
// ResetContextOfStmt resets the StmtContext and session variables.
// Before every execution, we must clear statement context.
func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) {
vars := ctx.GetSessionVars()
sc := &stmtctx.StatementContext{
TimeZone: vars.Location(),
MemTracker: memory.NewTracker(memory.LabelForSQLText, vars.MemQuotaQuery),
DiskTracker: disk.NewTracker(memory.LabelForSQLText, -1),
TaskID: stmtctx.AllocateTaskID(),
}
sc.MemTracker.AttachToGlobalTracker(GlobalMemoryUsageTracker)
globalConfig := config.GetGlobalConfig()
if globalConfig.OOMUseTmpStorage && GlobalDiskUsageTracker != nil {
sc.DiskTracker.AttachToGlobalTracker(GlobalDiskUsageTracker)
}
switch globalConfig.OOMAction {
case config.OOMActionCancel:
action := &memory.PanicOnExceed{ConnID: ctx.GetSessionVars().ConnectionID}
action.SetLogHook(domain.GetDomain(ctx).ExpensiveQueryHandle().LogOnQueryExceedMemQuota)
sc.MemTracker.SetActionOnExceed(action)
case config.OOMActionLog:
fallthrough
default:
action := &memory.LogOnExceed{ConnID: ctx.GetSessionVars().ConnectionID}
action.SetLogHook(domain.GetDomain(ctx).ExpensiveQueryHandle().LogOnQueryExceedMemQuota)
sc.MemTracker.SetActionOnExceed(action)
}
if execStmt, ok := s.(*ast.ExecuteStmt); ok {
s, err = planner.GetPreparedStmt(execStmt, vars)
if err != nil {
return
}
}
	// An EXECUTE statement that misses its prepared stmtID uses an empty SQL string.
sc.OriginalSQL = s.Text()
if explainStmt, ok := s.(*ast.ExplainStmt); ok {
sc.InExplainStmt = true
sc.IgnoreExplainIDSuffix = (strings.ToLower(explainStmt.Format) == ast.ExplainFormatBrief)
s = explainStmt.Stmt
}
if _, ok := s.(*ast.ExplainForStmt); ok {
sc.InExplainStmt = true
}
	// TODO: Many of these bool variables carry the same information.
	// We should set only two variables (IgnoreErr and StrictSQLMode) instead of
	// setting the same bool variables over and over, and push them down to TiKV as flags.
switch stmt := s.(type) {
case *ast.UpdateStmt:
ResetUpdateStmtCtx(sc, stmt, vars)
case *ast.DeleteStmt:
sc.InDeleteStmt = true
sc.DupKeyAsWarning = stmt.IgnoreErr
sc.BadNullAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
sc.TruncateAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
sc.DividedByZeroAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
sc.IgnoreZeroInDate = !vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode || stmt.IgnoreErr || sc.AllowInvalidDate
sc.Priority = stmt.Priority
case *ast.InsertStmt:
sc.InInsertStmt = true
		// For insert statements (unlike update statements), disabling StrictSQLMode
		// should turn TruncateAsWarning and DividedByZeroAsWarning on,
		// but should not turn DupKeyAsWarning on.
sc.DupKeyAsWarning = stmt.IgnoreErr
sc.BadNullAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
sc.IgnoreNoPartition = stmt.IgnoreErr
sc.TruncateAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
sc.DividedByZeroAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
sc.IgnoreZeroInDate = !vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode || stmt.IgnoreErr || sc.AllowInvalidDate
sc.Priority = stmt.Priority
case *ast.CreateTableStmt, *ast.AlterTableStmt:
sc.InCreateOrAlterStmt = true
sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
sc.IgnoreZeroInDate = !vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode || sc.AllowInvalidDate
case *ast.LoadDataStmt:
sc.DupKeyAsWarning = true
sc.BadNullAsWarning = true
sc.TruncateAsWarning = !vars.StrictSQLMode
sc.InLoadDataStmt = true
		// Return a warning instead of an error when LOAD DATA finds no matching partition for a value.
sc.IgnoreNoPartition = true
case *ast.SelectStmt:
sc.InSelectStmt = true
// see https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#sql-mode-strict
// said "For statements such as SELECT that do not change data, invalid values
// generate a warning in strict mode, not an error."
// and https://dev.mysql.com/doc/refman/5.7/en/out-of-range-and-overflow.html
sc.OverflowAsWarning = true
// Return warning for truncate error in selection.
sc.TruncateAsWarning = true
sc.IgnoreZeroInDate = true
sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
if opts := stmt.SelectStmtOpts; opts != nil {
sc.Priority = opts.Priority
sc.NotFillCache = !opts.SQLCache
}
case *ast.SetOprStmt:
sc.InSelectStmt = true
sc.OverflowAsWarning = true
sc.TruncateAsWarning = true
sc.IgnoreZeroInDate = true
sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
case *ast.ShowStmt:
sc.IgnoreTruncate = true
sc.IgnoreZeroInDate = true
sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
if stmt.Tp == ast.ShowWarnings || stmt.Tp == ast.ShowErrors {
sc.InShowWarning = true
sc.SetWarnings(vars.StmtCtx.GetWarnings())
}
case *ast.SplitRegionStmt:
sc.IgnoreTruncate = false
sc.IgnoreZeroInDate = true
sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
default:
sc.IgnoreTruncate = true
sc.IgnoreZeroInDate = true
sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
}
vars.PreparedParams = vars.PreparedParams[:0]
if priority := mysql.PriorityEnum(atomic.LoadInt32(&variable.ForcePriority)); priority != mysql.NoPriority {
sc.Priority = priority
}
if vars.StmtCtx.LastInsertID > 0 {
sc.PrevLastInsertID = vars.StmtCtx.LastInsertID
} else {
sc.PrevLastInsertID = vars.StmtCtx.PrevLastInsertID
}
sc.PrevAffectedRows = 0
if vars.StmtCtx.InUpdateStmt || vars.StmtCtx.InDeleteStmt || vars.StmtCtx.InInsertStmt {
sc.PrevAffectedRows = int64(vars.StmtCtx.AffectedRows())
} else if vars.StmtCtx.InSelectStmt {
sc.PrevAffectedRows = -1
}
if globalConfig.EnableCollectExecutionInfo {
sc.RuntimeStatsColl = execdetails.NewRuntimeStatsColl()
}
sc.TblInfo2UnionScan = make(map[*model.TableInfo]bool)
errCount, warnCount := vars.StmtCtx.NumErrorWarnings()
vars.SysErrorCount = errCount
vars.SysWarningCount = warnCount
vars.StmtCtx = sc
vars.PrevFoundInPlanCache = vars.FoundInPlanCache
vars.FoundInPlanCache = false
vars.ClearStmtVars()
vars.PrevFoundInBinding = vars.FoundInBinding
vars.FoundInBinding = false
return
}
// ResetUpdateStmtCtx resets statement context for UpdateStmt.
func ResetUpdateStmtCtx(sc *stmtctx.StatementContext, stmt *ast.UpdateStmt, vars *variable.SessionVars) {
sc.InUpdateStmt = true
sc.DupKeyAsWarning = stmt.IgnoreErr
sc.BadNullAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
sc.TruncateAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
sc.DividedByZeroAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
sc.IgnoreZeroInDate = !vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode || stmt.IgnoreErr || sc.AllowInvalidDate
sc.Priority = stmt.Priority
sc.IgnoreNoPartition = stmt.IgnoreErr
}
// FillVirtualColumnValue calculates virtual column values by evaluating the generated-column
// expressions against the rows of a chunk, and then fills the results back into the chunk.
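// As a hedged example (not part of the original source): for a hypothetical table
// t(a int, b int AS (a + 1)), the values of column b in req are recomputed row by row by
// evaluating a + 1 and casting the result to b's declared type before being written back
// into the chunk.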
func FillVirtualColumnValue(virtualRetTypes []*types.FieldType, virtualColumnIndex []int,
schema *expression.Schema, columns []*model.ColumnInfo, sctx sessionctx.Context, req *chunk.Chunk) error {
virCols := chunk.NewChunkWithCapacity(virtualRetTypes, req.Capacity())
iter := chunk.NewIterator4Chunk(req)
for i, idx := range virtualColumnIndex {
for row := iter.Begin(); row != iter.End(); row = iter.Next() {
datum, err := schema.Columns[idx].EvalVirtualColumn(row)
if err != nil {
return err
}
			// Because the expression might return a different type from that of the
			// generated column, we should wrap a CAST on the result.
castDatum, err := table.CastValue(sctx, datum, columns[idx], false, true)
if err != nil {
return err
}
// Handle the bad null error.
if (mysql.HasNotNullFlag(columns[idx].Flag) || mysql.HasPreventNullInsertFlag(columns[idx].Flag)) && castDatum.IsNull() {
castDatum = table.GetZeroValue(columns[idx])
}
virCols.AppendDatum(i, &castDatum)
}
req.SetCol(idx, virCols.Column(i))
}
return nil
}
| executor/executor.go | 1 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.43725308775901794,
0.0028562454972416162,
0.00015899345453362912,
0.00017011139425449073,
0.03268544003367424
] |
{
"id": 0,
"code_window": [
"\tplannercore \"github.com/pingcap/tidb/planner/core\"\n",
"\t\"github.com/pingcap/tidb/privilege\"\n",
"\t\"github.com/pingcap/tidb/sessionctx\"\n",
"\t\"github.com/pingcap/tidb/sessionctx/stmtctx\"\n",
"\t\"github.com/pingcap/tidb/sessionctx/variable\"\n",
"\ttikvstore \"github.com/pingcap/tidb/store/tikv/kv\"\n",
"\ttikvutil \"github.com/pingcap/tidb/store/tikv/util\"\n",
"\t\"github.com/pingcap/tidb/table\"\n",
"\t\"github.com/pingcap/tidb/table/tables\"\n",
"\t\"github.com/pingcap/tidb/tablecodec\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/store/tikv\"\n"
],
"file_path": "executor/executor.go",
"type": "add",
"edit_start_line_idx": 49
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package math
import "math"
// Abs implements the abs function according to http://cavaliercoder.com/blog/optimized-abs-for-int64-in-go.html
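// Illustrative examples (not part of the original source): for n = -5, y = n>>63 = -1,
// n^y = 4 and 4 - (-1) = 5; for n >= 0, y = 0 and the expression returns n unchanged.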
func Abs(n int64) int64 {
y := n >> 63
return (n ^ y) - y
}
// uintSizeTable is a lookup table; comparing against it to get the decimal length of a uint64 is faster than looping on division by 10.
var uintSizeTable = [21]uint64{
	0, // redundant 0 here, so that StrLenOfUint64Fast can count from 1 and return i directly
9, 99, 999, 9999, 99999,
999999, 9999999, 99999999, 999999999, 9999999999,
99999999999, 999999999999, 9999999999999, 99999999999999, 999999999999999,
9999999999999999, 99999999999999999, 999999999999999999, 9999999999999999999,
math.MaxUint64,
} // math.MaxUint64 is 18446744073709551615 and it has 20 digits
// StrLenOfUint64Fast efficiently calculates the number of characters in the decimal string form of a uint64.
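// For example (illustrative, not part of the original source): StrLenOfUint64Fast(999) == 3
// and StrLenOfUint64Fast(1000) == 4.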
func StrLenOfUint64Fast(x uint64) int {
for i := 1; ; i++ {
if x <= uintSizeTable[i] {
return i
}
}
}
// StrLenOfInt64Fast efficiently calculates the number of characters in the decimal string form of an int64, counting the "-" sign for negative values.
func StrLenOfInt64Fast(x int64) int {
size := 0
if x < 0 {
size = 1 // add "-" sign on the length count
}
return size + StrLenOfUint64Fast(uint64(Abs(x)))
}
| util/math/math.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0003184509987477213,
0.0001932269660755992,
0.00016481647617183626,
0.0001681147696217522,
0.00005609429717878811
] |
{
"id": 0,
"code_window": [
"\tplannercore \"github.com/pingcap/tidb/planner/core\"\n",
"\t\"github.com/pingcap/tidb/privilege\"\n",
"\t\"github.com/pingcap/tidb/sessionctx\"\n",
"\t\"github.com/pingcap/tidb/sessionctx/stmtctx\"\n",
"\t\"github.com/pingcap/tidb/sessionctx/variable\"\n",
"\ttikvstore \"github.com/pingcap/tidb/store/tikv/kv\"\n",
"\ttikvutil \"github.com/pingcap/tidb/store/tikv/util\"\n",
"\t\"github.com/pingcap/tidb/table\"\n",
"\t\"github.com/pingcap/tidb/table/tables\"\n",
"\t\"github.com/pingcap/tidb/tablecodec\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/store/tikv\"\n"
],
"file_path": "executor/executor.go",
"type": "add",
"edit_start_line_idx": 49
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package infoschema_test
import (
"strings"
. "github.com/pingcap/check"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/util/set"
)
type metricSchemaSuite struct{}
var _ = Suite(&metricSchemaSuite{})
func (s *metricSchemaSuite) SetUpSuite(c *C) {
}
func (s *metricSchemaSuite) TearDownSuite(c *C) {
}
func (s *metricSchemaSuite) TestMetricSchemaDef(c *C) {
for name, def := range infoschema.MetricTableMap {
if strings.Contains(def.PromQL, "$QUANTILE") || strings.Contains(def.PromQL, "histogram_quantile") {
c.Assert(def.Quantile > 0, IsTrue, Commentf("the quantile of metric table %v should > 0", name))
} else {
c.Assert(def.Quantile == 0, IsTrue, Commentf("metric table %v has quantile, but doesn't contain $QUANTILE in promQL ", name))
}
if strings.Contains(def.PromQL, "$LABEL_CONDITIONS") {
c.Assert(len(def.Labels) > 0, IsTrue, Commentf("the labels of metric table %v should not be nil", name))
} else {
li := strings.Index(def.PromQL, "{")
ri := strings.Index(def.PromQL, "}")
// ri - li > 1 means already has label conditions, so no need $LABEL_CONDITIONS any more.
if !(ri-li > 1) {
c.Assert(len(def.Labels) == 0, IsTrue, Commentf("metric table %v has labels, but doesn't contain $LABEL_CONDITIONS in promQL", name))
}
}
if strings.Contains(def.PromQL, " by (") {
for _, label := range def.Labels {
c.Assert(strings.Contains(def.PromQL, label), IsTrue, Commentf("metric table %v has labels, but doesn't contain label %v in promQL", name, label))
}
}
if name != strings.ToLower(name) {
c.Assert(name, Equals, strings.ToLower(name), Commentf("metric table name %v should be lower case", name))
}
// INSTANCE must be the first label
if set.NewStringSet(def.Labels...).Exist("instance") {
			c.Assert(def.Labels[0], Equals, "instance", Commentf("metrics table %v: expect `instance` to be the first label but got %v", name, def.Labels))
}
}
}
| infoschema/metrics_schema_test.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.00024251776630990207,
0.00017986218153964728,
0.00016434647841379046,
0.00017030817980412394,
0.000025743593141669407
] |
{
"id": 0,
"code_window": [
"\tplannercore \"github.com/pingcap/tidb/planner/core\"\n",
"\t\"github.com/pingcap/tidb/privilege\"\n",
"\t\"github.com/pingcap/tidb/sessionctx\"\n",
"\t\"github.com/pingcap/tidb/sessionctx/stmtctx\"\n",
"\t\"github.com/pingcap/tidb/sessionctx/variable\"\n",
"\ttikvstore \"github.com/pingcap/tidb/store/tikv/kv\"\n",
"\ttikvutil \"github.com/pingcap/tidb/store/tikv/util\"\n",
"\t\"github.com/pingcap/tidb/table\"\n",
"\t\"github.com/pingcap/tidb/table/tables\"\n",
"\t\"github.com/pingcap/tidb/tablecodec\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/store/tikv\"\n"
],
"file_path": "executor/executor.go",
"type": "add",
"edit_start_line_idx": 49
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package owner
import (
"context"
"fmt"
"math"
"net"
"os"
"sync"
"testing"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/failpoint"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/testleak"
"go.etcd.io/etcd/clientv3"
"google.golang.org/grpc"
)
// Ignore this test on the Windows platform, because calling a unix socket with an address in
// host:port format fails on Windows.
func TestT(t *testing.T) {
CustomVerboseFlag = true
logLevel := os.Getenv("log_level")
err := logutil.InitLogger(logutil.NewLogConfig(logLevel, "", "", logutil.EmptyFileLogConfig, false))
if err != nil {
t.Fatal(err)
}
TestingT(t)
}
var _ = Suite(&testSuite{})
type testSuite struct {
}
func (s *testSuite) SetUpSuite(c *C) {
}
func (s *testSuite) TearDownSuite(c *C) {
}
var (
dialTimeout = 5 * time.Second
retryCnt = math.MaxInt32
)
func (s *testSuite) TestFailNewSession(c *C) {
os.Remove("new_session:0")
ln, err := net.Listen("unix", "new_session:0")
c.Assert(err, IsNil)
addr := ln.Addr()
endpoints := []string{fmt.Sprintf("%s://%s", addr.Network(), addr.String())}
c.Assert(err, IsNil)
srv := grpc.NewServer(grpc.ConnectionTimeout(time.Minute))
var stop sync.WaitGroup
stop.Add(1)
go func() {
if err = srv.Serve(ln); err != nil {
c.Errorf("can't serve gRPC requests %v", err)
}
stop.Done()
}()
leakFunc := testleak.AfterTest(c)
defer func() {
srv.Stop()
stop.Wait()
leakFunc()
}()
func() {
cli, err := clientv3.New(clientv3.Config{
Endpoints: endpoints,
DialTimeout: dialTimeout,
})
c.Assert(err, IsNil)
defer func() {
if cli != nil {
cli.Close()
}
c.Assert(failpoint.Disable("github.com/pingcap/tidb/owner/closeClient"), IsNil)
}()
c.Assert(failpoint.Enable("github.com/pingcap/tidb/owner/closeClient", `return(true)`), IsNil)
		_, err = NewSession(context.Background(), "fail_new_session", cli, retryCnt, ManagerSessionTTL)
isContextDone := terror.ErrorEqual(grpc.ErrClientConnClosing, err) || terror.ErrorEqual(context.Canceled, err)
c.Assert(isContextDone, IsTrue, Commentf("err %v", err))
}()
func() {
cli, err := clientv3.New(clientv3.Config{
Endpoints: endpoints,
DialTimeout: dialTimeout,
})
c.Assert(err, IsNil)
defer func() {
if cli != nil {
cli.Close()
}
c.Assert(failpoint.Disable("github.com/pingcap/tidb/owner/closeGrpc"), IsNil)
}()
c.Assert(failpoint.Enable("github.com/pingcap/tidb/owner/closeGrpc", `return(true)`), IsNil)
		_, err = NewSession(context.Background(), "fail_new_session", cli, retryCnt, ManagerSessionTTL)
isContextDone := terror.ErrorEqual(grpc.ErrClientConnClosing, err) || terror.ErrorEqual(context.Canceled, err)
c.Assert(isContextDone, IsTrue, Commentf("err %v", err))
}()
}
| owner/fail_test.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0003185062378179282,
0.00019300471467431635,
0.0001649675687076524,
0.00017064613348338753,
0.00004728528074338101
] |
{
"id": 1,
"code_window": [
"\t\t}\n",
"\t\treturn nil\n",
"\t}\n",
"\tlockWaitTime := e.ctx.GetSessionVars().LockWaitTimeout\n",
"\tif e.Lock.LockType == ast.SelectLockForUpdateNoWait {\n",
"\t\tlockWaitTime = kv.LockNoWait\n",
"\t} else if e.Lock.LockType == ast.SelectLockForUpdateWaitN {\n",
"\t\tlockWaitTime = int64(e.Lock.WaitSec) * 1000\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tlockWaitTime = tikv.LockNoWait\n"
],
"file_path": "executor/executor.go",
"type": "replace",
"edit_start_line_idx": 950
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv_test
import (
"bytes"
"context"
"fmt"
"math"
"math/rand"
"strings"
"sync"
"sync/atomic"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
tidbkv "github.com/pingcap/tidb/kv"
drivertxn "github.com/pingcap/tidb/store/driver/txn"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/config"
"github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
"github.com/pingcap/tidb/store/tikv/mockstore/mocktikv"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/tablecodec"
)
var (
txnCommitBatchSize = tikv.ConfigProbe{}.GetTxnCommitBatchSize()
bigTxnThreshold = tikv.ConfigProbe{}.GetBigTxnThreshold()
)
type testCommitterSuite struct {
OneByOneSuite
cluster cluster.Cluster
store tikv.StoreProbe
}
var _ = SerialSuites(&testCommitterSuite{})
func (s *testCommitterSuite) SetUpSuite(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // 3s
s.OneByOneSuite.SetUpSuite(c)
atomic.StoreUint64(&tikv.CommitMaxBackoff, 1000)
}
func (s *testCommitterSuite) SetUpTest(c *C) {
mvccStore, err := mocktikv.NewMVCCLevelDB("")
c.Assert(err, IsNil)
cluster := mocktikv.NewCluster(mvccStore)
mocktikv.BootstrapWithMultiRegions(cluster, []byte("a"), []byte("b"), []byte("c"))
s.cluster = cluster
client := mocktikv.NewRPCClient(cluster, mvccStore, nil)
pdCli := &tikv.CodecPDClient{Client: mocktikv.NewPDClient(cluster)}
spkv := tikv.NewMockSafePointKV()
store, err := tikv.NewKVStore("mocktikv-store", pdCli, spkv, client)
store.EnableTxnLocalLatches(1024000)
c.Assert(err, IsNil)
// TODO: make it possible
// store, err := mockstore.NewMockStore(
// mockstore.WithStoreType(mockstore.MockTiKV),
// mockstore.WithClusterInspector(func(c cluster.Cluster) {
// mockstore.BootstrapWithMultiRegions(c, []byte("a"), []byte("b"), []byte("c"))
// s.cluster = c
// }),
// mockstore.WithPDClientHijacker(func(c pd.Client) pd.Client {
// return &codecPDClient{c}
// }),
// mockstore.WithTxnLocalLatches(1024000),
// )
// c.Assert(err, IsNil)
s.store = tikv.StoreProbe{KVStore: store}
}
func (s *testCommitterSuite) TearDownSuite(c *C) {
atomic.StoreUint64(&tikv.CommitMaxBackoff, 20000)
s.store.Close()
s.OneByOneSuite.TearDownSuite(c)
}
func (s *testCommitterSuite) begin(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
return txn
}
func (s *testCommitterSuite) beginAsyncCommit(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.SetOption(kv.EnableAsyncCommit, true)
return txn
}
func (s *testCommitterSuite) checkValues(c *C, m map[string]string) {
txn := s.begin(c)
for k, v := range m {
val, err := txn.Get(context.TODO(), []byte(k))
c.Assert(err, IsNil)
c.Assert(string(val), Equals, v)
}
}
func (s *testCommitterSuite) mustCommit(c *C, m map[string]string) {
txn := s.begin(c)
for k, v := range m {
err := txn.Set([]byte(k), []byte(v))
c.Assert(err, IsNil)
}
err := txn.Commit(context.Background())
c.Assert(err, IsNil)
s.checkValues(c, m)
}
func randKV(keyLen, valLen int) (string, string) {
const letters = "abc"
k, v := make([]byte, keyLen), make([]byte, valLen)
for i := range k {
k[i] = letters[rand.Intn(len(letters))]
}
for i := range v {
v[i] = letters[rand.Intn(len(letters))]
}
return string(k), string(v)
}
func (s *testCommitterSuite) TestDeleteYourWritesTTL(c *C) {
conf := *config.GetGlobalConfig()
oldConf := conf
defer config.StoreGlobalConfig(&oldConf)
conf.TiKVClient.TTLRefreshedTxnSize = 0
config.StoreGlobalConfig(&conf)
{
txn := s.begin(c)
err := txn.GetMemBuffer().SetWithFlags([]byte("bb"), []byte{0}, kv.SetPresumeKeyNotExists)
c.Assert(err, IsNil)
err = txn.Set([]byte("ba"), []byte{1})
c.Assert(err, IsNil)
err = txn.Delete([]byte("bb"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
c.Check(committer.IsTTLRunning(), IsTrue)
}
{
txn := s.begin(c)
err := txn.GetMemBuffer().SetWithFlags([]byte("dd"), []byte{0}, kv.SetPresumeKeyNotExists)
c.Assert(err, IsNil)
err = txn.Set([]byte("de"), []byte{1})
c.Assert(err, IsNil)
err = txn.Delete([]byte("dd"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
c.Check(committer.IsTTLRunning(), IsTrue)
}
}
func (s *testCommitterSuite) TestCommitRollback(c *C) {
s.mustCommit(c, map[string]string{
"a": "a",
"b": "b",
"c": "c",
})
txn := s.begin(c)
txn.Set([]byte("a"), []byte("a1"))
txn.Set([]byte("b"), []byte("b1"))
txn.Set([]byte("c"), []byte("c1"))
s.mustCommit(c, map[string]string{
"c": "c2",
})
err := txn.Commit(context.Background())
c.Assert(err, NotNil)
s.checkValues(c, map[string]string{
"a": "a",
"b": "b",
"c": "c2",
})
}
func (s *testCommitterSuite) TestPrewriteRollback(c *C) {
s.mustCommit(c, map[string]string{
"a": "a0",
"b": "b0",
})
ctx := context.Background()
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
txn2 := s.begin(c)
v, err := txn2.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a0"))
err = committer.PrewriteAllMutations(ctx)
if err != nil {
// Retry.
txn1 = s.begin(c)
err = txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err = txn1.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
}
commitTS, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
committer.SetCommitTS(commitTS)
err = committer.CommitMutations(ctx)
c.Assert(err, IsNil)
txn3 := s.begin(c)
v, err = txn3.Get(context.TODO(), []byte("b"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("b1"))
}
func (s *testCommitterSuite) TestContextCancel(c *C) {
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
ctx, cancel := context.WithCancel(context.Background())
cancel() // cancel the context
err = committer.PrewriteAllMutations(ctx)
c.Assert(errors.Cause(err), Equals, context.Canceled)
}
func (s *testCommitterSuite) TestContextCancel2(c *C) {
txn := s.begin(c)
err := txn.Set([]byte("a"), []byte("a"))
c.Assert(err, IsNil)
err = txn.Set([]byte("b"), []byte("b"))
c.Assert(err, IsNil)
ctx, cancel := context.WithCancel(context.Background())
err = txn.Commit(ctx)
c.Assert(err, IsNil)
cancel()
// Secondary keys should not be canceled.
time.Sleep(time.Millisecond * 20)
c.Assert(s.isKeyLocked(c, []byte("b")), IsFalse)
}
func (s *testCommitterSuite) TestContextCancelRetryable(c *C) {
txn1, txn2, txn3 := s.begin(c), s.begin(c), s.begin(c)
// txn1 locks "b"
err := txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
// txn3 writes "c"
err = txn3.Set([]byte("c"), []byte("c3"))
c.Assert(err, IsNil)
err = txn3.Commit(context.Background())
c.Assert(err, IsNil)
// txn2 writes "a"(PK), "b", "c" on different regions.
// "c" will return a retryable error.
// "b" will get a Locked error first, then the context must be canceled after backoff for lock.
err = txn2.Set([]byte("a"), []byte("a2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("c"), []byte("c2"))
c.Assert(err, IsNil)
err = txn2.Commit(context.Background())
c.Assert(err, NotNil)
c.Assert(tidbkv.ErrWriteConflictInTiDB.Equal(err), IsTrue, Commentf("err: %s", err))
}
func (s *testCommitterSuite) TestContextCancelCausingUndetermined(c *C) {
// For a normal transaction, if RPC returns context.Canceled error while sending commit
// requests, the transaction should go to the undetermined state.
txn := s.begin(c)
err := txn.Set([]byte("a"), []byte("va"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/rpcContextCancelErr", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/rpcContextCancelErr"), IsNil)
}()
err = committer.CommitMutations(context.Background())
c.Assert(committer.GetUndeterminedErr(), NotNil)
c.Assert(errors.Cause(err), Equals, context.Canceled)
}
func (s *testCommitterSuite) mustGetRegionID(c *C, key []byte) uint64 {
loc, err := s.store.GetRegionCache().LocateKey(tikv.NewBackofferWithVars(context.Background(), 500, nil), key)
c.Assert(err, IsNil)
return loc.Region.GetID()
}
func (s *testCommitterSuite) isKeyLocked(c *C, key []byte) bool {
ver, err := s.store.CurrentTimestamp(oracle.GlobalTxnScope)
c.Assert(err, IsNil)
bo := tikv.NewBackofferWithVars(context.Background(), 500, nil)
req := tikvrpc.NewRequest(tikvrpc.CmdGet, &kvrpcpb.GetRequest{
Key: key,
Version: ver,
})
loc, err := s.store.GetRegionCache().LocateKey(bo, key)
c.Assert(err, IsNil)
resp, err := s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
keyErr := (resp.Resp.(*kvrpcpb.GetResponse)).GetError()
return keyErr.GetLocked() != nil
}
func (s *testCommitterSuite) TestPrewriteCancel(c *C) {
// Setup region delays for key "b" and "c".
delays := map[uint64]time.Duration{
s.mustGetRegionID(c, []byte("b")): time.Millisecond * 10,
s.mustGetRegionID(c, []byte("c")): time.Millisecond * 20,
}
s.store.SetTiKVClient(&slowClient{
Client: s.store.GetTiKVClient(),
regionDelays: delays,
})
txn1, txn2 := s.begin(c), s.begin(c)
// txn2 writes "b"
err := txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
err = txn2.Commit(context.Background())
c.Assert(err, IsNil)
// txn1 writes "a"(PK), "b", "c" on different regions.
// "b" will return an error and cancel commit.
err = txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("c"), []byte("c1"))
c.Assert(err, IsNil)
err = txn1.Commit(context.Background())
c.Assert(err, NotNil)
// "c" should be cleaned up in reasonable time.
for i := 0; i < 50; i++ {
if !s.isKeyLocked(c, []byte("c")) {
return
}
time.Sleep(time.Millisecond * 10)
}
c.Fail()
}
// slowClient wraps rpcClient and makes some regions respond with delay.
type slowClient struct {
tikv.Client
regionDelays map[uint64]time.Duration
}
func (c *slowClient) SendReq(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
for id, delay := range c.regionDelays {
reqCtx := &req.Context
if reqCtx.GetRegionId() == id {
time.Sleep(delay)
}
}
return c.Client.SendRequest(ctx, addr, req, timeout)
}
func (s *testCommitterSuite) TestIllegalTso(c *C) {
txn := s.begin(c)
data := map[string]string{
"name": "aa",
"age": "12",
}
for k, v := range data {
err := txn.Set([]byte(k), []byte(v))
c.Assert(err, IsNil)
}
// make start ts bigger.
txn.SetStartTS(math.MaxUint64)
err := txn.Commit(context.Background())
c.Assert(err, NotNil)
errMsgMustContain(c, err, "invalid txnStartTS")
}
func errMsgMustContain(c *C, err error, msg string) {
c.Assert(strings.Contains(err.Error(), msg), IsTrue)
}
func (s *testCommitterSuite) TestCommitBeforePrewrite(c *C) {
txn := s.begin(c)
err := txn.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
ctx := context.Background()
committer.Cleanup(ctx)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, NotNil)
errMsgMustContain(c, err, "already rolled back")
}
func (s *testCommitterSuite) TestPrewritePrimaryKeyFailed(c *C) {
// commit (a,a1)
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Commit(context.Background())
c.Assert(err, IsNil)
// check a
txn := s.begin(c)
v, err := txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
// set txn2's startTs before txn1's
txn2 := s.begin(c)
txn2.SetStartTS(txn1.StartTS() - 1)
err = txn2.Set([]byte("a"), []byte("a2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
// prewrite:primary a failed, b success
err = txn2.Commit(context.Background())
c.Assert(err, NotNil)
// txn2 failed with a rollback for record a.
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
_, err = txn.Get(context.TODO(), []byte("b"))
errMsgMustContain(c, err, "key not exist")
	// clean again, it shouldn't fail when a rollback already exists.
ctx := context.Background()
committer, err := txn2.NewCommitter(0)
c.Assert(err, IsNil)
committer.Cleanup(ctx)
// check the data after rollback twice.
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
// update data in a new txn, should be success.
err = txn.Set([]byte("a"), []byte("a3"))
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
// check value
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a3"))
}
func (s *testCommitterSuite) TestWrittenKeysOnConflict(c *C) {
	// This test checks that when there is a write conflict, the written keys are collected,
	// so we can use them to clean up keys.
region, _ := s.cluster.GetRegionByKey([]byte("x"))
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(region.Id, newRegionID, []byte("y"), []uint64{newPeerID}, newPeerID)
var totalTime time.Duration
for i := 0; i < 10; i++ {
txn1 := s.begin(c)
txn2 := s.begin(c)
txn2.Set([]byte("x1"), []byte("1"))
committer2, err := txn2.NewCommitter(2)
c.Assert(err, IsNil)
err = committer2.Execute(context.Background())
c.Assert(err, IsNil)
txn1.Set([]byte("x1"), []byte("1"))
txn1.Set([]byte("y1"), []byte("2"))
committer1, err := txn1.NewCommitter(2)
c.Assert(err, IsNil)
err = committer1.Execute(context.Background())
c.Assert(err, NotNil)
committer1.WaitCleanup()
txn3 := s.begin(c)
start := time.Now()
txn3.Get(context.TODO(), []byte("y1"))
totalTime += time.Since(start)
txn3.Commit(context.Background())
}
c.Assert(totalTime, Less, time.Millisecond*200)
}
func (s *testCommitterSuite) TestPrewriteTxnSize(c *C) {
// Prepare two regions first: (, 100) and [100, )
region, _ := s.cluster.GetRegionByKey([]byte{50})
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(region.Id, newRegionID, []byte{100}, []uint64{newPeerID}, newPeerID)
txn := s.begin(c)
var val [1024]byte
for i := byte(50); i < 120; i++ {
err := txn.Set([]byte{i}, val[:])
c.Assert(err, IsNil)
}
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
ctx := context.Background()
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
// Check the written locks in the first region (50 keys)
for i := byte(50); i < 100; i++ {
lock := s.getLockInfo(c, []byte{i})
c.Assert(int(lock.TxnSize), Equals, 50)
}
// Check the written locks in the second region (20 keys)
for i := byte(100); i < 120; i++ {
lock := s.getLockInfo(c, []byte{i})
c.Assert(int(lock.TxnSize), Equals, 20)
}
}
func (s *testCommitterSuite) TestRejectCommitTS(c *C) {
txn := s.begin(c)
c.Assert(txn.Set([]byte("x"), []byte("v")), IsNil)
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, []byte("x"))
c.Assert(err, IsNil)
mutations := []*kvrpcpb.Mutation{
{
Op: committer.GetMutations().GetOp(0),
Key: committer.GetMutations().GetKey(0),
Value: committer.GetMutations().GetValue(0),
},
}
prewrite := &kvrpcpb.PrewriteRequest{
Mutations: mutations,
PrimaryLock: committer.GetPrimaryKey(),
StartVersion: committer.GetStartTS(),
LockTtl: committer.GetLockTTL(),
MinCommitTs: committer.GetStartTS() + 100, // Set minCommitTS
}
req := tikvrpc.NewRequest(tikvrpc.CmdPrewrite, prewrite)
_, err = s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
// Make commitTS less than minCommitTS.
committer.SetCommitTS(committer.GetStartTS() + 1)
// Ensure that the new commit ts is greater than minCommitTS when retry
time.Sleep(3 * time.Millisecond)
err = committer.CommitMutations(context.Background())
c.Assert(err, IsNil)
	// Use startTS+2 to read the data and get nothing.
	// Use math.MaxUint64 to read the data and succeed.
	// That means the final commitTS > startTS+2, so it is not the one we provided.
	// This covers the retried commitTS logic.
txn1, err := s.store.BeginWithStartTS(oracle.GlobalTxnScope, committer.GetStartTS()+2)
c.Assert(err, IsNil)
_, err = txn1.Get(bo.GetCtx(), []byte("x"))
c.Assert(tidbkv.IsErrNotFound(err), IsTrue)
txn2, err := s.store.BeginWithStartTS(oracle.GlobalTxnScope, math.MaxUint64)
c.Assert(err, IsNil)
val, err := txn2.Get(bo.GetCtx(), []byte("x"))
c.Assert(err, IsNil)
c.Assert(bytes.Equal(val, []byte("v")), IsTrue)
}
func (s *testCommitterSuite) TestPessimisticPrewriteRequest(c *C) {
	// This test checks that the isPessimisticLock field is set in the request even when none of the keys is a pessimistic lock.
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
err := txn.Set([]byte("t1"), []byte("v1"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
committer.SetForUpdateTS(100)
req := committer.BuildPrewriteRequest(1, 1, 1, committer.GetMutations().Slice(0, 1), 1)
c.Assert(len(req.Prewrite().IsPessimisticLock), Greater, 0)
c.Assert(req.Prewrite().ForUpdateTs, Equals, uint64(100))
}
func (s *testCommitterSuite) TestUnsetPrimaryKey(c *C) {
// This test checks that the isPessimisticLock field is set in the request even when no keys are pessimistic lock.
key := []byte("key")
txn := s.begin(c)
c.Assert(txn.Set(key, key), IsNil)
c.Assert(txn.Commit(context.Background()), IsNil)
txn = s.begin(c)
txn.SetOption(kv.Pessimistic, true)
_, _ = txn.GetUnionStore().Get(context.TODO(), key)
c.Assert(txn.GetMemBuffer().SetWithFlags(key, key, kv.SetPresumeKeyNotExists), IsNil)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, NotNil)
c.Assert(txn.Delete(key), IsNil)
key2 := []byte("key2")
c.Assert(txn.Set(key2, key2), IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
}
func (s *testCommitterSuite) TestPessimisticLockedKeysDedup(c *C) {
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
lockCtx := &kv.LockCtx{ForUpdateTS: 100, WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, []byte("abc"), []byte("def"))
c.Assert(err, IsNil)
lockCtx = &kv.LockCtx{ForUpdateTS: 100, WaitStartTime: time.Now()}
err = txn.LockKeys(context.Background(), lockCtx, []byte("abc"), []byte("def"))
c.Assert(err, IsNil)
c.Assert(txn.CollectLockedKeys(), HasLen, 2)
}
func (s *testCommitterSuite) TestPessimisticTTL(c *C) {
key := []byte("key")
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
time.Sleep(time.Millisecond * 100)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, IsNil)
time.Sleep(time.Millisecond * 100)
key2 := []byte("key2")
lockCtx = &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
err = txn.LockKeys(context.Background(), lockCtx, key2)
c.Assert(err, IsNil)
lockInfo := s.getLockInfo(c, key)
msBeforeLockExpired := s.store.GetOracle().UntilExpired(txn.StartTS(), lockInfo.LockTtl, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(msBeforeLockExpired, GreaterEqual, int64(100))
lr := s.store.NewLockResolver()
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
status, err := lr.GetTxnStatus(bo, txn.StartTS(), key2, 0, txn.StartTS(), true, false, nil)
c.Assert(err, IsNil)
c.Assert(status.TTL(), GreaterEqual, lockInfo.LockTtl)
// Check primary lock TTL is auto increasing while the pessimistic txn is ongoing.
for i := 0; i < 50; i++ {
lockInfoNew := s.getLockInfo(c, key)
if lockInfoNew.LockTtl > lockInfo.LockTtl {
currentTS, err := s.store.GetOracle().GetTimestamp(bo.GetCtx(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
			// Check that the TTL is updated to a reasonable range.
expire := oracle.ExtractPhysical(txn.StartTS()) + int64(lockInfoNew.LockTtl)
now := oracle.ExtractPhysical(currentTS)
c.Assert(expire > now, IsTrue)
c.Assert(uint64(expire-now) <= atomic.LoadUint64(&tikv.ManagedLockTTL), IsTrue)
return
}
time.Sleep(100 * time.Millisecond)
}
c.Assert(false, IsTrue, Commentf("update pessimistic ttl fail"))
}
func (s *testCommitterSuite) TestPessimisticLockReturnValues(c *C) {
key := []byte("key")
key2 := []byte("key2")
txn := s.begin(c)
c.Assert(txn.Set(key, key), IsNil)
c.Assert(txn.Set(key2, key2), IsNil)
c.Assert(txn.Commit(context.Background()), IsNil)
txn = s.begin(c)
txn.SetOption(kv.Pessimistic, true)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
lockCtx.ReturnValues = true
lockCtx.Values = map[string]kv.ReturnedValue{}
c.Assert(txn.LockKeys(context.Background(), lockCtx, key, key2), IsNil)
c.Assert(lockCtx.Values, HasLen, 2)
c.Assert(lockCtx.Values[string(key)].Value, BytesEquals, key)
c.Assert(lockCtx.Values[string(key2)].Value, BytesEquals, key2)
}
// TestElapsedTTL tests that elapsed time is correct even if ts physical time is greater than local time.
func (s *testCommitterSuite) TestElapsedTTL(c *C) {
key := []byte("key")
txn := s.begin(c)
txn.SetStartTS(oracle.ComposeTS(oracle.GetPhysical(time.Now().Add(time.Second*10)), 1))
txn.SetOption(kv.Pessimistic, true)
time.Sleep(time.Millisecond * 100)
lockCtx := &kv.LockCtx{
ForUpdateTS: oracle.ComposeTS(oracle.ExtractPhysical(txn.StartTS())+100, 1),
WaitStartTime: time.Now(),
}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, IsNil)
lockInfo := s.getLockInfo(c, key)
c.Assert(lockInfo.LockTtl-atomic.LoadUint64(&tikv.ManagedLockTTL), GreaterEqual, uint64(100))
c.Assert(lockInfo.LockTtl-atomic.LoadUint64(&tikv.ManagedLockTTL), Less, uint64(150))
}
func (s *testCommitterSuite) TestDeleteYourWriteCauseGhostPrimary(c *C) {
s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
k1 := []byte("a") // insert but deleted key at first pos in txn1
k2 := []byte("b") // insert key at second pos in txn1
k3 := []byte("c") // insert key in txn1 and will be conflict read by txn2
// insert k1, k2, k3 and delete k1
txn1 := s.begin(c)
txn1.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
txn1.Get(context.Background(), k1)
txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
txn1.Set(k2, []byte{1})
txn1.Set(k3, []byte{2})
txn1.Delete(k1)
committer1, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
// setup test knob in txn's committer
ac, bk := make(chan struct{}), make(chan struct{})
committer1.SetPrimaryKeyBlocker(ac, bk)
txn1.SetCommitter(committer1)
var txn1Done sync.WaitGroup
txn1Done.Add(1)
go func() {
err1 := txn1.Commit(context.Background())
c.Assert(err1, IsNil)
txn1Done.Done()
}()
	// resume after the primary key has been committed
<-ac
// start txn2 to read k3(prewrite success and primary should be committed)
txn2 := s.begin(c)
txn2.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
v, err := txn2.Get(context.Background(), k3)
c.Assert(err, IsNil) // should resolve lock and read txn1 k3 result instead of rollback it.
c.Assert(v[0], Equals, byte(2))
bk <- struct{}{}
txn1Done.Wait()
}
func (s *testCommitterSuite) TestDeleteAllYourWrites(c *C) {
s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
k1 := []byte("a")
k2 := []byte("b")
k3 := []byte("c")
// insert k1, k2, k3 and delete k1, k2, k3
txn1 := s.begin(c)
txn1.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
txn1.Delete(k1)
txn1.GetMemBuffer().SetWithFlags(k2, []byte{1}, kv.SetPresumeKeyNotExists)
txn1.Delete(k2)
txn1.GetMemBuffer().SetWithFlags(k3, []byte{2}, kv.SetPresumeKeyNotExists)
txn1.Delete(k3)
err1 := txn1.Commit(context.Background())
c.Assert(err1, IsNil)
}
func (s *testCommitterSuite) TestDeleteAllYourWritesWithSFU(c *C) {
s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
k1 := []byte("a")
k2 := []byte("b")
k3 := []byte("c")
	// insert k1 and delete it, then lock k2 and k3 with select-for-update
txn1 := s.begin(c)
txn1.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
txn1.Delete(k1)
err := txn1.LockKeys(context.Background(), &kv.LockCtx{}, k2, k3) // select * from t where x in (k2, k3) for update
c.Assert(err, IsNil)
committer1, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
// setup test knob in txn's committer
ac, bk := make(chan struct{}), make(chan struct{})
committer1.SetPrimaryKeyBlocker(ac, bk)
txn1.SetCommitter(committer1)
var txn1Done sync.WaitGroup
txn1Done.Add(1)
go func() {
err1 := txn1.Commit(context.Background())
c.Assert(err1, IsNil)
txn1Done.Done()
}()
	// resume after the primary key has been committed
<-ac
// start txn2 to read k3
txn2 := s.begin(c)
txn2.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
err = txn2.Set(k3, []byte{33})
c.Assert(err, IsNil)
var meetLocks []*tikv.Lock
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
resolver.SetMeetLockCallback(func(locks []*tikv.Lock) {
meetLocks = append(meetLocks, locks...)
})
err = txn2.Commit(context.Background())
c.Assert(err, IsNil)
bk <- struct{}{}
txn1Done.Wait()
c.Assert(meetLocks[0].Primary[0], Equals, k2[0])
}
// TestAcquireFalseTimeoutLock tests acquiring a key which is a secondary key of another transaction.
// The lock's own TTL is expired but the primary key is still alive due to heartbeats.
func (s *testCommitterSuite) TestAcquireFalseTimeoutLock(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 1000) // 1s
defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default test value
// k1 is the primary lock of txn1
k1 := []byte("k1")
// k2 is a secondary lock of txn1 and a key txn2 wants to lock
k2 := []byte("k2")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// lock the primary key
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1)
c.Assert(err, IsNil)
// lock the secondary key
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err = txn1.LockKeys(context.Background(), lockCtx, k2)
c.Assert(err, IsNil)
// Heartbeats will increase the TTL of the primary key
// wait until secondary key exceeds its own TTL
time.Sleep(time.Duration(atomic.LoadUint64(&tikv.ManagedLockTTL)) * time.Millisecond)
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
// test no wait
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: tidbkv.LockNoWait, WaitStartTime: time.Now()}
err = txn2.LockKeys(context.Background(), lockCtx, k2)
// cannot acquire lock immediately thus error
c.Assert(err.Error(), Equals, kv.ErrLockAcquireFailAndNoWaitSet.Error())
// test for wait limited time (200ms)
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: 200, WaitStartTime: time.Now()}
err = txn2.LockKeys(context.Background(), lockCtx, k2)
// cannot acquire lock in time thus error
c.Assert(err.Error(), Equals, kv.ErrLockWaitTimeout.Error())
}
func (s *testCommitterSuite) getLockInfo(c *C, key []byte) *kvrpcpb.LockInfo {
txn := s.begin(c)
err := txn.Set(key, key)
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, key)
c.Assert(err, IsNil)
req := committer.BuildPrewriteRequest(loc.Region.GetID(), loc.Region.GetConfVer(), loc.Region.GetVer(), committer.GetMutations().Slice(0, 1), 1)
resp, err := s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
keyErrs := (resp.Resp.(*kvrpcpb.PrewriteResponse)).Errors
c.Assert(keyErrs, HasLen, 1)
locked := keyErrs[0].Locked
c.Assert(locked, NotNil)
return locked
}
func (s *testCommitterSuite) TestPkNotFound(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 100) // 100ms
defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default value
ctx := context.Background()
// k1 is the primary lock of txn1.
k1 := []byte("k1")
// k2 is a secondary lock of txn1 and a key txn2 wants to lock.
k2 := []byte("k2")
k3 := []byte("k3")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// lock the primary key.
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(ctx, lockCtx, k1)
c.Assert(err, IsNil)
// lock the secondary key.
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err = txn1.LockKeys(ctx, lockCtx, k2, k3)
c.Assert(err, IsNil)
	// Stop the txn ttl manager and remove the primary key, as if the tidb server crashed and the primary key lock does not actually exist,
	// while the secondary lock operation succeeded.
txn1.GetCommitter().CloseTTLManager()
var status tikv.TxnStatus
bo := tikv.NewBackofferWithVars(ctx, 5000, nil)
lockKey2 := &tikv.Lock{
Key: k2,
Primary: k1,
TxnID: txn1.StartTS(),
TTL: 0, // let the primary lock k1 expire doing check.
TxnSize: txnCommitBatchSize,
LockType: kvrpcpb.Op_PessimisticLock,
LockForUpdateTS: txn1.StartTS(),
}
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
status, err = resolver.GetTxnStatusFromLock(bo, lockKey2, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
c.Assert(err, IsNil)
c.Assert(status.Action(), Equals, kvrpcpb.Action_TTLExpirePessimisticRollback)
	// Txn2 tries to lock the secondary key k2; there should be no dead loop.
	// Since the resolved key k2 is a pessimistic lock, no rollback record should be written, and the later lock of k2
	// and of the other secondary key k3 should succeed if no fail point is enabled.
status, err = resolver.GetTxnStatusFromLock(bo, lockKey2, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
c.Assert(err, IsNil)
c.Assert(status.Action(), Equals, kvrpcpb.Action_LockNotExistDoNothing)
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now()}
err = txn2.LockKeys(ctx, lockCtx, k2)
c.Assert(err, IsNil)
// Pessimistic rollback using smaller forUpdateTS does not take effect.
lockKey3 := &tikv.Lock{
Key: k3,
Primary: k1,
TxnID: txn1.StartTS(),
TTL: tikv.ManagedLockTTL,
TxnSize: txnCommitBatchSize,
LockType: kvrpcpb.Op_PessimisticLock,
LockForUpdateTS: txn1.StartTS() - 1,
}
err = resolver.ResolvePessimisticLock(ctx, lockKey3)
c.Assert(err, IsNil)
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err = txn1.LockKeys(ctx, lockCtx, k3)
c.Assert(err, IsNil)
	// After the fail point is disabled, the rollbackIfNotExist flag will be set, and the resolve should succeed. In this
	// case, the returned action of TxnStatus should be LockNotExistDoNothing, and the lock on k3 can be resolved.
txn3 := s.begin(c)
txn3.SetOption(kv.Pessimistic, true)
lockCtx = &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
err = txn3.LockKeys(ctx, lockCtx, k3)
c.Assert(err, IsNil)
status, err = resolver.GetTxnStatusFromLock(bo, lockKey3, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
c.Assert(err, IsNil)
c.Assert(status.Action(), Equals, kvrpcpb.Action_LockNotExistDoNothing)
}
func (s *testCommitterSuite) TestPessimisticLockPrimary(c *C) {
// a is the primary lock of txn1
k1 := []byte("a")
// b is a secondary lock of txn1 and a key txn2 wants to lock, b is on another region
k2 := []byte("b")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// txn1 lock k1
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1)
c.Assert(err, IsNil)
	// txn2 wants to lock k1 and k2; k1 (the pk) is blocked by txn1. Since pessimisticLockKeys has been changed to
	// lock the primary key first and then the secondary keys concurrently, k2 should not be locked by txn2.
doneCh := make(chan error)
go func() {
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
lockCtx2 := &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: 200}
waitErr := txn2.LockKeys(context.Background(), lockCtx2, k1, k2)
doneCh <- waitErr
}()
time.Sleep(50 * time.Millisecond)
	// txn3 should lock k2 successfully using no-wait
txn3 := s.begin(c)
txn3.SetOption(kv.Pessimistic, true)
lockCtx3 := &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL", "return"), IsNil)
err = txn3.LockKeys(context.Background(), lockCtx3, k2)
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL"), IsNil)
c.Assert(err, IsNil)
waitErr := <-doneCh
c.Assert(kv.ErrLockWaitTimeout.Equal(waitErr), IsTrue)
}
func (s *testCommitterSuite) TestResolvePessimisticLock(c *C) {
untouchedIndexKey := []byte("t00000001_i000000001")
untouchedIndexValue := []byte{0, 0, 0, 0, 0, 0, 0, 1, 49}
noValueIndexKey := []byte("t00000001_i000000002")
c.Assert(tablecodec.IsUntouchedIndexKValue(untouchedIndexKey, untouchedIndexValue), IsTrue)
txn := s.begin(c)
txn.SetOption(kv.KVFilter, drivertxn.TiDBKVFilter{})
err := txn.Set(untouchedIndexKey, untouchedIndexValue)
c.Assert(err, IsNil)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
err = txn.LockKeys(context.Background(), lockCtx, untouchedIndexKey, noValueIndexKey)
c.Assert(err, IsNil)
commit, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
mutation := commit.MutationsOfKeys([][]byte{untouchedIndexKey, noValueIndexKey})
c.Assert(mutation.Len(), Equals, 2)
c.Assert(mutation.GetOp(0), Equals, pb.Op_Lock)
c.Assert(mutation.GetKey(0), BytesEquals, untouchedIndexKey)
c.Assert(mutation.GetValue(0), BytesEquals, untouchedIndexValue)
c.Assert(mutation.GetOp(1), Equals, pb.Op_Lock)
c.Assert(mutation.GetKey(1), BytesEquals, noValueIndexKey)
c.Assert(mutation.GetValue(1), BytesEquals, []byte{})
}
func (s *testCommitterSuite) TestCommitDeadLock(c *C) {
// Split into two region and let k1 k2 in different regions.
s.cluster.SplitKeys([]byte("z"), []byte("a"), 2)
k1 := []byte("a_deadlock_k1")
k2 := []byte("y_deadlock_k2")
region1, _ := s.cluster.GetRegionByKey(k1)
region2, _ := s.cluster.GetRegionByKey(k2)
c.Assert(region1.Id != region2.Id, IsTrue)
txn1 := s.begin(c)
txn1.Set(k1, []byte("t1"))
txn1.Set(k2, []byte("t1"))
commit1, err := txn1.NewCommitter(1)
c.Assert(err, IsNil)
commit1.SetPrimaryKey(k1)
commit1.SetTxnSize(1000 * 1024 * 1024)
txn2 := s.begin(c)
txn2.Set(k1, []byte("t2"))
txn2.Set(k2, []byte("t2"))
commit2, err := txn2.NewCommitter(2)
c.Assert(err, IsNil)
commit2.SetPrimaryKey(k2)
commit2.SetTxnSize(1000 * 1024 * 1024)
s.cluster.ScheduleDelay(txn2.StartTS(), region1.Id, 5*time.Millisecond)
s.cluster.ScheduleDelay(txn1.StartTS(), region2.Id, 5*time.Millisecond)
	// Txn1 prewrites k1, k2 and txn2 prewrites k2, k1; the large-txn
	// protocol runs ttlManager and updates their TTLs, which can cause a deadlock.
ch := make(chan error, 2)
var wg sync.WaitGroup
wg.Add(1)
go func() {
ch <- commit2.Execute(context.Background())
wg.Done()
}()
ch <- commit1.Execute(context.Background())
wg.Wait()
close(ch)
res := 0
for e := range ch {
if e != nil {
res++
}
}
c.Assert(res, Equals, 1)
}
// TestPushPessimisticLock tests pushing forward the minCommitTS of pessimistic locks.
func (s *testCommitterSuite) TestPushPessimisticLock(c *C) {
// k1 is the primary key.
k1, k2 := []byte("a"), []byte("b")
ctx := context.Background()
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1, k2)
c.Assert(err, IsNil)
txn1.Set(k2, []byte("v2"))
committer := txn1.GetCommitter()
err = committer.InitKeysAndMutations()
c.Assert(err, IsNil)
// Strip the prewrite of the primary key.
committer.SetMutations(committer.GetMutations().Slice(1, 2))
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
	// The primary lock is a pessimistic lock and the secondary lock is an optimistic lock.
lock1 := s.getLockInfo(c, k1)
c.Assert(lock1.LockType, Equals, kvrpcpb.Op_PessimisticLock)
c.Assert(lock1.PrimaryLock, BytesEquals, k1)
lock2 := s.getLockInfo(c, k2)
c.Assert(lock2.LockType, Equals, kvrpcpb.Op_Put)
c.Assert(lock2.PrimaryLock, BytesEquals, k1)
txn2 := s.begin(c)
start := time.Now()
_, err = txn2.Get(ctx, k2)
elapsed := time.Since(start)
// The optimistic lock shouldn't block reads.
c.Assert(elapsed, Less, 500*time.Millisecond)
c.Assert(tidbkv.IsErrNotFound(err), IsTrue)
txn1.Rollback()
txn2.Rollback()
}
// TestResolveMixed tests mixed resolving of left-behind optimistic and pessimistic locks,
// using the clean-whole-region resolve path.
func (s *testCommitterSuite) TestResolveMixed(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 100) // 100ms
defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default value
ctx := context.Background()
// pk is the primary lock of txn1
pk := []byte("pk")
secondaryLockkeys := make([][]byte, 0, bigTxnThreshold)
for i := 0; i < bigTxnThreshold; i++ {
optimisticLock := []byte(fmt.Sprintf("optimisticLockKey%d", i))
secondaryLockkeys = append(secondaryLockkeys, optimisticLock)
}
pessimisticLockKey := []byte("pessimisticLockKey")
	// leave the optimistic and pessimistic locks behind with their primary lock not found
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// lock the primary key
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, pk)
c.Assert(err, IsNil)
// lock the optimistic keys
for i := 0; i < bigTxnThreshold; i++ {
txn1.Set(secondaryLockkeys[i], []byte(fmt.Sprintf("v%d", i)))
}
committer := txn1.GetCommitter()
err = committer.InitKeysAndMutations()
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
// lock the pessimistic keys
err = txn1.LockKeys(context.Background(), lockCtx, pessimisticLockKey)
c.Assert(err, IsNil)
lock1 := s.getLockInfo(c, pessimisticLockKey)
c.Assert(lock1.LockType, Equals, kvrpcpb.Op_PessimisticLock)
c.Assert(lock1.PrimaryLock, BytesEquals, pk)
optimisticLockKey := secondaryLockkeys[0]
lock2 := s.getLockInfo(c, optimisticLockKey)
c.Assert(lock2.LockType, Equals, kvrpcpb.Op_Put)
c.Assert(lock2.PrimaryLock, BytesEquals, pk)
	// stop the txn ttl manager and remove the primary key, leaving the other keys behind
committer.CloseTTLManager()
muts := tikv.NewPlainMutations(1)
muts.Push(kvrpcpb.Op_Lock, pk, nil, true)
err = committer.PessimisticRollbackMutations(context.Background(), &muts)
c.Assert(err, IsNil)
	// try to resolve the leftover optimistic locks using the clean-whole-region path
time.Sleep(time.Duration(atomic.LoadUint64(&tikv.ManagedLockTTL)) * time.Millisecond)
optimisticLockInfo := s.getLockInfo(c, optimisticLockKey)
lock := tikv.NewLock(optimisticLockInfo)
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
err = resolver.ResolveLock(ctx, lock)
c.Assert(err, IsNil)
	// txn2 tries to lock the pessimisticLockKey; the lock should have been resolved by the clean-whole-region resolve
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
err = txn2.LockKeys(context.Background(), lockCtx, pessimisticLockKey)
c.Assert(err, IsNil)
err = txn1.Rollback()
c.Assert(err, IsNil)
err = txn2.Rollback()
c.Assert(err, IsNil)
}
// TestPrewriteSecondaryKeys tests that when async commit is enabled, each prewrite message includes an
// accurate list of secondary keys.
func (s *testCommitterSuite) TestPrewriteSecondaryKeys(c *C) {
// Prepare two regions first: (, 100) and [100, )
region, _ := s.cluster.GetRegionByKey([]byte{50})
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(region.Id, newRegionID, []byte{100}, []uint64{newPeerID}, newPeerID)
txn := s.beginAsyncCommit(c)
var val [1024]byte
for i := byte(50); i < 120; i++ {
err := txn.Set([]byte{i}, val[:])
c.Assert(err, IsNil)
}
// Some duplicates.
for i := byte(50); i < 120; i += 10 {
err := txn.Set([]byte{i}, val[512:700])
c.Assert(err, IsNil)
}
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
mock := mockClient{inner: s.store.GetTiKVClient()}
s.store.SetTiKVClient(&mock)
ctx := context.Background()
// TODO remove this when minCommitTS is returned from mockStore prewrite response.
committer.SetMinCommitTS(committer.GetStartTS() + 10)
committer.SetNoFallBack()
err = committer.Execute(ctx)
c.Assert(err, IsNil)
c.Assert(mock.seenPrimaryReq > 0, IsTrue)
c.Assert(mock.seenSecondaryReq > 0, IsTrue)
}
func (s *testCommitterSuite) TestAsyncCommit(c *C) {
ctx := context.Background()
pk := []byte("tpk")
pkVal := []byte("pkVal")
k1 := []byte("tk1")
k1Val := []byte("k1Val")
txn1 := s.beginAsyncCommit(c)
err := txn1.Set(pk, pkVal)
c.Assert(err, IsNil)
err = txn1.Set(k1, k1Val)
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
committer.SetSessionID(1)
committer.SetMinCommitTS(txn1.StartTS() + 10)
err = committer.Execute(ctx)
c.Assert(err, IsNil)
s.checkValues(c, map[string]string{
string(pk): string(pkVal),
string(k1): string(k1Val),
})
}
func updateGlobalConfig(f func(conf *config.Config)) {
g := config.GetGlobalConfig()
newConf := *g
f(&newConf)
config.StoreGlobalConfig(&newConf)
}
// restoreGlobalConfFunc returns a function that restores the config to its current value.
func restoreGlobalConfFunc() (restore func()) {
g := config.GetGlobalConfig()
return func() {
config.StoreGlobalConfig(g)
}
}
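// Editor's note: exampleTweakAsyncCommitConfig below is an added illustrative sketch, not part of the
// original test file. It shows the intended pairing of restoreGlobalConfFunc and updateGlobalConfig:
// tweak the async commit limits temporarily and restore the previous global config when done. The
// limit value 8 is an arbitrary assumption.
func exampleTweakAsyncCommitConfig() {
	defer restoreGlobalConfFunc()()
	updateGlobalConfig(func(conf *config.Config) {
		conf.TiKVClient.AsyncCommit.KeysLimit = 8
	})
	// ... code that should run under the temporary limit goes here ...
}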
func (s *testCommitterSuite) TestAsyncCommitCheck(c *C) {
defer restoreGlobalConfFunc()()
updateGlobalConfig(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.KeysLimit = 16
conf.TiKVClient.AsyncCommit.TotalKeySizeLimit = 64
})
txn := s.beginAsyncCommit(c)
buf := []byte{0, 0, 0, 0}
// Set 16 keys, each key is 4 bytes long. So the total size of keys is 64 bytes.
for i := 0; i < 16; i++ {
buf[0] = byte(i)
err := txn.Set(buf, []byte("v"))
c.Assert(err, IsNil)
}
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
c.Assert(committer.CheckAsyncCommit(), IsTrue)
updateGlobalConfig(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.KeysLimit = 15
})
c.Assert(committer.CheckAsyncCommit(), IsFalse)
updateGlobalConfig(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.KeysLimit = 20
conf.TiKVClient.AsyncCommit.TotalKeySizeLimit = 63
})
c.Assert(committer.CheckAsyncCommit(), IsFalse)
}
type mockClient struct {
inner tikv.Client
seenPrimaryReq uint32
seenSecondaryReq uint32
}
func (m *mockClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
// If we find a prewrite request, check if it satisfies our constraints.
if pr, ok := req.Req.(*kvrpcpb.PrewriteRequest); ok {
if pr.UseAsyncCommit {
if isPrimary(pr) {
// The primary key should not be included, nor should there be any duplicates. All keys should be present.
if !includesPrimary(pr) && allKeysNoDups(pr) {
atomic.StoreUint32(&m.seenPrimaryReq, 1)
}
} else {
// Secondaries should only be sent with the primary key
if len(pr.Secondaries) == 0 {
atomic.StoreUint32(&m.seenSecondaryReq, 1)
}
}
}
}
return m.inner.SendRequest(ctx, addr, req, timeout)
}
func (m *mockClient) Close() error {
return m.inner.Close()
}
func isPrimary(req *kvrpcpb.PrewriteRequest) bool {
for _, m := range req.Mutations {
if bytes.Equal(req.PrimaryLock, m.Key) {
return true
}
}
return false
}
func includesPrimary(req *kvrpcpb.PrewriteRequest) bool {
for _, k := range req.Secondaries {
if bytes.Equal(req.PrimaryLock, k) {
return true
}
}
return false
}
func allKeysNoDups(req *kvrpcpb.PrewriteRequest) bool {
check := make(map[string]bool)
// Create the check map and check for duplicates.
for _, k := range req.Secondaries {
s := string(k)
if check[s] {
return false
}
check[s] = true
}
// Check every key is present.
for i := byte(50); i < 120; i++ {
k := []byte{i}
if !bytes.Equal(req.PrimaryLock, k) && !check[string(k)] {
return false
}
}
return true
}
| store/tikv/tests/2pc_test.go | 1 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.9946107864379883,
0.056695520877838135,
0.00015923104365356266,
0.00017508516611997038,
0.20957262814044952
] |
{
"id": 1,
"code_window": [
"\t\t}\n",
"\t\treturn nil\n",
"\t}\n",
"\tlockWaitTime := e.ctx.GetSessionVars().LockWaitTimeout\n",
"\tif e.Lock.LockType == ast.SelectLockForUpdateNoWait {\n",
"\t\tlockWaitTime = kv.LockNoWait\n",
"\t} else if e.Lock.LockType == ast.SelectLockForUpdateWaitN {\n",
"\t\tlockWaitTime = int64(e.Lock.WaitSec) * 1000\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tlockWaitTime = tikv.LockNoWait\n"
],
"file_path": "executor/executor.go",
"type": "replace",
"edit_start_line_idx": 950
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package statistics
import (
"container/heap"
"context"
"math/rand"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/collate"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/pingcap/tipb/go-tipb"
)
// RowSampleCollector collects the samples from the source and organizes the samples by row.
// It maintains the following things:
// Row samples.
// FM sketches(To calculate the NDV).
// Null counts.
// The data sizes.
// The number of rows.
// It uses weighted reservoir sampling (A-Res) to do the sampling.
type RowSampleCollector struct {
Samples WeightedRowSampleHeap
NullCount []int64
FMSketches []*FMSketch
TotalSizes []int64
Count int64
MaxSampleSize int
}
// RowSampleItem is the item for the RowSampleCollector. The weight is needed for the sampling algorithm.
type RowSampleItem struct {
Columns []types.Datum
Weight int64
}
// WeightedRowSampleHeap implements the Heap interface.
type WeightedRowSampleHeap []*RowSampleItem
// Len implements the Heap interface.
func (h WeightedRowSampleHeap) Len() int {
return len(h)
}
// Swap implements the Heap interface.
func (h WeightedRowSampleHeap) Swap(i, j int) {
h[i], h[j] = h[j], h[i]
}
// Less implements the Heap interface.
func (h WeightedRowSampleHeap) Less(i, j int) bool {
return h[i].Weight < h[j].Weight
}
// Push implements the Heap interface.
func (h *WeightedRowSampleHeap) Push(i interface{}) {
*h = append(*h, i.(*RowSampleItem))
}
// Pop implements the Heap interface.
func (h *WeightedRowSampleHeap) Pop() interface{} {
old := *h
n := len(old)
item := old[n-1]
*h = old[:n-1]
return item
}
// RowSampleBuilder is used to construct the RowSampleCollector to get the samples.
type RowSampleBuilder struct {
Sc *stmtctx.StatementContext
RecordSet sqlexec.RecordSet
ColsFieldType []*types.FieldType
Collators []collate.Collator
ColGroups [][]int64
MaxSampleSize int
MaxFMSketchSize int
Rng *rand.Rand
}
// Collect first builds the collector, then maintains the null count, FM sketch and data size for each column and
// column group, and finally uses weighted reservoir sampling to collect the samples.
func (s *RowSampleBuilder) Collect() (*RowSampleCollector, error) {
collector := &RowSampleCollector{
Samples: make(WeightedRowSampleHeap, 0, s.MaxSampleSize),
NullCount: make([]int64, len(s.ColsFieldType)+len(s.ColGroups)),
FMSketches: make([]*FMSketch, 0, len(s.ColsFieldType)+len(s.ColGroups)),
TotalSizes: make([]int64, len(s.ColsFieldType)+len(s.ColGroups)),
MaxSampleSize: s.MaxSampleSize,
}
for i := 0; i < len(s.ColsFieldType)+len(s.ColGroups); i++ {
collector.FMSketches = append(collector.FMSketches, NewFMSketch(s.MaxFMSketchSize))
}
ctx := context.TODO()
chk := s.RecordSet.NewChunk()
it := chunk.NewIterator4Chunk(chk)
for {
err := s.RecordSet.Next(ctx, chk)
if err != nil {
return nil, err
}
if chk.NumRows() == 0 {
return collector, nil
}
collector.Count += int64(chk.NumRows())
for row := it.Begin(); row != it.End(); row = it.Next() {
datums := RowToDatums(row, s.RecordSet.Fields())
for i, val := range datums {
// For string values, we use the collation key instead of the original value.
if s.Collators[i] != nil && !val.IsNull() {
decodedVal, err := tablecodec.DecodeColumnValue(val.GetBytes(), s.ColsFieldType[i], s.Sc.TimeZone)
if err != nil {
return nil, err
}
decodedVal.SetBytesAsString(s.Collators[i].Key(decodedVal.GetString()), decodedVal.Collation(), uint32(decodedVal.Length()))
encodedKey, err := tablecodec.EncodeValue(s.Sc, nil, decodedVal)
if err != nil {
return nil, err
}
val.SetBytes(encodedKey)
}
}
err := collector.collectColumns(s.Sc, datums)
if err != nil {
return nil, err
}
err = collector.collectColumnGroups(s.Sc, datums, s.ColGroups)
if err != nil {
return nil, err
}
weight := s.Rng.Int63()
newCols := make([]types.Datum, len(datums))
for i := range datums {
datums[i].Copy(&newCols[i])
}
item := &RowSampleItem{
Columns: newCols,
Weight: weight,
}
collector.sampleZippedRow(item)
}
}
}
func (s *RowSampleCollector) collectColumns(sc *stmtctx.StatementContext, cols []types.Datum) error {
for i, col := range cols {
if col.IsNull() {
s.NullCount[i]++
continue
}
		// Minus one is to remove the flag byte.
		s.TotalSizes[i] += int64(len(col.GetBytes())) - 1
err := s.FMSketches[i].InsertValue(sc, col)
if err != nil {
return err
}
}
return nil
}
func (s *RowSampleCollector) collectColumnGroups(sc *stmtctx.StatementContext, cols []types.Datum, colGroups [][]int64) error {
colLen := len(cols)
datumBuffer := make([]types.Datum, 0, len(cols))
for i, group := range colGroups {
datumBuffer = datumBuffer[:0]
hasNull := true
for _, c := range group {
datumBuffer = append(datumBuffer, cols[c])
hasNull = hasNull && cols[c].IsNull()
s.TotalSizes[colLen+i] += int64(len(cols[c].GetBytes())) - 1
}
// We don't maintain the null counts information for the multi-column group
if hasNull && len(group) == 1 {
s.NullCount[colLen+i]++
continue
}
err := s.FMSketches[colLen+i].InsertRowValue(sc, datumBuffer)
if err != nil {
return err
}
}
return nil
}
func (s *RowSampleCollector) sampleZippedRow(sample *RowSampleItem) {
if len(s.Samples) < s.MaxSampleSize {
s.Samples = append(s.Samples, sample)
if len(s.Samples) == s.MaxSampleSize {
heap.Init(&s.Samples)
}
return
}
if s.Samples[0].Weight < sample.Weight {
s.Samples[0] = sample
heap.Fix(&s.Samples, 0)
}
}
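// Editor's note: exampleTopKByWeight below is an added sketch, not part of the original file. It isolates
// the weighted reservoir idea used by sampleZippedRow: keep the k items with the largest random weights by
// filling the buffer first, heapifying it into a min-heap on weight, and then evicting the root whenever a
// newly drawn item carries a larger weight. The function name and parameters are illustrative only.
func exampleTopKByWeight(k, n int, rng *rand.Rand) WeightedRowSampleHeap {
	h := make(WeightedRowSampleHeap, 0, k)
	for i := 0; i < n; i++ {
		item := &RowSampleItem{Weight: rng.Int63()}
		if len(h) < k {
			h = append(h, item)
			if len(h) == k {
				heap.Init(&h)
			}
			continue
		}
		if h[0].Weight < item.Weight {
			h[0] = item
			heap.Fix(&h, 0)
		}
	}
	return h
}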
// ToProto converts the collector to proto struct.
func (s *RowSampleCollector) ToProto() *tipb.RowSampleCollector {
pbFMSketches := make([]*tipb.FMSketch, 0, len(s.FMSketches))
for _, sketch := range s.FMSketches {
pbFMSketches = append(pbFMSketches, FMSketchToProto(sketch))
}
collector := &tipb.RowSampleCollector{
Samples: RowSamplesToProto(s.Samples),
NullCounts: s.NullCount,
Count: s.Count,
FmSketch: pbFMSketches,
TotalSize: s.TotalSizes,
}
return collector
}
// FromProto constructs the collector from the proto struct.
func (s *RowSampleCollector) FromProto(pbCollector *tipb.RowSampleCollector) {
s.Count = pbCollector.Count
s.NullCount = pbCollector.NullCounts
s.FMSketches = make([]*FMSketch, 0, len(pbCollector.FmSketch))
for _, pbSketch := range pbCollector.FmSketch {
s.FMSketches = append(s.FMSketches, FMSketchFromProto(pbSketch))
}
s.TotalSizes = pbCollector.TotalSize
s.Samples = make(WeightedRowSampleHeap, 0, len(pbCollector.Samples))
for _, pbSample := range pbCollector.Samples {
data := make([]types.Datum, 0, len(pbSample.Row))
for _, col := range pbSample.Row {
b := make([]byte, len(col))
copy(b, col)
data = append(data, types.NewBytesDatum(b))
}
// The samples collected from regions are also organized by binary heap. So we can just copy the slice.
// No need to maintain the heap again.
s.Samples = append(s.Samples, &RowSampleItem{
Columns: data,
Weight: pbSample.Weight,
})
}
}
// MergeCollector merges a sub-collector into the current collector.
func (s *RowSampleCollector) MergeCollector(subCollector *RowSampleCollector) {
s.Count += subCollector.Count
for i := range subCollector.FMSketches {
s.FMSketches[i].MergeFMSketch(subCollector.FMSketches[i])
}
for i := range subCollector.NullCount {
s.NullCount[i] += subCollector.NullCount[i]
}
for i := range subCollector.TotalSizes {
s.TotalSizes[i] += subCollector.TotalSizes[i]
}
for _, sample := range subCollector.Samples {
s.sampleZippedRow(sample)
}
}
// RowSamplesToProto converts the sample slice to the pb struct.
func RowSamplesToProto(samples WeightedRowSampleHeap) []*tipb.RowSample {
if len(samples) == 0 {
return nil
}
rows := make([]*tipb.RowSample, 0, len(samples))
colLen := len(samples[0].Columns)
for _, sample := range samples {
pbRow := &tipb.RowSample{
Row: make([][]byte, 0, colLen),
Weight: sample.Weight,
}
for _, c := range sample.Columns {
pbRow.Row = append(pbRow.Row, c.GetBytes())
}
rows = append(rows, pbRow)
}
return rows
}
| statistics/row_sampler.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.00017921533435583115,
0.00017238316650036722,
0.00016509788110852242,
0.00017138931434601545,
0.000004327357146394206
] |
{
"id": 1,
"code_window": [
"\t\t}\n",
"\t\treturn nil\n",
"\t}\n",
"\tlockWaitTime := e.ctx.GetSessionVars().LockWaitTimeout\n",
"\tif e.Lock.LockType == ast.SelectLockForUpdateNoWait {\n",
"\t\tlockWaitTime = kv.LockNoWait\n",
"\t} else if e.Lock.LockType == ast.SelectLockForUpdateWaitN {\n",
"\t\tlockWaitTime = int64(e.Lock.WaitSec) * 1000\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tlockWaitTime = tikv.LockNoWait\n"
],
"file_path": "executor/executor.go",
"type": "replace",
"edit_start_line_idx": 950
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"github.com/pingcap/parser/mysql"
)
const maxColumnNameSize = 256
// ColumnInfo contains information of a column
type ColumnInfo struct {
Schema string
Table string
OrgTable string
Name string
OrgName string
ColumnLength uint32
Charset uint16
Flag uint16
Decimal uint8
Type uint8
DefaultValueLength uint64
DefaultValue []byte
}
// Dump dumps ColumnInfo to bytes.
func (column *ColumnInfo) Dump(buffer []byte) []byte {
nameDump, orgnameDump := []byte(column.Name), []byte(column.OrgName)
if len(nameDump) > maxColumnNameSize {
nameDump = nameDump[0:maxColumnNameSize]
}
if len(orgnameDump) > maxColumnNameSize {
orgnameDump = orgnameDump[0:maxColumnNameSize]
}
buffer = dumpLengthEncodedString(buffer, []byte("def"))
buffer = dumpLengthEncodedString(buffer, []byte(column.Schema))
buffer = dumpLengthEncodedString(buffer, []byte(column.Table))
buffer = dumpLengthEncodedString(buffer, []byte(column.OrgTable))
buffer = dumpLengthEncodedString(buffer, nameDump)
buffer = dumpLengthEncodedString(buffer, orgnameDump)
buffer = append(buffer, 0x0c)
buffer = dumpUint16(buffer, column.Charset)
buffer = dumpUint32(buffer, column.ColumnLength)
buffer = append(buffer, dumpType(column.Type))
buffer = dumpUint16(buffer, dumpFlag(column.Type, column.Flag))
buffer = append(buffer, column.Decimal)
buffer = append(buffer, 0, 0)
if column.DefaultValue != nil {
buffer = dumpUint64(buffer, uint64(len(column.DefaultValue)))
buffer = append(buffer, column.DefaultValue...)
}
return buffer
}
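// Editor's note: exampleDumpColumn below is an added sketch, not part of the original file. It shows how a
// caller might append one column-definition packet for a VARCHAR(255) column to a reused buffer. The field
// values (schema/table/column names, collation id 33 for utf8_general_ci) are assumptions for illustration.
func exampleDumpColumn(buf []byte) []byte {
	col := &ColumnInfo{
		Schema:       "test",
		Table:        "t",
		OrgTable:     "t",
		Name:         "c",
		OrgName:      "c",
		ColumnLength: 255,
		Charset:      33,
		Type:         mysql.TypeVarchar,
	}
	return col.Dump(buf)
}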
func dumpFlag(tp byte, flag uint16) uint16 {
switch tp {
case mysql.TypeSet:
return flag | uint16(mysql.SetFlag)
case mysql.TypeEnum:
return flag | uint16(mysql.EnumFlag)
default:
if mysql.HasBinaryFlag(uint(flag)) {
return flag | uint16(mysql.NotNullFlag)
}
return flag
}
}
func dumpType(tp byte) byte {
switch tp {
case mysql.TypeSet, mysql.TypeEnum:
return mysql.TypeString
default:
return tp
}
}
| server/column.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.00019000869360752404,
0.00017540522094350308,
0.00016667216550558805,
0.00017692413530312479,
0.000006535798092954792
] |
{
"id": 1,
"code_window": [
"\t\t}\n",
"\t\treturn nil\n",
"\t}\n",
"\tlockWaitTime := e.ctx.GetSessionVars().LockWaitTimeout\n",
"\tif e.Lock.LockType == ast.SelectLockForUpdateNoWait {\n",
"\t\tlockWaitTime = kv.LockNoWait\n",
"\t} else if e.Lock.LockType == ast.SelectLockForUpdateWaitN {\n",
"\t\tlockWaitTime = int64(e.Lock.WaitSec) * 1000\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tlockWaitTime = tikv.LockNoWait\n"
],
"file_path": "executor/executor.go",
"type": "replace",
"edit_start_line_idx": 950
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
)
// distsql metrics.
var (
DistSQLQueryHistogram = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "tidb",
Subsystem: "distsql",
Name: "handle_query_duration_seconds",
Help: "Bucketed histogram of processing time (s) of handled queries.",
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 29), // 0.5ms ~ 1.5days
}, []string{LblType, LblSQLType})
DistSQLScanKeysPartialHistogram = prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: "tidb",
Subsystem: "distsql",
Name: "scan_keys_partial_num",
Help: "number of scanned keys for each partial result.",
},
)
DistSQLScanKeysHistogram = prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: "tidb",
Subsystem: "distsql",
Name: "scan_keys_num",
Help: "number of scanned keys for each query.",
},
)
DistSQLPartialCountHistogram = prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: "tidb",
Subsystem: "distsql",
Name: "partial_num",
Help: "number of partial results for each query.",
},
)
DistSQLCoprCacheHistogram = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "tidb",
Subsystem: "distsql",
Name: "copr_cache",
Help: "coprocessor cache hit, evict and miss number",
Buckets: prometheus.ExponentialBuckets(1, 2, 16),
}, []string{LblType})
)
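// Editor's note: exampleObserveDistSQLQuery below is an added sketch, not part of the original file. It
// shows how callers typically feed these collectors, assuming the metrics have been registered elsewhere
// (for example via prometheus.MustRegister). The label values "select" and "general" are illustrative only.
func exampleObserveDistSQLQuery(durationSeconds float64, scannedKeys, partialResults int) {
	DistSQLQueryHistogram.WithLabelValues("select", "general").Observe(durationSeconds)
	DistSQLScanKeysHistogram.Observe(float64(scannedKeys))
	DistSQLPartialCountHistogram.Observe(float64(partialResults))
}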
| metrics/distsql.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.00018015937530435622,
0.00017414268222637475,
0.0001660978450672701,
0.00017301557818427682,
0.000004595670361595694
] |
{
"id": 2,
"code_window": [
"\tWaitScatterRegionFinish(ctx context.Context, regionID uint64, backOff int) error\n",
"\tCheckRegionInScattering(regionID uint64) (bool, error)\n",
"}\n",
"\n",
"// Used for pessimistic lock wait time\n",
"// these two constants are special for lock protocol with tikv\n",
"// 0 means always wait, -1 means nowait, others meaning lock wait in milliseconds\n",
"var (\n",
"\tLockAlwaysWait = int64(0)\n",
"\tLockNoWait = int64(-1)\n",
")"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace"
],
"after_edit": [],
"file_path": "kv/kv.go",
"type": "replace",
"edit_start_line_idx": 426
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"bytes"
"fmt"
math2 "math"
"github.com/pingcap/errors"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/charset"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/opcode"
"github.com/pingcap/parser/terror"
ptypes "github.com/pingcap/parser/types"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/planner/property"
"github.com/pingcap/tidb/privilege"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/types"
driver "github.com/pingcap/tidb/types/parser_driver"
tidbutil "github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/math"
"github.com/pingcap/tidb/util/plancodec"
"github.com/pingcap/tidb/util/stringutil"
"github.com/pingcap/tipb/go-tipb"
"go.uber.org/zap"
)
// PointGetPlan is a fast plan for simple point get.
// When we detect that the statement has a unique equal access condition, this plan is used.
// This plan is much faster to build and to execute because it avoids the optimization and coprocessor cost.
type PointGetPlan struct {
basePlan
dbName string
schema *expression.Schema
TblInfo *model.TableInfo
IndexInfo *model.IndexInfo
PartitionInfo *model.PartitionDefinition
Handle kv.Handle
HandleParam *driver.ParamMarkerExpr
IndexValues []types.Datum
IndexValueParams []*driver.ParamMarkerExpr
IdxCols []*expression.Column
IdxColLens []int
AccessConditions []expression.Expression
ctx sessionctx.Context
UnsignedHandle bool
IsTableDual bool
Lock bool
outputNames []*types.FieldName
LockWaitTime int64
partitionColumnPos int
Columns []*model.ColumnInfo
cost float64
}
type nameValuePair struct {
colName string
value types.Datum
param *driver.ParamMarkerExpr
}
// Schema implements the Plan interface.
func (p *PointGetPlan) Schema() *expression.Schema {
return p.schema
}
// Cost implements PhysicalPlan interface
func (p *PointGetPlan) Cost() float64 {
return p.cost
}
// SetCost implements PhysicalPlan interface
func (p *PointGetPlan) SetCost(cost float64) {
p.cost = cost
}
// attach2Task makes the current physical plan the parent of the task's physicalPlan and updates the cost of
// the current task. If the child's task is a cop task, some operator may close this task and return a new rootTask.
func (p *PointGetPlan) attach2Task(...task) task {
return nil
}
// ToPB converts physical plan to tipb executor.
func (p *PointGetPlan) ToPB(ctx sessionctx.Context, _ kv.StoreType) (*tipb.Executor, error) {
return nil, nil
}
// Clone implements PhysicalPlan interface.
func (p *PointGetPlan) Clone() (PhysicalPlan, error) {
return nil, errors.Errorf("%T doesn't support cloning.", p)
}
// ExplainInfo implements Plan interface.
func (p *PointGetPlan) ExplainInfo() string {
accessObject, operatorInfo := p.AccessObject(false), p.OperatorInfo(false)
if len(operatorInfo) == 0 {
return accessObject
}
return accessObject + ", " + operatorInfo
}
// ExplainNormalizedInfo implements Plan interface.
func (p *PointGetPlan) ExplainNormalizedInfo() string {
accessObject, operatorInfo := p.AccessObject(true), p.OperatorInfo(true)
if len(operatorInfo) == 0 {
return accessObject
}
return accessObject + ", " + operatorInfo
}
// AccessObject implements dataAccesser interface.
func (p *PointGetPlan) AccessObject(normalized bool) string {
buffer := bytes.NewBufferString("")
tblName := p.TblInfo.Name.O
fmt.Fprintf(buffer, "table:%s", tblName)
if p.PartitionInfo != nil {
if normalized {
fmt.Fprintf(buffer, ", partition:?")
} else {
fmt.Fprintf(buffer, ", partition:%s", p.PartitionInfo.Name.L)
}
}
if p.IndexInfo != nil {
if p.IndexInfo.Primary && p.TblInfo.IsCommonHandle {
buffer.WriteString(", clustered index:" + p.IndexInfo.Name.O + "(")
} else {
buffer.WriteString(", index:" + p.IndexInfo.Name.O + "(")
}
for i, idxCol := range p.IndexInfo.Columns {
if tblCol := p.TblInfo.Columns[idxCol.Offset]; tblCol.Hidden {
buffer.WriteString(tblCol.GeneratedExprString)
} else {
buffer.WriteString(idxCol.Name.O)
}
if i+1 < len(p.IndexInfo.Columns) {
buffer.WriteString(", ")
}
}
buffer.WriteString(")")
}
return buffer.String()
}
// OperatorInfo implements dataAccesser interface.
func (p *PointGetPlan) OperatorInfo(normalized bool) string {
buffer := bytes.NewBufferString("")
if p.Handle != nil {
if normalized {
fmt.Fprintf(buffer, "handle:?, ")
} else {
if p.UnsignedHandle {
fmt.Fprintf(buffer, "handle:%d, ", uint64(p.Handle.IntValue()))
} else {
fmt.Fprintf(buffer, "handle:%s, ", p.Handle)
}
}
}
if p.Lock {
fmt.Fprintf(buffer, "lock, ")
}
if buffer.Len() >= 2 {
buffer.Truncate(buffer.Len() - 2)
}
return buffer.String()
}
// ExtractCorrelatedCols implements PhysicalPlan interface.
func (p *PointGetPlan) ExtractCorrelatedCols() []*expression.CorrelatedColumn {
return nil
}
// GetChildReqProps gets the required property by child index.
func (p *PointGetPlan) GetChildReqProps(idx int) *property.PhysicalProperty {
return nil
}
// StatsCount will return the RowCount of property.StatsInfo for this plan.
func (p *PointGetPlan) StatsCount() float64 {
return 1
}
// statsInfo will return the RowCount of property.StatsInfo for this plan.
func (p *PointGetPlan) statsInfo() *property.StatsInfo {
if p.stats == nil {
p.stats = &property.StatsInfo{}
}
p.stats.RowCount = 1
return p.stats
}
// Children gets all the children.
func (p *PointGetPlan) Children() []PhysicalPlan {
return nil
}
// SetChildren sets the children for the plan.
func (p *PointGetPlan) SetChildren(...PhysicalPlan) {}
// SetChild sets a specific child for the plan.
func (p *PointGetPlan) SetChild(i int, child PhysicalPlan) {}
// ResolveIndices resolves the indices for columns. After doing this, the columns can evaluate the rows by their indices.
func (p *PointGetPlan) ResolveIndices() error {
return resolveIndicesForVirtualColumn(p.schema.Columns, p.schema)
}
// OutputNames returns the outputting names of each column.
func (p *PointGetPlan) OutputNames() types.NameSlice {
return p.outputNames
}
// SetOutputNames sets the outputting name by the given slice.
func (p *PointGetPlan) SetOutputNames(names types.NameSlice) {
p.outputNames = names
}
// GetCost returns cost of the PointGetPlan.
func (p *PointGetPlan) GetCost(cols []*expression.Column) float64 {
sessVars := p.ctx.GetSessionVars()
var rowSize float64
cost := 0.0
if p.IndexInfo == nil {
rowSize = p.stats.HistColl.GetTableAvgRowSize(p.ctx, cols, kv.TiKV, true)
} else {
rowSize = p.stats.HistColl.GetIndexAvgRowSize(p.ctx, cols, p.IndexInfo.Unique)
}
cost += rowSize * sessVars.NetworkFactor
cost += sessVars.SeekFactor
cost /= float64(sessVars.DistSQLScanConcurrency())
return cost
}
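// Editor's note (added, illustrative): as a worked example of the formula above, suppose the average row
// size is 100 bytes and the session uses NetworkFactor = 1.0, SeekFactor = 20 and a DistSQL scan
// concurrency of 15 (these numbers are assumptions, not guaranteed defaults). The cost would then be
// (100*1.0 + 20) / 15 = 8.0.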
// BatchPointGetPlan represents a physical plan which contains a bunch of
// keys reference the same table and use the same `unique key`
type BatchPointGetPlan struct {
baseSchemaProducer
ctx sessionctx.Context
dbName string
TblInfo *model.TableInfo
IndexInfo *model.IndexInfo
Handles []kv.Handle
HandleParams []*driver.ParamMarkerExpr
IndexValues [][]types.Datum
IndexValueParams [][]*driver.ParamMarkerExpr
AccessConditions []expression.Expression
IdxCols []*expression.Column
IdxColLens []int
PartitionColPos int
KeepOrder bool
Desc bool
Lock bool
LockWaitTime int64
Columns []*model.ColumnInfo
cost float64
// SinglePart indicates whether this BatchPointGetPlan is just for a single partition, instead of the whole partition table.
	// If the BatchPointGetPlan is built in the fast path, this value is false; if the plan is generated in physical optimization for a partition,
// this value would be true. This value would decide the behavior of BatchPointGetExec, i.e, whether to compute the table ID of the partition
// on the fly.
SinglePart bool
// PartTblID is the table ID for the specific table partition.
PartTblID int64
}
// Cost implements PhysicalPlan interface
func (p *BatchPointGetPlan) Cost() float64 {
return p.cost
}
// SetCost implements PhysicalPlan interface
func (p *BatchPointGetPlan) SetCost(cost float64) {
p.cost = cost
}
// Clone implements PhysicalPlan interface.
func (p *BatchPointGetPlan) Clone() (PhysicalPlan, error) {
return nil, errors.Errorf("%T doesn't support cloning", p)
}
// ExtractCorrelatedCols implements PhysicalPlan interface.
func (p *BatchPointGetPlan) ExtractCorrelatedCols() []*expression.CorrelatedColumn {
return nil
}
// attach2Task makes the current physical plan the parent of the task's physicalPlan and updates the cost of
// the current task. If the child's task is a cop task, some operator may close this task and return a new rootTask.
func (p *BatchPointGetPlan) attach2Task(...task) task {
return nil
}
// ToPB converts physical plan to tipb executor.
func (p *BatchPointGetPlan) ToPB(ctx sessionctx.Context, _ kv.StoreType) (*tipb.Executor, error) {
return nil, nil
}
// ExplainInfo implements Plan interface.
func (p *BatchPointGetPlan) ExplainInfo() string {
return p.AccessObject(false) + ", " + p.OperatorInfo(false)
}
// ExplainNormalizedInfo implements Plan interface.
func (p *BatchPointGetPlan) ExplainNormalizedInfo() string {
return p.AccessObject(true) + ", " + p.OperatorInfo(true)
}
// AccessObject implements physicalScan interface.
func (p *BatchPointGetPlan) AccessObject(_ bool) string {
buffer := bytes.NewBufferString("")
tblName := p.TblInfo.Name.O
fmt.Fprintf(buffer, "table:%s", tblName)
if p.IndexInfo != nil {
if p.IndexInfo.Primary && p.TblInfo.IsCommonHandle {
buffer.WriteString(", clustered index:" + p.IndexInfo.Name.O + "(")
} else {
buffer.WriteString(", index:" + p.IndexInfo.Name.O + "(")
}
for i, idxCol := range p.IndexInfo.Columns {
if tblCol := p.TblInfo.Columns[idxCol.Offset]; tblCol.Hidden {
buffer.WriteString(tblCol.GeneratedExprString)
} else {
buffer.WriteString(idxCol.Name.O)
}
if i+1 < len(p.IndexInfo.Columns) {
buffer.WriteString(", ")
}
}
buffer.WriteString(")")
}
return buffer.String()
}
// OperatorInfo implements dataAccesser interface.
func (p *BatchPointGetPlan) OperatorInfo(normalized bool) string {
buffer := bytes.NewBufferString("")
if p.IndexInfo == nil {
if normalized {
fmt.Fprintf(buffer, "handle:?, ")
} else {
fmt.Fprintf(buffer, "handle:%v, ", p.Handles)
}
}
fmt.Fprintf(buffer, "keep order:%v, ", p.KeepOrder)
fmt.Fprintf(buffer, "desc:%v, ", p.Desc)
if p.Lock {
fmt.Fprintf(buffer, "lock, ")
}
if buffer.Len() >= 2 {
buffer.Truncate(buffer.Len() - 2)
}
return buffer.String()
}
// GetChildReqProps gets the required property by child index.
func (p *BatchPointGetPlan) GetChildReqProps(idx int) *property.PhysicalProperty {
return nil
}
// StatsCount will return the RowCount of property.StatsInfo for this plan.
func (p *BatchPointGetPlan) StatsCount() float64 {
return p.statsInfo().RowCount
}
// statsInfo will return the RowCount of property.StatsInfo for this plan.
func (p *BatchPointGetPlan) statsInfo() *property.StatsInfo {
return p.stats
}
// Children gets all the children.
func (p *BatchPointGetPlan) Children() []PhysicalPlan {
return nil
}
// SetChildren sets the children for the plan.
func (p *BatchPointGetPlan) SetChildren(...PhysicalPlan) {}
// SetChild sets a specific child for the plan.
func (p *BatchPointGetPlan) SetChild(i int, child PhysicalPlan) {}
// ResolveIndices resolves the indices for columns. After doing this, the columns can evaluate the rows by their indices.
func (p *BatchPointGetPlan) ResolveIndices() error {
return resolveIndicesForVirtualColumn(p.schema.Columns, p.schema)
}
// OutputNames returns the outputting names of each column.
func (p *BatchPointGetPlan) OutputNames() types.NameSlice {
return p.names
}
// SetOutputNames sets the outputting name by the given slice.
func (p *BatchPointGetPlan) SetOutputNames(names types.NameSlice) {
p.names = names
}
// GetCost returns the cost of the BatchPointGetPlan.
func (p *BatchPointGetPlan) GetCost(cols []*expression.Column) float64 {
sessVars := p.ctx.GetSessionVars()
var rowSize, rowCount float64
cost := 0.0
if p.IndexInfo == nil {
rowCount = float64(len(p.Handles))
rowSize = p.stats.HistColl.GetTableAvgRowSize(p.ctx, cols, kv.TiKV, true)
} else {
rowCount = float64(len(p.IndexValues))
rowSize = p.stats.HistColl.GetIndexAvgRowSize(p.ctx, cols, p.IndexInfo.Unique)
}
cost += rowCount * rowSize * sessVars.NetworkFactor
cost += rowCount * sessVars.SeekFactor
cost /= float64(sessVars.DistSQLScanConcurrency())
return cost
}
// PointPlanKey is used to get point plan that is pre-built for multi-statement query.
const PointPlanKey = stringutil.StringerStr("pointPlanKey")
// PointPlanVal is used to store point plan that is pre-built for multi-statement query.
// Save the plan in a struct so even if the point plan is nil, we don't need to try again.
type PointPlanVal struct {
Plan Plan
}
// TryFastPlan tries to use the PointGetPlan for the query.
func TryFastPlan(ctx sessionctx.Context, node ast.Node) (p Plan) {
ctx.GetSessionVars().PlanID = 0
ctx.GetSessionVars().PlanColumnID = 0
switch x := node.(type) {
case *ast.SelectStmt:
defer func() {
if ctx.GetSessionVars().SelectLimit != math2.MaxUint64 && p != nil {
ctx.GetSessionVars().StmtCtx.AppendWarning(errors.New("sql_select_limit is set, so point get plan is not activated"))
p = nil
}
}()
// Try to convert the `SELECT a, b, c FROM t WHERE (a, b, c) in ((1, 2, 4), (1, 3, 5))` to
		// `PhysicalUnionAll` whose children are `PointGet`s, if a unique key (a, b, c) exists in table `t`
if fp := tryWhereIn2BatchPointGet(ctx, x); fp != nil {
if checkFastPlanPrivilege(ctx, fp.dbName, fp.TblInfo.Name.L, mysql.SelectPriv) != nil {
return
}
if tidbutil.IsMemDB(fp.dbName) {
return nil
}
fp.Lock, fp.LockWaitTime = getLockWaitTime(ctx, x.LockInfo)
p = fp
return
}
if fp := tryPointGetPlan(ctx, x, isForUpdateReadSelectLock(x.LockInfo)); fp != nil {
if checkFastPlanPrivilege(ctx, fp.dbName, fp.TblInfo.Name.L, mysql.SelectPriv) != nil {
return nil
}
if tidbutil.IsMemDB(fp.dbName) {
return nil
}
if fp.IsTableDual {
tableDual := PhysicalTableDual{}
tableDual.names = fp.outputNames
tableDual.SetSchema(fp.Schema())
p = tableDual.Init(ctx, &property.StatsInfo{}, 0)
return
}
fp.Lock, fp.LockWaitTime = getLockWaitTime(ctx, x.LockInfo)
p = fp
return
}
case *ast.UpdateStmt:
return tryUpdatePointPlan(ctx, x)
case *ast.DeleteStmt:
return tryDeletePointPlan(ctx, x)
}
return nil
}
// IsSelectForUpdateLockType checks if the select lock type is for update type.
func IsSelectForUpdateLockType(lockType ast.SelectLockType) bool {
if lockType == ast.SelectLockForUpdate ||
lockType == ast.SelectLockForShare ||
lockType == ast.SelectLockForUpdateNoWait ||
lockType == ast.SelectLockForUpdateWaitN {
return true
}
return false
}
func getLockWaitTime(ctx sessionctx.Context, lockInfo *ast.SelectLockInfo) (lock bool, waitTime int64) {
if lockInfo != nil {
if IsSelectForUpdateLockType(lockInfo.LockType) {
// Locking of rows for update using SELECT FOR UPDATE only applies when autocommit
// is disabled (either by beginning transaction with START TRANSACTION or by setting
			// autocommit to 0). If autocommit is enabled, the rows matching the specification are not locked.
// See https://dev.mysql.com/doc/refman/5.7/en/innodb-locking-reads.html
sessVars := ctx.GetSessionVars()
if !sessVars.IsAutocommit() || sessVars.InTxn() {
lock = true
waitTime = sessVars.LockWaitTimeout
if lockInfo.LockType == ast.SelectLockForUpdateWaitN {
waitTime = int64(lockInfo.WaitSec * 1000)
} else if lockInfo.LockType == ast.SelectLockForUpdateNoWait {
waitTime = kv.LockNoWait
}
}
}
}
return
}
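// Editor's note (added, illustrative): following the logic above, inside an explicit transaction a plain
// `SELECT ... FOR UPDATE` waits for sessVars.LockWaitTimeout, `SELECT ... FOR UPDATE WAIT 5` yields
// waitTime = 5 * 1000 = 5000ms, and `SELECT ... FOR UPDATE NOWAIT` yields kv.LockNoWait; with autocommit
// enabled and no active transaction, lock stays false and no wait time applies.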
func newBatchPointGetPlan(
ctx sessionctx.Context, patternInExpr *ast.PatternInExpr,
handleCol *model.ColumnInfo, tbl *model.TableInfo, schema *expression.Schema,
names []*types.FieldName, whereColNames []string, indexHints []*ast.IndexHint,
) *BatchPointGetPlan {
statsInfo := &property.StatsInfo{RowCount: float64(len(patternInExpr.List))}
var partitionColName *ast.ColumnName
if tbl.GetPartitionInfo() != nil {
partitionColName = getHashPartitionColumnName(ctx, tbl)
if partitionColName == nil {
return nil
}
}
if handleCol != nil {
var handles = make([]kv.Handle, len(patternInExpr.List))
var handleParams = make([]*driver.ParamMarkerExpr, len(patternInExpr.List))
for i, item := range patternInExpr.List {
// SELECT * FROM t WHERE (key) in ((1), (2))
if p, ok := item.(*ast.ParenthesesExpr); ok {
item = p.Expr
}
var d types.Datum
var param *driver.ParamMarkerExpr
switch x := item.(type) {
case *driver.ValueExpr:
d = x.Datum
case *driver.ParamMarkerExpr:
d = x.Datum
param = x
default:
return nil
}
if d.IsNull() {
return nil
}
if !checkCanConvertInPointGet(handleCol, d) {
return nil
}
intDatum, err := d.ConvertTo(ctx.GetSessionVars().StmtCtx, &handleCol.FieldType)
if err != nil {
return nil
}
// The converted result must be same as original datum
cmp, err := intDatum.CompareDatum(ctx.GetSessionVars().StmtCtx, &d)
if err != nil || cmp != 0 {
return nil
}
handles[i] = kv.IntHandle(intDatum.GetInt64())
handleParams[i] = param
}
return BatchPointGetPlan{
TblInfo: tbl,
Handles: handles,
HandleParams: handleParams,
}.Init(ctx, statsInfo, schema, names, 0)
}
// The columns in where clause should be covered by unique index
var matchIdxInfo *model.IndexInfo
permutations := make([]int, len(whereColNames))
colInfos := make([]*model.ColumnInfo, len(whereColNames))
for i, innerCol := range whereColNames {
for _, col := range tbl.Columns {
if col.Name.L == innerCol {
colInfos[i] = col
}
}
}
for _, idxInfo := range tbl.Indices {
if !idxInfo.Unique || idxInfo.State != model.StatePublic || idxInfo.Invisible ||
!indexIsAvailableByHints(idxInfo, indexHints) {
continue
}
if len(idxInfo.Columns) != len(whereColNames) || idxInfo.HasPrefixIndex() {
continue
}
// TODO: not sure is there any function to reuse
matched := true
for whereColIndex, innerCol := range whereColNames {
var found bool
for i, col := range idxInfo.Columns {
if innerCol == col.Name.L {
permutations[whereColIndex] = i
found = true
break
}
}
if !found {
matched = false
break
}
}
if matched {
matchIdxInfo = idxInfo
break
}
}
if matchIdxInfo == nil {
return nil
}
indexValues := make([][]types.Datum, len(patternInExpr.List))
indexValueParams := make([][]*driver.ParamMarkerExpr, len(patternInExpr.List))
for i, item := range patternInExpr.List {
// SELECT * FROM t WHERE (key) in ((1), (2))
if p, ok := item.(*ast.ParenthesesExpr); ok {
item = p.Expr
}
var values []types.Datum
var valuesParams []*driver.ParamMarkerExpr
switch x := item.(type) {
case *ast.RowExpr:
// The `len(values) == len(valuesParams)` should be satisfied in this mode
if len(x.Values) != len(whereColNames) {
return nil
}
values = make([]types.Datum, len(x.Values))
valuesParams = make([]*driver.ParamMarkerExpr, len(x.Values))
for index, inner := range x.Values {
permIndex := permutations[index]
switch innerX := inner.(type) {
case *driver.ValueExpr:
if !checkCanConvertInPointGet(colInfos[index], innerX.Datum) {
return nil
}
values[permIndex] = innerX.Datum
case *driver.ParamMarkerExpr:
if !checkCanConvertInPointGet(colInfos[index], innerX.Datum) {
return nil
}
values[permIndex] = innerX.Datum
valuesParams[permIndex] = innerX
default:
return nil
}
}
case *driver.ValueExpr:
// if any item is `ValueExpr` type, `Expr` should contain only one column,
// otherwise column count doesn't match and no plan can be built.
if len(whereColNames) != 1 {
return nil
}
if !checkCanConvertInPointGet(colInfos[0], x.Datum) {
return nil
}
values = []types.Datum{x.Datum}
case *driver.ParamMarkerExpr:
if len(whereColNames) != 1 {
return nil
}
if !checkCanConvertInPointGet(colInfos[0], x.Datum) {
return nil
}
values = []types.Datum{x.Datum}
valuesParams = []*driver.ParamMarkerExpr{x}
default:
return nil
}
indexValues[i] = values
indexValueParams[i] = valuesParams
}
return BatchPointGetPlan{
TblInfo: tbl,
IndexInfo: matchIdxInfo,
IndexValues: indexValues,
IndexValueParams: indexValueParams,
PartitionColPos: getPartitionColumnPos(matchIdxInfo, partitionColName),
}.Init(ctx, statsInfo, schema, names, 0)
}
func tryWhereIn2BatchPointGet(ctx sessionctx.Context, selStmt *ast.SelectStmt) *BatchPointGetPlan {
if selStmt.OrderBy != nil || selStmt.GroupBy != nil ||
selStmt.Limit != nil || selStmt.Having != nil ||
len(selStmt.WindowSpecs) > 0 {
return nil
}
in, ok := selStmt.Where.(*ast.PatternInExpr)
if !ok || in.Not || len(in.List) < 1 {
return nil
}
tblName, tblAlias := getSingleTableNameAndAlias(selStmt.From)
if tblName == nil {
return nil
}
tbl := tblName.TableInfo
if tbl == nil {
return nil
}
// Skip the optimization with partition selection.
if len(tblName.PartitionNames) > 0 {
return nil
}
for _, col := range tbl.Columns {
if col.IsGenerated() || col.State != model.StatePublic {
return nil
}
}
schema, names := buildSchemaFromFields(tblName.Schema, tbl, tblAlias, selStmt.Fields.Fields)
if schema == nil {
return nil
}
var (
handleCol *model.ColumnInfo
whereColNames []string
)
// SELECT * FROM t WHERE (key) in ((1), (2))
colExpr := in.Expr
if p, ok := colExpr.(*ast.ParenthesesExpr); ok {
colExpr = p.Expr
}
switch colName := colExpr.(type) {
case *ast.ColumnNameExpr:
if name := colName.Name.Table.L; name != "" && name != tblAlias.L {
return nil
}
// Try use handle
if tbl.PKIsHandle {
for _, col := range tbl.Columns {
if mysql.HasPriKeyFlag(col.Flag) && col.Name.L == colName.Name.Name.L {
handleCol = col
whereColNames = append(whereColNames, col.Name.L)
break
}
}
}
if handleCol == nil {
// Downgrade to use unique index
whereColNames = append(whereColNames, colName.Name.Name.L)
}
case *ast.RowExpr:
for _, col := range colName.Values {
c, ok := col.(*ast.ColumnNameExpr)
if !ok {
return nil
}
if name := c.Name.Table.L; name != "" && name != tblAlias.L {
return nil
}
whereColNames = append(whereColNames, c.Name.Name.L)
}
default:
return nil
}
p := newBatchPointGetPlan(ctx, in, handleCol, tbl, schema, names, whereColNames, tblName.IndexHints)
if p == nil {
return nil
}
p.dbName = tblName.Schema.L
if p.dbName == "" {
p.dbName = ctx.GetSessionVars().CurrentDB
}
return p
}
// tryPointGetPlan determines if the SelectStmt can use a PointGetPlan.
// Returns nil if not applicable.
// To use the PointGetPlan the following rules must be satisfied:
// 1. For the limit clause, the count should be at least 1 and the offset must be 0.
// 2. It must be a single table select.
// 3. All the columns must be public and not generated.
// 4. The condition is an access path whose range is a unique key.
func tryPointGetPlan(ctx sessionctx.Context, selStmt *ast.SelectStmt, check bool) *PointGetPlan {
if selStmt.Having != nil {
return nil
} else if selStmt.Limit != nil {
count, offset, err := extractLimitCountOffset(ctx, selStmt.Limit)
if err != nil || count == 0 || offset > 0 {
return nil
}
}
tblName, tblAlias := getSingleTableNameAndAlias(selStmt.From)
if tblName == nil {
return nil
}
tbl := tblName.TableInfo
if tbl == nil {
return nil
}
pi := tbl.GetPartitionInfo()
if pi != nil && pi.Type != model.PartitionTypeHash {
return nil
}
for _, col := range tbl.Columns {
// Do not handle generated columns.
if col.IsGenerated() {
return nil
}
// Only handle tables that all columns are public.
if col.State != model.StatePublic {
return nil
}
}
schema, names := buildSchemaFromFields(tblName.Schema, tbl, tblAlias, selStmt.Fields.Fields)
if schema == nil {
return nil
}
dbName := tblName.Schema.L
if dbName == "" {
dbName = ctx.GetSessionVars().CurrentDB
}
pairs := make([]nameValuePair, 0, 4)
pairs, isTableDual := getNameValuePairs(ctx.GetSessionVars().StmtCtx, tbl, tblAlias, pairs, selStmt.Where)
if pairs == nil && !isTableDual {
return nil
}
var partitionInfo *model.PartitionDefinition
var pos int
if pi != nil {
partitionInfo, pos = getPartitionInfo(ctx, tbl, pairs)
if partitionInfo == nil {
return nil
}
// Take partition selection into consideration.
if len(tblName.PartitionNames) > 0 {
if !partitionNameInSet(partitionInfo.Name, tblName.PartitionNames) {
p := newPointGetPlan(ctx, tblName.Schema.O, schema, tbl, names)
p.IsTableDual = true
return p
}
}
}
handlePair, fieldType := findPKHandle(tbl, pairs)
if handlePair.value.Kind() != types.KindNull && len(pairs) == 1 && indexIsAvailableByHints(nil, tblName.IndexHints) {
if isTableDual {
p := newPointGetPlan(ctx, tblName.Schema.O, schema, tbl, names)
p.IsTableDual = true
return p
}
p := newPointGetPlan(ctx, dbName, schema, tbl, names)
p.Handle = kv.IntHandle(handlePair.value.GetInt64())
p.UnsignedHandle = mysql.HasUnsignedFlag(fieldType.Flag)
p.HandleParam = handlePair.param
p.PartitionInfo = partitionInfo
return p
} else if handlePair.value.Kind() != types.KindNull {
return nil
}
check = check && ctx.GetSessionVars().ConnectionID > 0
var latestIndexes map[int64]*model.IndexInfo
var err error
for _, idxInfo := range tbl.Indices {
if !idxInfo.Unique || idxInfo.State != model.StatePublic || idxInfo.Invisible ||
!indexIsAvailableByHints(idxInfo, tblName.IndexHints) {
continue
}
if isTableDual {
if check && latestIndexes == nil {
latestIndexes, check, err = getLatestIndexInfo(ctx, tbl.ID, 0)
if err != nil {
logutil.BgLogger().Warn("get information schema failed", zap.Error(err))
return nil
}
}
if check {
if latestIndex, ok := latestIndexes[idxInfo.ID]; !ok || latestIndex.State != model.StatePublic {
continue
}
}
p := newPointGetPlan(ctx, tblName.Schema.O, schema, tbl, names)
p.IsTableDual = true
return p
}
idxValues, idxValueParams := getIndexValues(idxInfo, pairs)
if idxValues == nil {
continue
}
if check && latestIndexes == nil {
latestIndexes, check, err = getLatestIndexInfo(ctx, tbl.ID, 0)
if err != nil {
logutil.BgLogger().Warn("get information schema failed", zap.Error(err))
return nil
}
}
if check {
if latestIndex, ok := latestIndexes[idxInfo.ID]; !ok || latestIndex.State != model.StatePublic {
continue
}
}
p := newPointGetPlan(ctx, dbName, schema, tbl, names)
p.IndexInfo = idxInfo
p.IndexValues = idxValues
p.IndexValueParams = idxValueParams
p.PartitionInfo = partitionInfo
if p.PartitionInfo != nil {
p.partitionColumnPos = findPartitionIdx(idxInfo, pos, pairs)
}
return p
}
return nil
}
// indexIsAvailableByHints checks whether this index is filtered by these specified index hints.
// idxInfo is PK if it's nil
func indexIsAvailableByHints(idxInfo *model.IndexInfo, idxHints []*ast.IndexHint) bool {
if len(idxHints) == 0 {
return true
}
match := func(name model.CIStr) bool {
if idxInfo == nil {
return name.L == "primary"
}
return idxInfo.Name.L == name.L
}
// NOTICE: it's supposed that ignore hints and use/force hints will not be applied together since the effect of
// the former will be eliminated by the latter.
isIgnore := false
for _, hint := range idxHints {
if hint.HintScope != ast.HintForScan {
continue
}
if hint.HintType == ast.HintIgnore && hint.IndexNames != nil {
isIgnore = true
for _, name := range hint.IndexNames {
if match(name) {
return false
}
}
}
if (hint.HintType == ast.HintForce || hint.HintType == ast.HintUse) && hint.IndexNames != nil {
for _, name := range hint.IndexNames {
if match(name) {
return true
}
}
}
}
return isIgnore
}
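// Editor's note (added, illustrative): for example, with `USE INDEX(idx_a)` only idx_a passes this check;
// the primary key (idxInfo == nil) only matches a hint literally named `primary`. With `IGNORE INDEX(idx_a)`
// every index except idx_a remains available, and with no index hints every index is available.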
func partitionNameInSet(name model.CIStr, pnames []model.CIStr) bool {
for _, pname := range pnames {
		// Case insensitive: a partition created as p0 can be queried as P0.
if name.L == pname.L {
return true
}
}
return false
}
func newPointGetPlan(ctx sessionctx.Context, dbName string, schema *expression.Schema, tbl *model.TableInfo, names []*types.FieldName) *PointGetPlan {
p := &PointGetPlan{
basePlan: newBasePlan(ctx, plancodec.TypePointGet, 0),
dbName: dbName,
schema: schema,
TblInfo: tbl,
outputNames: names,
LockWaitTime: ctx.GetSessionVars().LockWaitTimeout,
}
ctx.GetSessionVars().StmtCtx.Tables = []stmtctx.TableEntry{{DB: dbName, Table: tbl.Name.L}}
return p
}
func checkFastPlanPrivilege(ctx sessionctx.Context, dbName, tableName string, checkTypes ...mysql.PrivilegeType) error {
pm := privilege.GetPrivilegeManager(ctx)
var visitInfos []visitInfo
for _, checkType := range checkTypes {
if pm != nil && !pm.RequestVerification(ctx.GetSessionVars().ActiveRoles, dbName, tableName, "", checkType) {
return errors.New("privilege check fail")
}
		// This visitInfo is only used for the table lock check, so the column field is
		// not needed; just fill it with an empty string.
visitInfos = append(visitInfos, visitInfo{
privilege: checkType,
db: dbName,
table: tableName,
column: "",
err: nil,
})
}
infoSchema := infoschema.GetInfoSchema(ctx)
return CheckTableLock(ctx, infoSchema, visitInfos)
}
func buildSchemaFromFields(
dbName model.CIStr,
tbl *model.TableInfo,
tblName model.CIStr,
fields []*ast.SelectField,
) (
*expression.Schema,
[]*types.FieldName,
) {
columns := make([]*expression.Column, 0, len(tbl.Columns)+1)
names := make([]*types.FieldName, 0, len(tbl.Columns)+1)
if len(fields) > 0 {
for _, field := range fields {
if field.WildCard != nil {
if field.WildCard.Table.L != "" && field.WildCard.Table.L != tblName.L {
return nil, nil
}
for _, col := range tbl.Columns {
names = append(names, &types.FieldName{
DBName: dbName,
OrigTblName: tbl.Name,
TblName: tblName,
ColName: col.Name,
})
columns = append(columns, colInfoToColumn(col, len(columns)))
}
continue
}
colNameExpr, ok := field.Expr.(*ast.ColumnNameExpr)
if !ok {
return nil, nil
}
if colNameExpr.Name.Table.L != "" && colNameExpr.Name.Table.L != tblName.L {
return nil, nil
}
col := findCol(tbl, colNameExpr.Name)
if col == nil {
return nil, nil
}
asName := colNameExpr.Name.Name
if field.AsName.L != "" {
asName = field.AsName
}
names = append(names, &types.FieldName{
DBName: dbName,
OrigTblName: tbl.Name,
TblName: tblName,
OrigColName: col.Name,
ColName: asName,
})
columns = append(columns, colInfoToColumn(col, len(columns)))
}
return expression.NewSchema(columns...), names
}
	// len(fields) is 0 for UPDATE and DELETE, so all of the table's columns are returned.
for _, col := range tbl.Columns {
names = append(names, &types.FieldName{
DBName: dbName,
OrigTblName: tbl.Name,
TblName: tblName,
ColName: col.Name,
})
column := colInfoToColumn(col, len(columns))
columns = append(columns, column)
}
schema := expression.NewSchema(columns...)
return schema, names
}
// getSingleTableNameAndAlias returns the AST node of the queried table name and the alias string.
// `tblName` is `nil` if there are multiple tables in the query.
// `tblAlias` will be the real table name if there is no table alias in the query.
func getSingleTableNameAndAlias(tableRefs *ast.TableRefsClause) (tblName *ast.TableName, tblAlias model.CIStr) {
if tableRefs == nil || tableRefs.TableRefs == nil || tableRefs.TableRefs.Right != nil {
return nil, tblAlias
}
tblSrc, ok := tableRefs.TableRefs.Left.(*ast.TableSource)
if !ok {
return nil, tblAlias
}
tblName, ok = tblSrc.Source.(*ast.TableName)
if !ok {
return nil, tblAlias
}
tblAlias = tblSrc.AsName
if tblSrc.AsName.L == "" {
tblAlias = tblName.Name
}
return tblName, tblAlias
}
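
// Illustrative note, not part of the original source: for `SELECT * FROM t1 AS x` the
// helper above yields the *ast.TableName of t1 with alias "x"; for `SELECT * FROM t1`
// the alias falls back to "t1"; and for a join such as `SELECT * FROM t1, t2` the
// TableRefs.Right node is non-nil, so it yields (nil, "").
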
// getNameValuePairs extracts `column = constant/paramMarker` conditions from expr as name value pairs.
func getNameValuePairs(stmtCtx *stmtctx.StatementContext, tbl *model.TableInfo, tblName model.CIStr, nvPairs []nameValuePair, expr ast.ExprNode) (
pairs []nameValuePair, isTableDual bool) {
binOp, ok := expr.(*ast.BinaryOperationExpr)
if !ok {
return nil, false
}
if binOp.Op == opcode.LogicAnd {
nvPairs, isTableDual = getNameValuePairs(stmtCtx, tbl, tblName, nvPairs, binOp.L)
if nvPairs == nil || isTableDual {
return nil, isTableDual
}
nvPairs, isTableDual = getNameValuePairs(stmtCtx, tbl, tblName, nvPairs, binOp.R)
if nvPairs == nil || isTableDual {
return nil, isTableDual
}
return nvPairs, isTableDual
} else if binOp.Op == opcode.EQ {
var d types.Datum
var colName *ast.ColumnNameExpr
var param *driver.ParamMarkerExpr
var ok bool
if colName, ok = binOp.L.(*ast.ColumnNameExpr); ok {
switch x := binOp.R.(type) {
case *driver.ValueExpr:
d = x.Datum
case *driver.ParamMarkerExpr:
d = x.Datum
param = x
}
} else if colName, ok = binOp.R.(*ast.ColumnNameExpr); ok {
switch x := binOp.L.(type) {
case *driver.ValueExpr:
d = x.Datum
case *driver.ParamMarkerExpr:
d = x.Datum
param = x
}
} else {
return nil, false
}
if d.IsNull() {
return nil, false
}
// Views' columns have no FieldType.
if tbl.IsView() {
return nil, false
}
if colName.Name.Table.L != "" && colName.Name.Table.L != tblName.L {
return nil, false
}
col := model.FindColumnInfo(tbl.Cols(), colName.Name.Name.L)
		if col == nil || // Handle the case where the column is _tidb_rowid.
			(col.Tp == mysql.TypeString && col.Collate == charset.CollationBin) { // For this type we don't need to pad `\0` here.
return append(nvPairs, nameValuePair{colName: colName.Name.Name.L, value: d, param: param}), false
}
if !checkCanConvertInPointGet(col, d) {
return nil, false
}
dVal, err := d.ConvertTo(stmtCtx, &col.FieldType)
if err != nil {
if terror.ErrorEqual(types.ErrOverflow, err) {
return append(nvPairs, nameValuePair{colName: colName.Name.Name.L, value: d, param: param}), true
}
			// Some conversions to int fail with an error, but the value may still be usable for point get.
if !terror.ErrorEqual(types.ErrTruncatedWrongVal, err) {
return nil, false
}
}
		// The converted result must be the same as the original datum.
		// Compare them based on dVal's type.
cmp, err := dVal.CompareDatum(stmtCtx, &d)
if err != nil {
return nil, false
} else if cmp != 0 {
return append(nvPairs, nameValuePair{colName: colName.Name.Name.L, value: dVal, param: param}), true
}
return append(nvPairs, nameValuePair{colName: colName.Name.Name.L, value: dVal, param: param}), false
}
return nil, false
}
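
// Illustrative note, not part of the original source: for `WHERE a = 1 AND b = 2` the
// function above returns one pair per column, while any OR, non-equality operator, or
// `col = NULL` condition yields nil. An out-of-range constant (e.g. 300 compared against
// a TINYINT column) sets isTableDual to true so the caller can fold the query into a
// dual plan.
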
func checkCanConvertInPointGet(col *model.ColumnInfo, d types.Datum) bool {
kind := d.Kind()
switch col.FieldType.EvalType() {
case ptypes.ETString:
switch kind {
case types.KindInt64, types.KindUint64,
types.KindFloat32, types.KindFloat64, types.KindMysqlDecimal:
// column type is String and constant type is numeric
return false
}
}
switch col.FieldType.Tp {
case mysql.TypeBit:
switch kind {
case types.KindString:
// column type is Bit and constant type is string
return false
}
}
return true
}
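
// exampleCheckConvert is an illustrative sketch, not part of the original source. A
// numeric constant compared against a string column cannot take the point-get fast
// path, while a string constant against the same column can.
func exampleCheckConvert() (bool, bool) {
	strCol := &model.ColumnInfo{FieldType: *types.NewFieldType(mysql.TypeVarchar)}
	return checkCanConvertInPointGet(strCol, types.NewIntDatum(1)), // false: numeric constant vs. string column
		checkCanConvertInPointGet(strCol, types.NewStringDatum("a")) // true: kinds are compatible
}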
func findPKHandle(tblInfo *model.TableInfo, pairs []nameValuePair) (handlePair nameValuePair, fieldType *types.FieldType) {
if !tblInfo.PKIsHandle {
rowIDIdx := findInPairs("_tidb_rowid", pairs)
if rowIDIdx != -1 {
return pairs[rowIDIdx], types.NewFieldType(mysql.TypeLonglong)
}
return handlePair, nil
}
for _, col := range tblInfo.Columns {
if mysql.HasPriKeyFlag(col.Flag) {
i := findInPairs(col.Name.L, pairs)
if i == -1 {
return handlePair, nil
}
return pairs[i], &col.FieldType
}
}
return handlePair, nil
}
func getIndexValues(idxInfo *model.IndexInfo, pairs []nameValuePair) ([]types.Datum, []*driver.ParamMarkerExpr) {
idxValues := make([]types.Datum, 0, 4)
idxValueParams := make([]*driver.ParamMarkerExpr, 0, 4)
if len(idxInfo.Columns) != len(pairs) {
return nil, nil
}
if idxInfo.HasPrefixIndex() {
return nil, nil
}
for _, idxCol := range idxInfo.Columns {
i := findInPairs(idxCol.Name.L, pairs)
if i == -1 {
return nil, nil
}
idxValues = append(idxValues, pairs[i].value)
idxValueParams = append(idxValueParams, pairs[i].param)
}
if len(idxValues) > 0 {
return idxValues, idxValueParams
}
return nil, nil
}
func findInPairs(colName string, pairs []nameValuePair) int {
for i, pair := range pairs {
if pair.colName == colName {
return i
}
}
return -1
}
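
// exampleFindInPairs is an illustrative sketch, not part of the original source; the
// column names "a" and "b" are hypothetical. findInPairs is a linear lookup by
// lower-cased column name and returns -1 when the column is absent.
func exampleFindInPairs() int {
	pairs := []nameValuePair{
		{colName: "a", value: types.NewIntDatum(1)},
		{colName: "b", value: types.NewIntDatum(2)},
	}
	return findInPairs("b", pairs) // 1
}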
func tryUpdatePointPlan(ctx sessionctx.Context, updateStmt *ast.UpdateStmt) Plan {
selStmt := &ast.SelectStmt{
Fields: &ast.FieldList{},
From: updateStmt.TableRefs,
Where: updateStmt.Where,
OrderBy: updateStmt.Order,
Limit: updateStmt.Limit,
}
pointGet := tryPointGetPlan(ctx, selStmt, true)
if pointGet != nil {
if pointGet.IsTableDual {
return PhysicalTableDual{
names: pointGet.outputNames,
}.Init(ctx, &property.StatsInfo{}, 0)
}
if ctx.GetSessionVars().TxnCtx.IsPessimistic {
pointGet.Lock, pointGet.LockWaitTime = getLockWaitTime(ctx, &ast.SelectLockInfo{LockType: ast.SelectLockForUpdate})
}
return buildPointUpdatePlan(ctx, pointGet, pointGet.dbName, pointGet.TblInfo, updateStmt)
}
batchPointGet := tryWhereIn2BatchPointGet(ctx, selStmt)
if batchPointGet != nil {
if ctx.GetSessionVars().TxnCtx.IsPessimistic {
batchPointGet.Lock, batchPointGet.LockWaitTime = getLockWaitTime(ctx, &ast.SelectLockInfo{LockType: ast.SelectLockForUpdate})
}
return buildPointUpdatePlan(ctx, batchPointGet, batchPointGet.dbName, batchPointGet.TblInfo, updateStmt)
}
return nil
}
func buildPointUpdatePlan(ctx sessionctx.Context, pointPlan PhysicalPlan, dbName string, tbl *model.TableInfo, updateStmt *ast.UpdateStmt) Plan {
if checkFastPlanPrivilege(ctx, dbName, tbl.Name.L, mysql.SelectPriv, mysql.UpdatePriv) != nil {
return nil
}
orderedList, allAssignmentsAreConstant := buildOrderedList(ctx, pointPlan, updateStmt.List)
if orderedList == nil {
return nil
}
handleCols := buildHandleCols(ctx, tbl, pointPlan.Schema())
updatePlan := Update{
SelectPlan: pointPlan,
OrderedList: orderedList,
TblColPosInfos: TblColPosInfoSlice{
TblColPosInfo{
TblID: tbl.ID,
Start: 0,
End: pointPlan.Schema().Len(),
HandleCols: handleCols,
},
},
AllAssignmentsAreConstant: allAssignmentsAreConstant,
VirtualAssignmentsOffset: len(orderedList),
}.Init(ctx)
updatePlan.names = pointPlan.OutputNames()
is := infoschema.GetInfoSchema(ctx)
t, _ := is.TableByID(tbl.ID)
updatePlan.tblID2Table = map[int64]table.Table{
tbl.ID: t,
}
if tbl.GetPartitionInfo() != nil {
pt := t.(table.PartitionedTable)
var updateTableList []*ast.TableName
updateTableList = extractTableList(updateStmt.TableRefs.TableRefs, updateTableList, true)
updatePlan.PartitionedTable = make([]table.PartitionedTable, 0, len(updateTableList))
for _, updateTable := range updateTableList {
if len(updateTable.PartitionNames) > 0 {
pids := make(map[int64]struct{}, len(updateTable.PartitionNames))
for _, name := range updateTable.PartitionNames {
pid, err := tables.FindPartitionByName(tbl, name.L)
if err != nil {
return updatePlan
}
pids[pid] = struct{}{}
}
pt = tables.NewPartitionTableithGivenSets(pt, pids)
}
updatePlan.PartitionedTable = append(updatePlan.PartitionedTable, pt)
}
}
return updatePlan
}
func buildOrderedList(ctx sessionctx.Context, plan Plan, list []*ast.Assignment,
) (orderedList []*expression.Assignment, allAssignmentsAreConstant bool) {
orderedList = make([]*expression.Assignment, 0, len(list))
allAssignmentsAreConstant = true
for _, assign := range list {
idx, err := expression.FindFieldName(plan.OutputNames(), assign.Column)
if idx == -1 || err != nil {
return nil, true
}
col := plan.Schema().Columns[idx]
newAssign := &expression.Assignment{
Col: col,
ColName: plan.OutputNames()[idx].ColName,
}
expr, err := expression.RewriteSimpleExprWithNames(ctx, assign.Expr, plan.Schema(), plan.OutputNames())
if err != nil {
return nil, true
}
expr = expression.BuildCastFunction(ctx, expr, col.GetType())
if allAssignmentsAreConstant {
_, isConst := expr.(*expression.Constant)
allAssignmentsAreConstant = isConst
}
newAssign.Expr, err = expr.ResolveIndices(plan.Schema())
if err != nil {
return nil, true
}
orderedList = append(orderedList, newAssign)
}
return orderedList, allAssignmentsAreConstant
}
func tryDeletePointPlan(ctx sessionctx.Context, delStmt *ast.DeleteStmt) Plan {
if delStmt.IsMultiTable {
return nil
}
selStmt := &ast.SelectStmt{
Fields: &ast.FieldList{},
From: delStmt.TableRefs,
Where: delStmt.Where,
OrderBy: delStmt.Order,
Limit: delStmt.Limit,
}
if pointGet := tryPointGetPlan(ctx, selStmt, true); pointGet != nil {
if pointGet.IsTableDual {
return PhysicalTableDual{
names: pointGet.outputNames,
}.Init(ctx, &property.StatsInfo{}, 0)
}
if ctx.GetSessionVars().TxnCtx.IsPessimistic {
pointGet.Lock, pointGet.LockWaitTime = getLockWaitTime(ctx, &ast.SelectLockInfo{LockType: ast.SelectLockForUpdate})
}
return buildPointDeletePlan(ctx, pointGet, pointGet.dbName, pointGet.TblInfo)
}
if batchPointGet := tryWhereIn2BatchPointGet(ctx, selStmt); batchPointGet != nil {
if ctx.GetSessionVars().TxnCtx.IsPessimistic {
batchPointGet.Lock, batchPointGet.LockWaitTime = getLockWaitTime(ctx, &ast.SelectLockInfo{LockType: ast.SelectLockForUpdate})
}
return buildPointDeletePlan(ctx, batchPointGet, batchPointGet.dbName, batchPointGet.TblInfo)
}
return nil
}
func buildPointDeletePlan(ctx sessionctx.Context, pointPlan PhysicalPlan, dbName string, tbl *model.TableInfo) Plan {
if checkFastPlanPrivilege(ctx, dbName, tbl.Name.L, mysql.SelectPriv, mysql.DeletePriv) != nil {
return nil
}
handleCols := buildHandleCols(ctx, tbl, pointPlan.Schema())
delPlan := Delete{
SelectPlan: pointPlan,
TblColPosInfos: TblColPosInfoSlice{
TblColPosInfo{
TblID: tbl.ID,
Start: 0,
End: pointPlan.Schema().Len(),
HandleCols: handleCols,
},
},
}.Init(ctx)
return delPlan
}
func findCol(tbl *model.TableInfo, colName *ast.ColumnName) *model.ColumnInfo {
for _, col := range tbl.Columns {
if col.Name.L == colName.Name.L {
return col
}
}
return nil
}
func colInfoToColumn(col *model.ColumnInfo, idx int) *expression.Column {
return &expression.Column{
RetType: col.FieldType.Clone(),
ID: col.ID,
UniqueID: int64(col.Offset),
Index: idx,
OrigName: col.Name.L,
}
}
func buildHandleCols(ctx sessionctx.Context, tbl *model.TableInfo, schema *expression.Schema) HandleCols {
// fields len is 0 for update and delete.
if tbl.PKIsHandle {
for i, col := range tbl.Columns {
if mysql.HasPriKeyFlag(col.Flag) {
return &IntHandleCols{col: schema.Columns[i]}
}
}
}
if tbl.IsCommonHandle {
pkIdx := tables.FindPrimaryIndex(tbl)
return NewCommonHandleCols(ctx.GetSessionVars().StmtCtx, tbl, pkIdx, schema.Columns)
}
handleCol := colInfoToColumn(model.NewExtraHandleColInfo(), schema.Len())
schema.Append(handleCol)
return &IntHandleCols{col: handleCol}
}
func getPartitionInfo(ctx sessionctx.Context, tbl *model.TableInfo, pairs []nameValuePair) (*model.PartitionDefinition, int) {
partitionColName := getHashPartitionColumnName(ctx, tbl)
if partitionColName == nil {
return nil, 0
}
pi := tbl.Partition
for i, pair := range pairs {
if partitionColName.Name.L == pair.colName {
val := pair.value.GetInt64()
pos := math.Abs(val % int64(pi.Num))
return &pi.Definitions[pos], i
}
}
return nil, 0
}
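
// Illustrative note, not part of the original source: with 4 hash partitions and a
// matched condition value of 10 the position is abs(10 % 4) = 2, so Definitions[2] and
// the pair's index in `pairs` are returned; tables that are not hash-partitioned yield
// (nil, 0) because getHashPartitionColumnName returns nil for them.
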
func findPartitionIdx(idxInfo *model.IndexInfo, pos int, pairs []nameValuePair) int {
for i, idxCol := range idxInfo.Columns {
if idxCol.Name.L == pairs[pos].colName {
return i
}
}
return 0
}
// getPartitionColumnPos gets the partition column's position in the index.
func getPartitionColumnPos(idx *model.IndexInfo, partitionColName *ast.ColumnName) int {
if partitionColName == nil {
return 0
}
for i, idxCol := range idx.Columns {
if partitionColName.Name.L == idxCol.Name.L {
return i
}
}
panic("unique index must include all partition columns")
}
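
// Illustrative note, not part of the original source: for a unique index on (b, a) with
// the table hash-partitioned by a, the function above returns 1; a nil partition column
// yields 0, and a missing partition column panics because a unique key on a partitioned
// table must include all partition columns.
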
func getHashPartitionColumnName(ctx sessionctx.Context, tbl *model.TableInfo) *ast.ColumnName {
pi := tbl.GetPartitionInfo()
if pi == nil {
return nil
}
if pi.Type != model.PartitionTypeHash {
return nil
}
is := infoschema.GetInfoSchema(ctx)
table, ok := is.TableByID(tbl.ID)
if !ok {
return nil
}
	// PartitionExpr doesn't need columns and names for hash partitioning.
partitionExpr, err := table.(partitionTable).PartitionExpr()
if err != nil {
return nil
}
expr := partitionExpr.OrigExpr
col, ok := expr.(*ast.ColumnNameExpr)
if !ok {
return nil
}
return col.Name
}
| planner/core/point_get_plan.go | 1 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.012707462534308434,
0.00045391800813376904,
0.0001638281246414408,
0.00016925363161135465,
0.0015754676423966885
] |
{
"id": 2,
"code_window": [
"\tWaitScatterRegionFinish(ctx context.Context, regionID uint64, backOff int) error\n",
"\tCheckRegionInScattering(regionID uint64) (bool, error)\n",
"}\n",
"\n",
"// Used for pessimistic lock wait time\n",
"// these two constants are special for lock protocol with tikv\n",
"// 0 means always wait, -1 means nowait, others meaning lock wait in milliseconds\n",
"var (\n",
"\tLockAlwaysWait = int64(0)\n",
"\tLockNoWait = int64(-1)\n",
")"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace"
],
"after_edit": [],
"file_path": "kv/kv.go",
"type": "replace",
"edit_start_line_idx": 426
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"bytes"
"context"
"fmt"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/parser/auth"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/types"
)
var _ = Suite(&testTableSuite{})
type testTableSuite struct {
store kv.Storage
dbInfo *model.DBInfo
d *ddl
}
func testTableInfoWith2IndexOnFirstColumn(c *C, d *ddl, name string, num int) *model.TableInfo {
normalInfo := testTableInfo(c, d, name, num)
idxs := make([]*model.IndexInfo, 0, 2)
for i := range idxs {
idx := &model.IndexInfo{
Name: model.NewCIStr(fmt.Sprintf("i%d", i+1)),
State: model.StatePublic,
Columns: []*model.IndexColumn{{Name: model.NewCIStr("c1")}},
}
idxs = append(idxs, idx)
}
normalInfo.Indices = idxs
normalInfo.Columns[0].FieldType.Flen = 11
return normalInfo
}
// testTableInfo creates a test table with num int columns and with no index.
func testTableInfo(c *C, d *ddl, name string, num int) *model.TableInfo {
tblInfo := &model.TableInfo{
Name: model.NewCIStr(name),
}
genIDs, err := d.genGlobalIDs(1)
c.Assert(err, IsNil)
tblInfo.ID = genIDs[0]
cols := make([]*model.ColumnInfo, num)
for i := range cols {
col := &model.ColumnInfo{
Name: model.NewCIStr(fmt.Sprintf("c%d", i+1)),
Offset: i,
DefaultValue: i + 1,
State: model.StatePublic,
}
col.FieldType = *types.NewFieldType(mysql.TypeLong)
col.ID = allocateColumnID(tblInfo)
cols[i] = col
}
tblInfo.Columns = cols
tblInfo.Charset = "utf8"
tblInfo.Collate = "utf8_bin"
return tblInfo
}
// testTableInfoWithPartition creates a test table with num int columns and with no index.
func testTableInfoWithPartition(c *C, d *ddl, name string, num int) *model.TableInfo {
tblInfo := testTableInfo(c, d, name, num)
genIDs, err := d.genGlobalIDs(1)
c.Assert(err, IsNil)
pid := genIDs[0]
tblInfo.Partition = &model.PartitionInfo{
Type: model.PartitionTypeRange,
Expr: tblInfo.Columns[0].Name.L,
Enable: true,
Definitions: []model.PartitionDefinition{{
ID: pid,
Name: model.NewCIStr("p0"),
LessThan: []string{"maxvalue"},
}},
}
return tblInfo
}
// testTableInfoWithPartitionLessThan creates a test table with num int columns and one partition specified with lessthan.
func testTableInfoWithPartitionLessThan(c *C, d *ddl, name string, num int, lessthan string) *model.TableInfo {
tblInfo := testTableInfoWithPartition(c, d, name, num)
tblInfo.Partition.Definitions[0].LessThan = []string{lessthan}
return tblInfo
}
func testAddedNewTablePartitionInfo(c *C, d *ddl, tblInfo *model.TableInfo, partName, lessthan string) *model.PartitionInfo {
genIDs, err := d.genGlobalIDs(1)
c.Assert(err, IsNil)
pid := genIDs[0]
// the new added partition should change the partition state to state none at the beginning.
return &model.PartitionInfo{
Type: model.PartitionTypeRange,
Expr: tblInfo.Columns[0].Name.L,
Enable: true,
Definitions: []model.PartitionDefinition{{
ID: pid,
Name: model.NewCIStr(partName),
LessThan: []string{lessthan},
}},
}
}
// testViewInfo creates a test view with num int columns.
func testViewInfo(c *C, d *ddl, name string, num int) *model.TableInfo {
tblInfo := &model.TableInfo{
Name: model.NewCIStr(name),
}
genIDs, err := d.genGlobalIDs(1)
c.Assert(err, IsNil)
tblInfo.ID = genIDs[0]
cols := make([]*model.ColumnInfo, num)
viewCols := make([]model.CIStr, num)
var stmtBuffer bytes.Buffer
stmtBuffer.WriteString("SELECT ")
for i := range cols {
col := &model.ColumnInfo{
Name: model.NewCIStr(fmt.Sprintf("c%d", i+1)),
Offset: i,
State: model.StatePublic,
}
col.ID = allocateColumnID(tblInfo)
cols[i] = col
viewCols[i] = col.Name
stmtBuffer.WriteString(cols[i].Name.L + ",")
}
stmtBuffer.WriteString("1 FROM t")
view := model.ViewInfo{Cols: viewCols, Security: model.SecurityDefiner, Algorithm: model.AlgorithmMerge,
SelectStmt: stmtBuffer.String(), CheckOption: model.CheckOptionCascaded, Definer: &auth.UserIdentity{CurrentUser: true}}
tblInfo.View = &view
tblInfo.Columns = cols
return tblInfo
}
func testCreateTable(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo) *model.Job {
job := &model.Job{
SchemaID: dbInfo.ID,
TableID: tblInfo.ID,
Type: model.ActionCreateTable,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{tblInfo},
}
err := d.doDDLJob(ctx, job)
c.Assert(err, IsNil)
v := getSchemaVer(c, ctx)
tblInfo.State = model.StatePublic
checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo})
tblInfo.State = model.StateNone
return job
}
func testCreateView(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo) *model.Job {
job := &model.Job{
SchemaID: dbInfo.ID,
TableID: tblInfo.ID,
Type: model.ActionCreateView,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{tblInfo, false, 0},
}
c.Assert(tblInfo.IsView(), IsTrue)
err := d.doDDLJob(ctx, job)
c.Assert(err, IsNil)
v := getSchemaVer(c, ctx)
tblInfo.State = model.StatePublic
checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo})
tblInfo.State = model.StateNone
return job
}
func testRenameTable(c *C, ctx sessionctx.Context, d *ddl, newSchemaID, oldSchemaID int64, tblInfo *model.TableInfo) *model.Job {
job := &model.Job{
SchemaID: newSchemaID,
TableID: tblInfo.ID,
Type: model.ActionRenameTable,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{oldSchemaID, tblInfo.Name},
}
err := d.doDDLJob(ctx, job)
c.Assert(err, IsNil)
v := getSchemaVer(c, ctx)
tblInfo.State = model.StatePublic
checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo})
tblInfo.State = model.StateNone
return job
}
func testLockTable(c *C, ctx sessionctx.Context, d *ddl, newSchemaID int64, tblInfo *model.TableInfo, lockTp model.TableLockType) *model.Job {
arg := &lockTablesArg{
LockTables: []model.TableLockTpInfo{{SchemaID: newSchemaID, TableID: tblInfo.ID, Tp: lockTp}},
SessionInfo: model.SessionInfo{
ServerID: d.GetID(),
SessionID: ctx.GetSessionVars().ConnectionID,
},
}
job := &model.Job{
SchemaID: newSchemaID,
TableID: tblInfo.ID,
Type: model.ActionLockTable,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{arg},
}
err := d.doDDLJob(ctx, job)
c.Assert(err, IsNil)
v := getSchemaVer(c, ctx)
checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v})
return job
}
func checkTableLockedTest(c *C, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, serverID string, sessionID uint64, lockTp model.TableLockType) {
err := kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
info, err := t.GetTable(dbInfo.ID, tblInfo.ID)
c.Assert(err, IsNil)
c.Assert(info, NotNil)
c.Assert(info.Lock, NotNil)
c.Assert(len(info.Lock.Sessions) == 1, IsTrue)
c.Assert(info.Lock.Sessions[0].ServerID, Equals, serverID)
c.Assert(info.Lock.Sessions[0].SessionID, Equals, sessionID)
c.Assert(info.Lock.Tp, Equals, lockTp)
c.Assert(info.Lock.State, Equals, model.TableLockStatePublic)
return nil
})
c.Assert(err, IsNil)
}
func testDropTable(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo) *model.Job {
job := &model.Job{
SchemaID: dbInfo.ID,
TableID: tblInfo.ID,
Type: model.ActionDropTable,
BinlogInfo: &model.HistoryInfo{},
}
err := d.doDDLJob(ctx, job)
c.Assert(err, IsNil)
v := getSchemaVer(c, ctx)
checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo})
return job
}
func testTruncateTable(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo) *model.Job {
genIDs, err := d.genGlobalIDs(1)
c.Assert(err, IsNil)
newTableID := genIDs[0]
job := &model.Job{
SchemaID: dbInfo.ID,
TableID: tblInfo.ID,
Type: model.ActionTruncateTable,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{newTableID},
}
err = d.doDDLJob(ctx, job)
c.Assert(err, IsNil)
v := getSchemaVer(c, ctx)
tblInfo.ID = newTableID
checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo})
return job
}
func testCheckTableState(c *C, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, state model.SchemaState) {
err := kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
info, err := t.GetTable(dbInfo.ID, tblInfo.ID)
c.Assert(err, IsNil)
if state == model.StateNone {
c.Assert(info, IsNil)
return nil
}
c.Assert(info.Name, DeepEquals, tblInfo.Name)
c.Assert(info.State, Equals, state)
return nil
})
c.Assert(err, IsNil)
}
func testGetTable(c *C, d *ddl, schemaID int64, tableID int64) table.Table {
tbl, err := testGetTableWithError(d, schemaID, tableID)
c.Assert(err, IsNil)
return tbl
}
func testGetTableWithError(d *ddl, schemaID, tableID int64) (table.Table, error) {
var tblInfo *model.TableInfo
err := kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
var err1 error
tblInfo, err1 = t.GetTable(schemaID, tableID)
if err1 != nil {
return errors.Trace(err1)
}
return nil
})
if err != nil {
return nil, errors.Trace(err)
}
if tblInfo == nil {
return nil, errors.New("table not found")
}
alloc := autoid.NewAllocator(d.store, schemaID, false, autoid.RowIDAllocType)
tbl, err := table.TableFromMeta(autoid.NewAllocators(alloc), tblInfo)
if err != nil {
return nil, errors.Trace(err)
}
return tbl, nil
}
func (s *testTableSuite) SetUpSuite(c *C) {
s.store = testCreateStore(c, "test_table")
s.d = testNewDDLAndStart(
context.Background(),
c,
WithStore(s.store),
WithLease(testLease),
)
s.dbInfo = testSchemaInfo(c, s.d, "test")
testCreateSchema(c, testNewContext(s.d), s.d, s.dbInfo)
}
func (s *testTableSuite) TearDownSuite(c *C) {
testDropSchema(c, testNewContext(s.d), s.d, s.dbInfo)
err := s.d.Stop()
c.Assert(err, IsNil)
err = s.store.Close()
c.Assert(err, IsNil)
}
func (s *testTableSuite) TestTable(c *C) {
d := s.d
ctx := testNewContext(d)
tblInfo := testTableInfo(c, d, "t", 3)
job := testCreateTable(c, ctx, d, s.dbInfo, tblInfo)
testCheckTableState(c, d, s.dbInfo, tblInfo, model.StatePublic)
testCheckJobDone(c, d, job, true)
// Create an existing table.
newTblInfo := testTableInfo(c, d, "t", 3)
doDDLJobErr(c, s.dbInfo.ID, newTblInfo.ID, model.ActionCreateTable, []interface{}{newTblInfo}, ctx, d)
count := 2000
tbl := testGetTable(c, d, s.dbInfo.ID, tblInfo.ID)
for i := 1; i <= count; i++ {
_, err := tbl.AddRecord(ctx, types.MakeDatums(i, i, i))
c.Assert(err, IsNil)
}
job = testDropTable(c, ctx, d, s.dbInfo, tblInfo)
testCheckJobDone(c, d, job, false)
// for truncate table
tblInfo = testTableInfo(c, d, "tt", 3)
job = testCreateTable(c, ctx, d, s.dbInfo, tblInfo)
testCheckTableState(c, d, s.dbInfo, tblInfo, model.StatePublic)
testCheckJobDone(c, d, job, true)
job = testTruncateTable(c, ctx, d, s.dbInfo, tblInfo)
testCheckTableState(c, d, s.dbInfo, tblInfo, model.StatePublic)
testCheckJobDone(c, d, job, true)
// for rename table
dbInfo1 := testSchemaInfo(c, s.d, "test_rename_table")
testCreateSchema(c, testNewContext(s.d), s.d, dbInfo1)
job = testRenameTable(c, ctx, d, dbInfo1.ID, s.dbInfo.ID, tblInfo)
testCheckTableState(c, d, dbInfo1, tblInfo, model.StatePublic)
testCheckJobDone(c, d, job, true)
job = testLockTable(c, ctx, d, dbInfo1.ID, tblInfo, model.TableLockWrite)
testCheckTableState(c, d, dbInfo1, tblInfo, model.StatePublic)
testCheckJobDone(c, d, job, true)
checkTableLockedTest(c, d, dbInfo1, tblInfo, d.GetID(), ctx.GetSessionVars().ConnectionID, model.TableLockWrite)
}
| ddl/table_test.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0007877504103817046,
0.00019090504792984575,
0.00016485185187775642,
0.00016987547860480845,
0.00009709104779176414
] |
{
"id": 2,
"code_window": [
"\tWaitScatterRegionFinish(ctx context.Context, regionID uint64, backOff int) error\n",
"\tCheckRegionInScattering(regionID uint64) (bool, error)\n",
"}\n",
"\n",
"// Used for pessimistic lock wait time\n",
"// these two constants are special for lock protocol with tikv\n",
"// 0 means always wait, -1 means nowait, others meaning lock wait in milliseconds\n",
"var (\n",
"\tLockAlwaysWait = int64(0)\n",
"\tLockNoWait = int64(-1)\n",
")"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace"
],
"after_edit": [],
"file_path": "kv/kv.go",
"type": "replace",
"edit_start_line_idx": 426
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl_test
import (
"time"
. "github.com/pingcap/check"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/util/mock"
"go.etcd.io/etcd/clientv3"
)
type ddlOptionsSuite struct{}
var _ = Suite(&ddlOptionsSuite{})
func (s *ddlOptionsSuite) TestOptions(c *C) {
client, err := clientv3.NewFromURL("test")
c.Assert(err, IsNil)
callback := &ddl.BaseCallback{}
lease := time.Second * 3
store := &mock.Store{}
infoHandle := infoschema.NewHandle(store)
options := []ddl.Option{
ddl.WithEtcdClient(client),
ddl.WithHook(callback),
ddl.WithLease(lease),
ddl.WithStore(store),
ddl.WithInfoHandle(infoHandle),
}
opt := &ddl.Options{}
for _, o := range options {
o(opt)
}
c.Assert(opt.EtcdCli, Equals, client)
c.Assert(opt.Hook, Equals, callback)
c.Assert(opt.Lease, Equals, lease)
c.Assert(opt.Store, Equals, store)
c.Assert(opt.InfoHandle, Equals, infoHandle)
}
| ddl/options_test.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.00017781989299692214,
0.00017236564599443227,
0.00016731690266169608,
0.00017211625527124852,
0.000003173599225192447
] |
{
"id": 2,
"code_window": [
"\tWaitScatterRegionFinish(ctx context.Context, regionID uint64, backOff int) error\n",
"\tCheckRegionInScattering(regionID uint64) (bool, error)\n",
"}\n",
"\n",
"// Used for pessimistic lock wait time\n",
"// these two constants are special for lock protocol with tikv\n",
"// 0 means always wait, -1 means nowait, others meaning lock wait in milliseconds\n",
"var (\n",
"\tLockAlwaysWait = int64(0)\n",
"\tLockNoWait = int64(-1)\n",
")"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace"
],
"after_edit": [],
"file_path": "kv/kv.go",
"type": "replace",
"edit_start_line_idx": 426
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package variable
import (
"encoding/json"
"reflect"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/config"
tikvstore "github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/testleak"
)
var _ = Suite(&testVarsutilSuite{})
type testVarsutilSuite struct {
}
func (s *testVarsutilSuite) TestTiDBOptOn(c *C) {
defer testleak.AfterTest(c)()
tbl := []struct {
val string
on bool
}{
{"ON", true},
{"on", true},
{"On", true},
{"1", true},
{"off", false},
{"No", false},
{"0", false},
{"1.1", false},
{"", false},
}
for _, t := range tbl {
on := TiDBOptOn(t.val)
c.Assert(on, Equals, t.on)
}
}
func (s *testVarsutilSuite) TestNewSessionVars(c *C) {
defer testleak.AfterTest(c)()
vars := NewSessionVars()
c.Assert(vars.IndexJoinBatchSize, Equals, DefIndexJoinBatchSize)
c.Assert(vars.IndexLookupSize, Equals, DefIndexLookupSize)
c.Assert(vars.indexLookupConcurrency, Equals, ConcurrencyUnset)
c.Assert(vars.indexSerialScanConcurrency, Equals, DefIndexSerialScanConcurrency)
c.Assert(vars.indexLookupJoinConcurrency, Equals, ConcurrencyUnset)
c.Assert(vars.hashJoinConcurrency, Equals, DefTiDBHashJoinConcurrency)
c.Assert(vars.IndexLookupConcurrency(), Equals, DefExecutorConcurrency)
c.Assert(vars.IndexSerialScanConcurrency(), Equals, DefIndexSerialScanConcurrency)
c.Assert(vars.IndexLookupJoinConcurrency(), Equals, DefExecutorConcurrency)
c.Assert(vars.HashJoinConcurrency(), Equals, DefExecutorConcurrency)
c.Assert(vars.AllowBatchCop, Equals, DefTiDBAllowBatchCop)
c.Assert(vars.AllowBCJ, Equals, DefOptBCJ)
c.Assert(vars.projectionConcurrency, Equals, ConcurrencyUnset)
c.Assert(vars.hashAggPartialConcurrency, Equals, ConcurrencyUnset)
c.Assert(vars.hashAggFinalConcurrency, Equals, ConcurrencyUnset)
c.Assert(vars.windowConcurrency, Equals, ConcurrencyUnset)
c.Assert(vars.mergeJoinConcurrency, Equals, DefTiDBMergeJoinConcurrency)
c.Assert(vars.streamAggConcurrency, Equals, DefTiDBStreamAggConcurrency)
c.Assert(vars.distSQLScanConcurrency, Equals, DefDistSQLScanConcurrency)
c.Assert(vars.ProjectionConcurrency(), Equals, DefExecutorConcurrency)
c.Assert(vars.HashAggPartialConcurrency(), Equals, DefExecutorConcurrency)
c.Assert(vars.HashAggFinalConcurrency(), Equals, DefExecutorConcurrency)
c.Assert(vars.WindowConcurrency(), Equals, DefExecutorConcurrency)
c.Assert(vars.MergeJoinConcurrency(), Equals, DefTiDBMergeJoinConcurrency)
c.Assert(vars.StreamAggConcurrency(), Equals, DefTiDBStreamAggConcurrency)
c.Assert(vars.DistSQLScanConcurrency(), Equals, DefDistSQLScanConcurrency)
c.Assert(vars.ExecutorConcurrency, Equals, DefExecutorConcurrency)
c.Assert(vars.MaxChunkSize, Equals, DefMaxChunkSize)
c.Assert(vars.DMLBatchSize, Equals, DefDMLBatchSize)
c.Assert(vars.MemQuotaQuery, Equals, config.GetGlobalConfig().MemQuotaQuery)
c.Assert(vars.MemQuotaHashJoin, Equals, int64(DefTiDBMemQuotaHashJoin))
c.Assert(vars.MemQuotaMergeJoin, Equals, int64(DefTiDBMemQuotaMergeJoin))
c.Assert(vars.MemQuotaSort, Equals, int64(DefTiDBMemQuotaSort))
c.Assert(vars.MemQuotaTopn, Equals, int64(DefTiDBMemQuotaTopn))
c.Assert(vars.MemQuotaIndexLookupReader, Equals, int64(DefTiDBMemQuotaIndexLookupReader))
c.Assert(vars.MemQuotaIndexLookupJoin, Equals, int64(DefTiDBMemQuotaIndexLookupJoin))
c.Assert(vars.MemQuotaApplyCache, Equals, int64(DefTiDBMemQuotaApplyCache))
c.Assert(vars.EnableRadixJoin, Equals, DefTiDBUseRadixJoin)
c.Assert(vars.AllowWriteRowID, Equals, DefOptWriteRowID)
c.Assert(vars.TiDBOptJoinReorderThreshold, Equals, DefTiDBOptJoinReorderThreshold)
c.Assert(vars.EnableFastAnalyze, Equals, DefTiDBUseFastAnalyze)
c.Assert(vars.FoundInPlanCache, Equals, DefTiDBFoundInPlanCache)
c.Assert(vars.FoundInBinding, Equals, DefTiDBFoundInBinding)
c.Assert(vars.AllowAutoRandExplicitInsert, Equals, DefTiDBAllowAutoRandExplicitInsert)
c.Assert(vars.ShardAllocateStep, Equals, int64(DefTiDBShardAllocateStep))
c.Assert(vars.EnableChangeColumnType, Equals, DefTiDBChangeColumnType)
c.Assert(vars.AnalyzeVersion, Equals, DefTiDBAnalyzeVersion)
assertFieldsGreaterThanZero(c, reflect.ValueOf(vars.MemQuota))
assertFieldsGreaterThanZero(c, reflect.ValueOf(vars.BatchSize))
}
func assertFieldsGreaterThanZero(c *C, val reflect.Value) {
for i := 0; i < val.NumField(); i++ {
fieldVal := val.Field(i)
c.Assert(fieldVal.Int(), Greater, int64(0))
}
}
func (s *testVarsutilSuite) TestVarsutil(c *C) {
defer testleak.AfterTest(c)()
v := NewSessionVars()
v.GlobalVarsAccessor = NewMockGlobalAccessor()
err := SetSessionSystemVar(v, "autocommit", types.NewStringDatum("1"))
c.Assert(err, IsNil)
val, err := GetSessionSystemVar(v, "autocommit")
c.Assert(err, IsNil)
c.Assert(val, Equals, "ON")
c.Assert(SetSessionSystemVar(v, "autocommit", types.Datum{}), NotNil)
// 0 converts to OFF
err = SetSessionSystemVar(v, "foreign_key_checks", types.NewStringDatum("0"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, "foreign_key_checks")
c.Assert(err, IsNil)
c.Assert(val, Equals, "OFF")
// 1/ON is not supported (generates a warning and sets to OFF)
err = SetSessionSystemVar(v, "foreign_key_checks", types.NewStringDatum("1"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, "foreign_key_checks")
c.Assert(err, IsNil)
c.Assert(val, Equals, "OFF")
err = SetSessionSystemVar(v, "sql_mode", types.NewStringDatum("strict_trans_tables"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, "sql_mode")
c.Assert(err, IsNil)
c.Assert(val, Equals, "STRICT_TRANS_TABLES")
c.Assert(v.StrictSQLMode, IsTrue)
err = SetSessionSystemVar(v, "sql_mode", types.NewStringDatum(""))
c.Assert(err, IsNil)
c.Assert(v.StrictSQLMode, IsFalse)
err = SetSessionSystemVar(v, "character_set_connection", types.NewStringDatum("utf8"))
c.Assert(err, IsNil)
err = SetSessionSystemVar(v, "collation_connection", types.NewStringDatum("utf8_general_ci"))
c.Assert(err, IsNil)
charset, collation := v.GetCharsetInfo()
c.Assert(charset, Equals, "utf8")
c.Assert(collation, Equals, "utf8_general_ci")
c.Assert(SetSessionSystemVar(v, "character_set_results", types.Datum{}), IsNil)
// Test case for time_zone session variable.
tests := []struct {
input string
expect string
compareValue bool
diff time.Duration
err error
}{
{"Europe/Helsinki", "Europe/Helsinki", true, -2 * time.Hour, nil},
{"US/Eastern", "US/Eastern", true, 5 * time.Hour, nil},
// TODO: Check it out and reopen this case.
// {"SYSTEM", "Local", false, 0},
{"+10:00", "", true, -10 * time.Hour, nil},
{"-6:00", "", true, 6 * time.Hour, nil},
{"+14:00", "", true, -14 * time.Hour, nil},
{"-12:59", "", true, 12*time.Hour + 59*time.Minute, nil},
{"+14:01", "", false, -14 * time.Hour, ErrUnknownTimeZone.GenWithStackByArgs("+14:01")},
{"-13:00", "", false, 13 * time.Hour, ErrUnknownTimeZone.GenWithStackByArgs("-13:00")},
}
for _, tt := range tests {
err = SetSessionSystemVar(v, TimeZone, types.NewStringDatum(tt.input))
if tt.err != nil {
c.Assert(err, NotNil)
continue
}
c.Assert(err, IsNil)
c.Assert(v.TimeZone.String(), Equals, tt.expect)
if tt.compareValue {
err = SetSessionSystemVar(v, TimeZone, types.NewStringDatum(tt.input))
c.Assert(err, IsNil)
t1 := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)
t2 := time.Date(2000, 1, 1, 0, 0, 0, 0, v.TimeZone)
c.Assert(t2.Sub(t1), Equals, tt.diff)
}
}
err = SetSessionSystemVar(v, TimeZone, types.NewStringDatum("6:00"))
c.Assert(err, NotNil)
c.Assert(terror.ErrorEqual(err, ErrUnknownTimeZone), IsTrue)
// Test case for sql mode.
for str, mode := range mysql.Str2SQLMode {
err = SetSessionSystemVar(v, "sql_mode", types.NewStringDatum(str))
c.Assert(err, IsNil)
if modeParts, exists := mysql.CombinationSQLMode[str]; exists {
for _, part := range modeParts {
mode |= mysql.Str2SQLMode[part]
}
}
c.Assert(v.SQLMode, Equals, mode)
}
err = SetSessionSystemVar(v, "tidb_opt_broadcast_join", types.NewStringDatum("1"))
c.Assert(err, IsNil)
err = SetSessionSystemVar(v, "tidb_allow_batch_cop", types.NewStringDatum("0"))
c.Assert(terror.ErrorEqual(err, ErrWrongValueForVar), IsTrue)
err = SetSessionSystemVar(v, "tidb_opt_broadcast_join", types.NewStringDatum("0"))
c.Assert(err, IsNil)
err = SetSessionSystemVar(v, "tidb_allow_batch_cop", types.NewStringDatum("0"))
c.Assert(err, IsNil)
err = SetSessionSystemVar(v, "tidb_opt_broadcast_join", types.NewStringDatum("1"))
c.Assert(terror.ErrorEqual(err, ErrWrongValueForVar), IsTrue)
// Combined sql_mode
err = SetSessionSystemVar(v, "sql_mode", types.NewStringDatum("REAL_AS_FLOAT,ANSI_QUOTES"))
c.Assert(err, IsNil)
c.Assert(v.SQLMode, Equals, mysql.ModeRealAsFloat|mysql.ModeANSIQuotes)
// Test case for tidb_index_serial_scan_concurrency.
c.Assert(v.IndexSerialScanConcurrency(), Equals, DefIndexSerialScanConcurrency)
err = SetSessionSystemVar(v, TiDBIndexSerialScanConcurrency, types.NewStringDatum("4"))
c.Assert(err, IsNil)
c.Assert(v.IndexSerialScanConcurrency(), Equals, 4)
// Test case for tidb_batch_insert.
c.Assert(v.BatchInsert, IsFalse)
err = SetSessionSystemVar(v, TiDBBatchInsert, types.NewStringDatum("1"))
c.Assert(err, IsNil)
c.Assert(v.BatchInsert, IsTrue)
c.Assert(v.InitChunkSize, Equals, 32)
c.Assert(v.MaxChunkSize, Equals, 1024)
err = SetSessionSystemVar(v, TiDBMaxChunkSize, types.NewStringDatum("2"))
c.Assert(err, NotNil)
err = SetSessionSystemVar(v, TiDBInitChunkSize, types.NewStringDatum("1024"))
c.Assert(err, NotNil)
// Test case for TiDBConfig session variable.
err = SetSessionSystemVar(v, TiDBConfig, types.NewStringDatum("abc"))
c.Assert(terror.ErrorEqual(err, ErrIncorrectScope), IsTrue)
val, err = GetSessionSystemVar(v, TiDBConfig)
c.Assert(err, IsNil)
bVal, err := json.MarshalIndent(config.GetGlobalConfig(), "", "\t")
c.Assert(err, IsNil)
c.Assert(val, Equals, config.HideConfig(string(bVal)))
err = SetSessionSystemVar(v, TiDBEnableStreaming, types.NewStringDatum("1"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBEnableStreaming)
c.Assert(err, IsNil)
c.Assert(val, Equals, "ON")
c.Assert(v.EnableStreaming, Equals, true)
err = SetSessionSystemVar(v, TiDBEnableStreaming, types.NewStringDatum("0"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBEnableStreaming)
c.Assert(err, IsNil)
c.Assert(val, Equals, "OFF")
c.Assert(v.EnableStreaming, Equals, false)
c.Assert(v.OptimizerSelectivityLevel, Equals, DefTiDBOptimizerSelectivityLevel)
err = SetSessionSystemVar(v, TiDBOptimizerSelectivityLevel, types.NewIntDatum(1))
c.Assert(err, IsNil)
c.Assert(v.OptimizerSelectivityLevel, Equals, 1)
err = SetSessionSystemVar(v, TiDBDDLReorgWorkerCount, types.NewIntDatum(-1))
c.Assert(terror.ErrorEqual(err, ErrWrongValueForVar), IsTrue)
err = SetSessionSystemVar(v, TiDBDDLReorgWorkerCount, types.NewIntDatum(int64(maxDDLReorgWorkerCount)+1))
c.Assert(err, NotNil)
c.Assert(terror.ErrorEqual(err, ErrWrongValueForVar), IsTrue)
err = SetSessionSystemVar(v, TiDBRetryLimit, types.NewStringDatum("3"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBRetryLimit)
c.Assert(err, IsNil)
c.Assert(val, Equals, "3")
c.Assert(v.RetryLimit, Equals, int64(3))
c.Assert(v.EnableTablePartition, Equals, "")
err = SetSessionSystemVar(v, TiDBEnableTablePartition, types.NewStringDatum("on"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBEnableTablePartition)
c.Assert(err, IsNil)
c.Assert(val, Equals, "ON")
c.Assert(v.EnableTablePartition, Equals, "ON")
c.Assert(v.EnableListTablePartition, Equals, false)
err = SetSessionSystemVar(v, TiDBEnableListTablePartition, types.NewStringDatum("on"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBEnableListTablePartition)
c.Assert(err, IsNil)
c.Assert(val, Equals, "ON")
c.Assert(v.EnableListTablePartition, Equals, true)
c.Assert(v.TiDBOptJoinReorderThreshold, Equals, DefTiDBOptJoinReorderThreshold)
err = SetSessionSystemVar(v, TiDBOptJoinReorderThreshold, types.NewIntDatum(5))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBOptJoinReorderThreshold)
c.Assert(err, IsNil)
c.Assert(val, Equals, "5")
c.Assert(v.TiDBOptJoinReorderThreshold, Equals, 5)
err = SetSessionSystemVar(v, TiDBCheckMb4ValueInUTF8, types.NewStringDatum("1"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBCheckMb4ValueInUTF8)
c.Assert(err, IsNil)
c.Assert(val, Equals, "ON")
c.Assert(config.GetGlobalConfig().CheckMb4ValueInUTF8, Equals, true)
err = SetSessionSystemVar(v, TiDBCheckMb4ValueInUTF8, types.NewStringDatum("0"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBCheckMb4ValueInUTF8)
c.Assert(err, IsNil)
c.Assert(val, Equals, "OFF")
c.Assert(config.GetGlobalConfig().CheckMb4ValueInUTF8, Equals, false)
err = SetSessionSystemVar(v, TiDBLowResolutionTSO, types.NewStringDatum("1"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBLowResolutionTSO)
c.Assert(err, IsNil)
c.Assert(val, Equals, "ON")
c.Assert(v.LowResolutionTSO, Equals, true)
err = SetSessionSystemVar(v, TiDBLowResolutionTSO, types.NewStringDatum("0"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBLowResolutionTSO)
c.Assert(err, IsNil)
c.Assert(val, Equals, "OFF")
c.Assert(v.LowResolutionTSO, Equals, false)
c.Assert(v.CorrelationThreshold, Equals, 0.9)
err = SetSessionSystemVar(v, TiDBOptCorrelationThreshold, types.NewStringDatum("0"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBOptCorrelationThreshold)
c.Assert(err, IsNil)
c.Assert(val, Equals, "0")
c.Assert(v.CorrelationThreshold, Equals, float64(0))
c.Assert(v.CPUFactor, Equals, 3.0)
err = SetSessionSystemVar(v, TiDBOptCPUFactor, types.NewStringDatum("5.0"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBOptCPUFactor)
c.Assert(err, IsNil)
c.Assert(val, Equals, "5.0")
c.Assert(v.CPUFactor, Equals, 5.0)
c.Assert(v.CopCPUFactor, Equals, 3.0)
err = SetSessionSystemVar(v, TiDBOptCopCPUFactor, types.NewStringDatum("5.0"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBOptCopCPUFactor)
c.Assert(err, IsNil)
c.Assert(val, Equals, "5.0")
c.Assert(v.CopCPUFactor, Equals, 5.0)
c.Assert(v.CopTiFlashConcurrencyFactor, Equals, 24.0)
err = SetSessionSystemVar(v, TiDBOptTiFlashConcurrencyFactor, types.NewStringDatum("5.0"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBOptTiFlashConcurrencyFactor)
c.Assert(err, IsNil)
c.Assert(val, Equals, "5.0")
c.Assert(v.CopCPUFactor, Equals, 5.0)
c.Assert(v.NetworkFactor, Equals, 1.0)
err = SetSessionSystemVar(v, TiDBOptNetworkFactor, types.NewStringDatum("3.0"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBOptNetworkFactor)
c.Assert(err, IsNil)
c.Assert(val, Equals, "3.0")
c.Assert(v.NetworkFactor, Equals, 3.0)
c.Assert(v.ScanFactor, Equals, 1.5)
err = SetSessionSystemVar(v, TiDBOptScanFactor, types.NewStringDatum("3.0"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBOptScanFactor)
c.Assert(err, IsNil)
c.Assert(val, Equals, "3.0")
c.Assert(v.ScanFactor, Equals, 3.0)
c.Assert(v.DescScanFactor, Equals, 3.0)
err = SetSessionSystemVar(v, TiDBOptDescScanFactor, types.NewStringDatum("5.0"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBOptDescScanFactor)
c.Assert(err, IsNil)
c.Assert(val, Equals, "5.0")
c.Assert(v.DescScanFactor, Equals, 5.0)
c.Assert(v.SeekFactor, Equals, 20.0)
err = SetSessionSystemVar(v, TiDBOptSeekFactor, types.NewStringDatum("50.0"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBOptSeekFactor)
c.Assert(err, IsNil)
c.Assert(val, Equals, "50.0")
c.Assert(v.SeekFactor, Equals, 50.0)
c.Assert(v.MemoryFactor, Equals, 0.001)
err = SetSessionSystemVar(v, TiDBOptMemoryFactor, types.NewStringDatum("1.0"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBOptMemoryFactor)
c.Assert(err, IsNil)
c.Assert(val, Equals, "1.0")
c.Assert(v.MemoryFactor, Equals, 1.0)
c.Assert(v.DiskFactor, Equals, 1.5)
err = SetSessionSystemVar(v, TiDBOptDiskFactor, types.NewStringDatum("1.1"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBOptDiskFactor)
c.Assert(err, IsNil)
c.Assert(val, Equals, "1.1")
c.Assert(v.DiskFactor, Equals, 1.1)
c.Assert(v.ConcurrencyFactor, Equals, 3.0)
err = SetSessionSystemVar(v, TiDBOptConcurrencyFactor, types.NewStringDatum("5.0"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBOptConcurrencyFactor)
c.Assert(err, IsNil)
c.Assert(val, Equals, "5.0")
c.Assert(v.ConcurrencyFactor, Equals, 5.0)
err = SetSessionSystemVar(v, TiDBReplicaRead, types.NewStringDatum("follower"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBReplicaRead)
c.Assert(err, IsNil)
c.Assert(val, Equals, "follower")
c.Assert(v.GetReplicaRead(), Equals, tikvstore.ReplicaReadFollower)
err = SetSessionSystemVar(v, TiDBReplicaRead, types.NewStringDatum("leader"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBReplicaRead)
c.Assert(err, IsNil)
c.Assert(val, Equals, "leader")
c.Assert(v.GetReplicaRead(), Equals, tikvstore.ReplicaReadLeader)
err = SetSessionSystemVar(v, TiDBReplicaRead, types.NewStringDatum("leader-and-follower"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBReplicaRead)
c.Assert(err, IsNil)
c.Assert(val, Equals, "leader-and-follower")
c.Assert(v.GetReplicaRead(), Equals, tikvstore.ReplicaReadMixed)
err = SetSessionSystemVar(v, TiDBEnableStmtSummary, types.NewStringDatum("ON"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBEnableStmtSummary)
c.Assert(err, IsNil)
c.Assert(val, Equals, "ON")
err = SetSessionSystemVar(v, TiDBRedactLog, types.NewStringDatum("ON"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBRedactLog)
c.Assert(err, IsNil)
c.Assert(val, Equals, "ON")
err = SetSessionSystemVar(v, TiDBStmtSummaryRefreshInterval, types.NewStringDatum("10"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBStmtSummaryRefreshInterval)
c.Assert(err, IsNil)
c.Assert(val, Equals, "10")
err = SetSessionSystemVar(v, TiDBStmtSummaryHistorySize, types.NewStringDatum("10"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBStmtSummaryHistorySize)
c.Assert(err, IsNil)
c.Assert(val, Equals, "10")
err = SetSessionSystemVar(v, TiDBStmtSummaryMaxStmtCount, types.NewStringDatum("10"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBStmtSummaryMaxStmtCount)
c.Assert(err, IsNil)
c.Assert(val, Equals, "10")
err = SetSessionSystemVar(v, TiDBStmtSummaryMaxStmtCount, types.NewStringDatum("a"))
c.Assert(err, ErrorMatches, ".*Incorrect argument type to variable 'tidb_stmt_summary_max_stmt_count'")
err = SetSessionSystemVar(v, TiDBStmtSummaryMaxSQLLength, types.NewStringDatum("10"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBStmtSummaryMaxSQLLength)
c.Assert(err, IsNil)
c.Assert(val, Equals, "10")
err = SetSessionSystemVar(v, TiDBStmtSummaryMaxSQLLength, types.NewStringDatum("a"))
c.Assert(err, ErrorMatches, ".*Incorrect argument type to variable 'tidb_stmt_summary_max_sql_length'")
err = SetSessionSystemVar(v, TiDBFoundInPlanCache, types.NewStringDatum("1"))
c.Assert(err, ErrorMatches, ".*]Variable 'last_plan_from_cache' is a read only variable")
err = SetSessionSystemVar(v, TiDBFoundInBinding, types.NewStringDatum("1"))
c.Assert(err, ErrorMatches, ".*]Variable 'last_plan_from_binding' is a read only variable")
err = SetSessionSystemVar(v, TiDBEnableChangeColumnType, types.NewStringDatum("ON"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBEnableChangeColumnType)
c.Assert(err, IsNil)
c.Assert(val, Equals, "ON")
c.Assert(v.systems[TiDBEnableChangeColumnType], Equals, "ON")
err = SetSessionSystemVar(v, "UnknownVariable", types.NewStringDatum("on"))
c.Assert(err, ErrorMatches, ".*]Unknown system variable 'UnknownVariable'")
err = SetSessionSystemVar(v, TiDBAnalyzeVersion, types.NewStringDatum("3"))
c.Assert(err, ErrorMatches, ".*Variable 'tidb_analyze_version' can't be set to the value of '3'")
}
func (s *testVarsutilSuite) TestSetOverflowBehave(c *C) {
ddRegWorker := maxDDLReorgWorkerCount + 1
SetDDLReorgWorkerCounter(ddRegWorker)
c.Assert(maxDDLReorgWorkerCount, Equals, GetDDLReorgWorkerCounter())
ddlReorgBatchSize := MaxDDLReorgBatchSize + 1
SetDDLReorgBatchSize(ddlReorgBatchSize)
c.Assert(MaxDDLReorgBatchSize, Equals, GetDDLReorgBatchSize())
ddlReorgBatchSize = MinDDLReorgBatchSize - 1
SetDDLReorgBatchSize(ddlReorgBatchSize)
c.Assert(MinDDLReorgBatchSize, Equals, GetDDLReorgBatchSize())
val := tidbOptInt64("a", 1)
c.Assert(val, Equals, int64(1))
val2 := tidbOptFloat64("b", 1.2)
c.Assert(val2, Equals, 1.2)
}
func (s *testVarsutilSuite) TestValidate(c *C) {
v := NewSessionVars()
v.GlobalVarsAccessor = NewMockGlobalAccessor()
v.TimeZone = time.UTC
tests := []struct {
key string
value string
error bool
}{
{TiDBAutoAnalyzeStartTime, "15:04", false},
{TiDBAutoAnalyzeStartTime, "15:04 -0700", false},
{DelayKeyWrite, "ON", false},
{DelayKeyWrite, "OFF", false},
{DelayKeyWrite, "ALL", false},
{DelayKeyWrite, "3", true},
{ForeignKeyChecks, "3", true},
{MaxSpRecursionDepth, "256", false},
{SessionTrackGtids, "OFF", false},
{SessionTrackGtids, "OWN_GTID", false},
{SessionTrackGtids, "ALL_GTIDS", false},
{SessionTrackGtids, "ON", true},
{EnforceGtidConsistency, "OFF", false},
{EnforceGtidConsistency, "ON", false},
{EnforceGtidConsistency, "WARN", false},
{QueryCacheType, "OFF", false},
{QueryCacheType, "ON", false},
{QueryCacheType, "DEMAND", false},
{QueryCacheType, "3", true},
{SecureAuth, "1", false},
{SecureAuth, "3", true},
{MyISAMUseMmap, "ON", false},
{MyISAMUseMmap, "OFF", false},
{TiDBEnableTablePartition, "ON", false},
{TiDBEnableTablePartition, "OFF", false},
{TiDBEnableTablePartition, "AUTO", false},
{TiDBEnableTablePartition, "UN", true},
{TiDBEnableListTablePartition, "ON", false},
{TiDBEnableListTablePartition, "OFF", false},
{TiDBEnableListTablePartition, "list", true},
{TiDBOptCorrelationExpFactor, "a", true},
{TiDBOptCorrelationExpFactor, "-10", true},
{TiDBOptCorrelationThreshold, "a", true},
{TiDBOptCorrelationThreshold, "-2", true},
{TiDBOptCPUFactor, "a", true},
{TiDBOptCPUFactor, "-2", true},
{TiDBOptTiFlashConcurrencyFactor, "-2", true},
{TiDBOptCopCPUFactor, "a", true},
{TiDBOptCopCPUFactor, "-2", true},
{TiDBOptNetworkFactor, "a", true},
{TiDBOptNetworkFactor, "-2", true},
{TiDBOptScanFactor, "a", true},
{TiDBOptScanFactor, "-2", true},
{TiDBOptDescScanFactor, "a", true},
{TiDBOptDescScanFactor, "-2", true},
{TiDBOptSeekFactor, "a", true},
{TiDBOptSeekFactor, "-2", true},
{TiDBOptMemoryFactor, "a", true},
{TiDBOptMemoryFactor, "-2", true},
{TiDBOptDiskFactor, "a", true},
{TiDBOptDiskFactor, "-2", true},
{TiDBOptConcurrencyFactor, "a", true},
{TiDBOptConcurrencyFactor, "-2", true},
{TxnIsolation, "READ-UNCOMMITTED", true},
{TiDBInitChunkSize, "a", true},
{TiDBInitChunkSize, "-1", true},
{TiDBMaxChunkSize, "a", true},
{TiDBMaxChunkSize, "-1", true},
{TiDBOptJoinReorderThreshold, "a", true},
{TiDBOptJoinReorderThreshold, "-1", true},
{TiDBReplicaRead, "invalid", true},
{TiDBTxnMode, "invalid", true},
{TiDBTxnMode, "pessimistic", false},
{TiDBTxnMode, "optimistic", false},
{TiDBTxnMode, "", false},
{TiDBIsolationReadEngines, "", true},
{TiDBIsolationReadEngines, "tikv", false},
{TiDBIsolationReadEngines, "TiKV,tiflash", false},
{TiDBIsolationReadEngines, " tikv, tiflash ", false},
{TiDBShardAllocateStep, "ad", true},
{TiDBShardAllocateStep, "-123", false},
{TiDBShardAllocateStep, "128", false},
{TiDBEnableAmendPessimisticTxn, "0", false},
{TiDBEnableAmendPessimisticTxn, "1", false},
{TiDBEnableAmendPessimisticTxn, "256", true},
{TiDBAllowFallbackToTiKV, "", false},
{TiDBAllowFallbackToTiKV, "tiflash", false},
{TiDBAllowFallbackToTiKV, " tiflash ", false},
{TiDBAllowFallbackToTiKV, "tikv", true},
{TiDBAllowFallbackToTiKV, "tidb", true},
{TiDBAllowFallbackToTiKV, "tiflash,tikv,tidb", true},
}
for _, t := range tests {
_, err := GetSysVar(t.key).Validate(v, t.value, ScopeGlobal)
if t.error {
c.Assert(err, NotNil, Commentf("%v got err=%v", t, err))
} else {
c.Assert(err, IsNil, Commentf("%v got err=%v", t, err))
}
}
}
func (s *testVarsutilSuite) TestValidateStmtSummary(c *C) {
v := NewSessionVars()
v.GlobalVarsAccessor = NewMockGlobalAccessor()
v.TimeZone = time.UTC
tests := []struct {
key string
value string
error bool
scope ScopeFlag
}{
{TiDBEnableStmtSummary, "a", true, ScopeSession},
{TiDBEnableStmtSummary, "-1", true, ScopeSession},
{TiDBEnableStmtSummary, "", false, ScopeSession},
{TiDBEnableStmtSummary, "", true, ScopeGlobal},
{TiDBStmtSummaryInternalQuery, "a", true, ScopeSession},
{TiDBStmtSummaryInternalQuery, "-1", true, ScopeSession},
{TiDBStmtSummaryInternalQuery, "", false, ScopeSession},
{TiDBStmtSummaryInternalQuery, "", true, ScopeGlobal},
{TiDBStmtSummaryRefreshInterval, "a", true, ScopeSession},
{TiDBStmtSummaryRefreshInterval, "", false, ScopeSession},
{TiDBStmtSummaryRefreshInterval, "", true, ScopeGlobal},
{TiDBStmtSummaryRefreshInterval, "0", true, ScopeGlobal},
{TiDBStmtSummaryRefreshInterval, "99999999999", true, ScopeGlobal},
{TiDBStmtSummaryHistorySize, "a", true, ScopeSession},
{TiDBStmtSummaryHistorySize, "", false, ScopeSession},
{TiDBStmtSummaryHistorySize, "", true, ScopeGlobal},
{TiDBStmtSummaryHistorySize, "0", false, ScopeGlobal},
{TiDBStmtSummaryHistorySize, "-1", true, ScopeGlobal},
{TiDBStmtSummaryHistorySize, "99999999", true, ScopeGlobal},
{TiDBStmtSummaryMaxStmtCount, "a", true, ScopeSession},
{TiDBStmtSummaryMaxStmtCount, "", false, ScopeSession},
{TiDBStmtSummaryMaxStmtCount, "", true, ScopeGlobal},
{TiDBStmtSummaryMaxStmtCount, "0", true, ScopeGlobal},
{TiDBStmtSummaryMaxStmtCount, "99999999", true, ScopeGlobal},
{TiDBStmtSummaryMaxSQLLength, "a", true, ScopeSession},
{TiDBStmtSummaryMaxSQLLength, "", false, ScopeSession},
{TiDBStmtSummaryMaxSQLLength, "", true, ScopeGlobal},
{TiDBStmtSummaryMaxSQLLength, "0", false, ScopeGlobal},
{TiDBStmtSummaryMaxSQLLength, "-1", true, ScopeGlobal},
{TiDBStmtSummaryMaxSQLLength, "99999999999", true, ScopeGlobal},
}
for _, t := range tests {
_, err := GetSysVar(t.key).Validate(v, t.value, t.scope)
if t.error {
c.Assert(err, NotNil, Commentf("%v got err=%v", t, err))
} else {
c.Assert(err, IsNil, Commentf("%v got err=%v", t, err))
}
}
}
func (s *testVarsutilSuite) TestConcurrencyVariables(c *C) {
defer testleak.AfterTest(c)()
vars := NewSessionVars()
vars.GlobalVarsAccessor = NewMockGlobalAccessor()
wdConcurrency := 2
c.Assert(vars.windowConcurrency, Equals, ConcurrencyUnset)
c.Assert(vars.WindowConcurrency(), Equals, DefExecutorConcurrency)
err := SetSessionSystemVar(vars, TiDBWindowConcurrency, types.NewIntDatum(int64(wdConcurrency)))
c.Assert(err, IsNil)
c.Assert(vars.windowConcurrency, Equals, wdConcurrency)
c.Assert(vars.WindowConcurrency(), Equals, wdConcurrency)
mjConcurrency := 2
c.Assert(vars.mergeJoinConcurrency, Equals, DefTiDBMergeJoinConcurrency)
c.Assert(vars.MergeJoinConcurrency(), Equals, DefTiDBMergeJoinConcurrency)
err = SetSessionSystemVar(vars, TiDBMergeJoinConcurrency, types.NewIntDatum(int64(mjConcurrency)))
c.Assert(err, IsNil)
c.Assert(vars.mergeJoinConcurrency, Equals, mjConcurrency)
c.Assert(vars.MergeJoinConcurrency(), Equals, mjConcurrency)
saConcurrency := 2
c.Assert(vars.streamAggConcurrency, Equals, DefTiDBStreamAggConcurrency)
c.Assert(vars.StreamAggConcurrency(), Equals, DefTiDBStreamAggConcurrency)
err = SetSessionSystemVar(vars, TiDBStreamAggConcurrency, types.NewIntDatum(int64(saConcurrency)))
c.Assert(err, IsNil)
c.Assert(vars.streamAggConcurrency, Equals, saConcurrency)
c.Assert(vars.StreamAggConcurrency(), Equals, saConcurrency)
c.Assert(vars.indexLookupConcurrency, Equals, ConcurrencyUnset)
c.Assert(vars.IndexLookupConcurrency(), Equals, DefExecutorConcurrency)
exeConcurrency := DefExecutorConcurrency + 1
err = SetSessionSystemVar(vars, TiDBExecutorConcurrency, types.NewIntDatum(int64(exeConcurrency)))
c.Assert(err, IsNil)
c.Assert(vars.indexLookupConcurrency, Equals, ConcurrencyUnset)
c.Assert(vars.IndexLookupConcurrency(), Equals, exeConcurrency)
c.Assert(vars.WindowConcurrency(), Equals, wdConcurrency)
c.Assert(vars.MergeJoinConcurrency(), Equals, mjConcurrency)
c.Assert(vars.StreamAggConcurrency(), Equals, saConcurrency)
}
| sessionctx/variable/varsutil_test.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.00031503327772952616,
0.00017251948884222656,
0.00016446183144580573,
0.00017007399583235383,
0.000018029293642030098
] |
{
"id": 3,
"code_window": [
"\t\"github.com/pingcap/tidb/planner/property\"\n",
"\t\"github.com/pingcap/tidb/privilege\"\n",
"\t\"github.com/pingcap/tidb/sessionctx\"\n",
"\t\"github.com/pingcap/tidb/sessionctx/stmtctx\"\n",
"\t\"github.com/pingcap/tidb/table\"\n",
"\t\"github.com/pingcap/tidb/table/tables\"\n",
"\t\"github.com/pingcap/tidb/types\"\n",
"\tdriver \"github.com/pingcap/tidb/types/parser_driver\"\n",
"\ttidbutil \"github.com/pingcap/tidb/util\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/store/tikv\"\n"
],
"file_path": "planner/core/point_get_plan.go",
"type": "add",
"edit_start_line_idx": 35
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"encoding/hex"
"math/rand"
"strings"
"sync/atomic"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
tidbkv "github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/logutil"
"github.com/pingcap/tidb/store/tikv/metrics"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
)
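// actionPessimisticLock implements twoPhaseCommitAction and acquires pessimistic locks for a batch of mutations.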
type actionPessimisticLock struct {
*kv.LockCtx
}
type actionPessimisticRollback struct{}
var (
_ twoPhaseCommitAction = actionPessimisticLock{}
_ twoPhaseCommitAction = actionPessimisticRollback{}
)
func (actionPessimisticLock) String() string {
return "pessimistic_lock"
}
func (actionPessimisticLock) tiKVTxnRegionsNumHistogram() prometheus.Observer {
return metrics.TxnRegionsNumHistogramPessimisticLock
}
func (actionPessimisticRollback) String() string {
return "pessimistic_rollback"
}
func (actionPessimisticRollback) tiKVTxnRegionsNumHistogram() prometheus.Observer {
return metrics.TxnRegionsNumHistogramPessimisticRollback
}
func (action actionPessimisticLock) handleSingleBatch(c *twoPhaseCommitter, bo *Backoffer, batch batchMutations) error {
m := batch.mutations
mutations := make([]*pb.Mutation, m.Len())
for i := 0; i < m.Len(); i++ {
mut := &pb.Mutation{
Op: pb.Op_PessimisticLock,
Key: m.GetKey(i),
}
if c.txn.us.HasPresumeKeyNotExists(m.GetKey(i)) || (c.doingAmend && m.GetOp(i) == pb.Op_Insert) {
mut.Assertion = pb.Assertion_NotExist
}
mutations[i] = mut
}
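	// The lock TTL accounts for the time already elapsed in this transaction plus the managed lock TTL.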
elapsed := uint64(time.Since(c.txn.startTime) / time.Millisecond)
ttl := elapsed + atomic.LoadUint64(&ManagedLockTTL)
failpoint.Inject("shortPessimisticLockTTL", func() {
ttl = 1
keys := make([]string, 0, len(mutations))
for _, m := range mutations {
keys = append(keys, hex.EncodeToString(m.Key))
}
logutil.BgLogger().Info("[failpoint] injected lock ttl = 1 on pessimistic lock",
zap.Uint64("txnStartTS", c.startTS), zap.Strings("keys", keys))
})
req := tikvrpc.NewRequest(tikvrpc.CmdPessimisticLock, &pb.PessimisticLockRequest{
Mutations: mutations,
PrimaryLock: c.primary(),
StartVersion: c.startTS,
ForUpdateTs: c.forUpdateTS,
LockTtl: ttl,
IsFirstLock: c.isFirstLock,
WaitTimeout: action.LockWaitTime,
ReturnValues: action.ReturnValues,
MinCommitTs: c.forUpdateTS + 1,
}, pb.Context{Priority: c.priority, SyncLog: c.syncLog})
lockWaitStartTime := action.WaitStartTime
for {
		// If lockWaitTime is set, refine the request's `WaitTimeout` field based on the remaining timeout.
if action.LockWaitTime > 0 {
timeLeft := action.LockWaitTime - (time.Since(lockWaitStartTime)).Milliseconds()
if timeLeft <= 0 {
req.PessimisticLock().WaitTimeout = tidbkv.LockNoWait
} else {
req.PessimisticLock().WaitTimeout = timeLeft
}
}
failpoint.Inject("PessimisticLockErrWriteConflict", func() error {
time.Sleep(300 * time.Millisecond)
return &kv.ErrWriteConflict{WriteConflict: nil}
})
startTime := time.Now()
resp, err := c.store.SendReq(bo, req, batch.region, ReadTimeoutShort)
if action.LockCtx.Stats != nil {
atomic.AddInt64(&action.LockCtx.Stats.LockRPCTime, int64(time.Since(startTime)))
atomic.AddInt64(&action.LockCtx.Stats.LockRPCCount, 1)
}
if err != nil {
return errors.Trace(err)
}
regionErr, err := resp.GetRegionError()
if err != nil {
return errors.Trace(err)
}
if regionErr != nil {
err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String()))
if err != nil {
return errors.Trace(err)
}
err = c.pessimisticLockMutations(bo, action.LockCtx, batch.mutations)
return errors.Trace(err)
}
if resp.Resp == nil {
return errors.Trace(kv.ErrBodyMissing)
}
lockResp := resp.Resp.(*pb.PessimisticLockResponse)
keyErrs := lockResp.GetErrors()
if len(keyErrs) == 0 {
if action.ReturnValues {
action.ValuesLock.Lock()
for i, mutation := range mutations {
action.Values[string(mutation.Key)] = kv.ReturnedValue{Value: lockResp.Values[i]}
}
action.ValuesLock.Unlock()
}
return nil
}
var locks []*Lock
for _, keyErr := range keyErrs {
// Check already exists error
if alreadyExist := keyErr.GetAlreadyExist(); alreadyExist != nil {
e := &kv.ErrKeyExist{AlreadyExist: alreadyExist}
return c.extractKeyExistsErr(e)
}
if deadlock := keyErr.Deadlock; deadlock != nil {
return &kv.ErrDeadlock{Deadlock: deadlock}
}
// Extract lock from key error
lock, err1 := extractLockFromKeyErr(keyErr)
if err1 != nil {
return errors.Trace(err1)
}
locks = append(locks, lock)
}
		// Because we already waited on tikv, there is no need to call Backoff here.
		// TiKV by default waits 3s (also the maximum wait value) when a lock error occurs.
startTime = time.Now()
msBeforeTxnExpired, _, err := c.store.lockResolver.ResolveLocks(bo, 0, locks)
if err != nil {
return errors.Trace(err)
}
if action.LockCtx.Stats != nil {
atomic.AddInt64(&action.LockCtx.Stats.ResolveLockTime, int64(time.Since(startTime)))
}
		// If msBeforeTxnExpired is not zero, there are still locks blocking us from acquiring the
		// pessimistic lock. Return an acquire-failed error if nowait is set, or a timeout error when necessary.
if msBeforeTxnExpired > 0 {
if action.LockWaitTime == tidbkv.LockNoWait {
return kv.ErrLockAcquireFailAndNoWaitSet
} else if action.LockWaitTime == tidbkv.LockAlwaysWait {
// do nothing but keep wait
} else {
// the lockWaitTime is set, we should return wait timeout if we are still blocked by a lock
if time.Since(lockWaitStartTime).Milliseconds() >= action.LockWaitTime {
return errors.Trace(kv.ErrLockWaitTimeout)
}
}
if action.LockCtx.PessimisticLockWaited != nil {
atomic.StoreInt32(action.LockCtx.PessimisticLockWaited, 1)
}
}
		// Handle the killed flag when waiting for the pessimistic lock.
		// When a txn runs into LockKeys() and backs off here, it has no chance to call
		// executor.Next() and check the killed flag.
if action.Killed != nil {
			// Do not reset the killed flag here!
			// actionPessimisticLock runs on each region in parallel, so we have to consider that
			// the error may be dropped.
if atomic.LoadUint32(action.Killed) == 1 {
return errors.Trace(kv.ErrQueryInterrupted)
}
}
}
}
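// handleSingleBatch sends a PessimisticRollback request for the keys in one batch and retries the batch on region errors.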
func (actionPessimisticRollback) handleSingleBatch(c *twoPhaseCommitter, bo *Backoffer, batch batchMutations) error {
req := tikvrpc.NewRequest(tikvrpc.CmdPessimisticRollback, &pb.PessimisticRollbackRequest{
StartVersion: c.startTS,
ForUpdateTs: c.forUpdateTS,
Keys: batch.mutations.GetKeys(),
})
resp, err := c.store.SendReq(bo, req, batch.region, ReadTimeoutShort)
if err != nil {
return errors.Trace(err)
}
regionErr, err := resp.GetRegionError()
if err != nil {
return errors.Trace(err)
}
if regionErr != nil {
err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String()))
if err != nil {
return errors.Trace(err)
}
err = c.pessimisticRollbackMutations(bo, batch.mutations)
return errors.Trace(err)
}
return nil
}
func (c *twoPhaseCommitter) pessimisticLockMutations(bo *Backoffer, lockCtx *kv.LockCtx, mutations CommitterMutations) error {
if c.sessionID > 0 {
failpoint.Inject("beforePessimisticLock", func(val failpoint.Value) {
			// Pass multiple instructions in one string, delimited by commas, to trigger multiple behaviors, like
			// `return("delay,fail")`. Then they will be executed sequentially.
if v, ok := val.(string); ok {
for _, action := range strings.Split(v, ",") {
if action == "delay" {
duration := time.Duration(rand.Int63n(int64(time.Second) * 5))
logutil.Logger(bo.ctx).Info("[failpoint] injected delay at pessimistic lock",
zap.Uint64("txnStartTS", c.startTS), zap.Duration("duration", duration))
time.Sleep(duration)
} else if action == "fail" {
logutil.Logger(bo.ctx).Info("[failpoint] injected failure at pessimistic lock",
zap.Uint64("txnStartTS", c.startTS))
failpoint.Return(errors.New("injected failure at pessimistic lock"))
}
}
}
})
}
return c.doActionOnMutations(bo, actionPessimisticLock{lockCtx}, mutations)
}
func (c *twoPhaseCommitter) pessimisticRollbackMutations(bo *Backoffer, mutations CommitterMutations) error {
return c.doActionOnMutations(bo, actionPessimisticRollback{}, mutations)
}
| store/tikv/pessimistic.go | 1 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0006035302067175508,
0.00019331461226101965,
0.00016469175170641392,
0.00016955625324044377,
0.00008694911957718432
] |
{
"id": 3,
"code_window": [
"\t\"github.com/pingcap/tidb/planner/property\"\n",
"\t\"github.com/pingcap/tidb/privilege\"\n",
"\t\"github.com/pingcap/tidb/sessionctx\"\n",
"\t\"github.com/pingcap/tidb/sessionctx/stmtctx\"\n",
"\t\"github.com/pingcap/tidb/table\"\n",
"\t\"github.com/pingcap/tidb/table/tables\"\n",
"\t\"github.com/pingcap/tidb/types\"\n",
"\tdriver \"github.com/pingcap/tidb/types/parser_driver\"\n",
"\ttidbutil \"github.com/pingcap/tidb/util\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/store/tikv\"\n"
],
"file_path": "planner/core/point_get_plan.go",
"type": "add",
"edit_start_line_idx": 35
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"context"
"fmt"
"sync"
"sync/atomic"
"testing"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/coprocessor"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/kvproto/pkg/tikvpb"
"github.com/pingcap/tidb/store/tikv/config"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"google.golang.org/grpc/metadata"
)
func TestT(t *testing.T) {
CustomVerboseFlag = true
TestingT(t)
}
type testClientSuite struct {
OneByOneSuite
}
type testClientSerialSuite struct {
OneByOneSuite
}
var _ = Suite(&testClientSuite{})
var _ = SerialSuites(&testClientFailSuite{})
var _ = SerialSuites(&testClientSerialSuite{})
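// TestConn checks that two successive Get() calls return different underlying connections and that
// getConnArray returns an error once the client is closed.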
func (s *testClientSerialSuite) TestConn(c *C) {
defer config.UpdateGlobal(func(conf *config.Config) {
conf.TiKVClient.MaxBatchSize = 0
})()
client := NewRPCClient(config.Security{})
addr := "127.0.0.1:6379"
conn1, err := client.getConnArray(addr, true)
c.Assert(err, IsNil)
conn2, err := client.getConnArray(addr, true)
c.Assert(err, IsNil)
c.Assert(conn2.Get(), Not(Equals), conn1.Get())
client.Close()
conn3, err := client.getConnArray(addr, true)
c.Assert(err, NotNil)
c.Assert(conn3, IsNil)
}
func (s *testClientSuite) TestCancelTimeoutRetErr(c *C) {
req := new(tikvpb.BatchCommandsRequest_Request)
a := newBatchConn(1, 1, nil)
ctx, cancel := context.WithCancel(context.TODO())
cancel()
_, err := sendBatchRequest(ctx, "", "", a, req, 2*time.Second)
c.Assert(errors.Cause(err), Equals, context.Canceled)
_, err = sendBatchRequest(context.Background(), "", "", a, req, 0)
c.Assert(errors.Cause(err), Equals, context.DeadlineExceeded)
}
func (s *testClientSuite) TestSendWhenReconnect(c *C) {
server, port := startMockTikvService()
c.Assert(port > 0, IsTrue)
rpcClient := NewRPCClient(config.Security{})
addr := fmt.Sprintf("%s:%d", "127.0.0.1", port)
conn, err := rpcClient.getConnArray(addr, true)
c.Assert(err, IsNil)
	// Suppose all connections are being re-established.
for _, client := range conn.batchConn.batchCommandsClients {
client.lockForRecreate()
}
req := tikvrpc.NewRequest(tikvrpc.CmdEmpty, &tikvpb.BatchCommandsEmptyRequest{})
_, err = rpcClient.SendRequest(context.Background(), addr, req, 100*time.Second)
c.Assert(err.Error() == "no available connections", IsTrue)
conn.Close()
server.Stop()
}
// chanClient sends received requests to the channel.
type chanClient struct {
wg *sync.WaitGroup
ch chan<- *tikvrpc.Request
}
func (c *chanClient) Close() error {
return nil
}
func (c *chanClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
c.wg.Wait()
c.ch <- req
return nil, nil
}
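// TestCollapseResolveLock verifies that duplicate ResolveLock requests are collapsed into one,
// while ResolveLockLite and BatchResolveLock requests are not collapsed.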
func (s *testClientSuite) TestCollapseResolveLock(c *C) {
buildResolveLockReq := func(regionID uint64, startTS uint64, commitTS uint64, keys [][]byte) *tikvrpc.Request {
region := &metapb.Region{Id: regionID}
req := tikvrpc.NewRequest(tikvrpc.CmdResolveLock, &kvrpcpb.ResolveLockRequest{
StartVersion: startTS,
CommitVersion: commitTS,
Keys: keys,
})
tikvrpc.SetContext(req, region, nil)
return req
}
buildBatchResolveLockReq := func(regionID uint64, txnInfos []*kvrpcpb.TxnInfo) *tikvrpc.Request {
region := &metapb.Region{Id: regionID}
req := tikvrpc.NewRequest(tikvrpc.CmdResolveLock, &kvrpcpb.ResolveLockRequest{
TxnInfos: txnInfos,
})
tikvrpc.SetContext(req, region, nil)
return req
}
var wg sync.WaitGroup
reqCh := make(chan *tikvrpc.Request)
client := reqCollapse{&chanClient{wg: &wg, ch: reqCh}}
ctx := context.Background()
// Collapse ResolveLock.
resolveLockReq := buildResolveLockReq(1, 10, 20, nil)
wg.Add(1)
go client.SendRequest(ctx, "", resolveLockReq, time.Second)
go client.SendRequest(ctx, "", resolveLockReq, time.Second)
time.Sleep(300 * time.Millisecond)
wg.Done()
req := <-reqCh
c.Assert(*req, DeepEquals, *resolveLockReq)
select {
case <-reqCh:
c.Fatal("fail to collapse ResolveLock")
default:
}
// Don't collapse ResolveLockLite.
resolveLockLiteReq := buildResolveLockReq(1, 10, 20, [][]byte{[]byte("foo")})
wg.Add(1)
go client.SendRequest(ctx, "", resolveLockLiteReq, time.Second)
go client.SendRequest(ctx, "", resolveLockLiteReq, time.Second)
time.Sleep(300 * time.Millisecond)
wg.Done()
for i := 0; i < 2; i++ {
req := <-reqCh
c.Assert(*req, DeepEquals, *resolveLockLiteReq)
}
// Don't collapse BatchResolveLock.
batchResolveLockReq := buildBatchResolveLockReq(1, []*kvrpcpb.TxnInfo{
{Txn: 10, Status: 20},
})
wg.Add(1)
go client.SendRequest(ctx, "", batchResolveLockReq, time.Second)
go client.SendRequest(ctx, "", batchResolveLockReq, time.Second)
time.Sleep(300 * time.Millisecond)
wg.Done()
for i := 0; i < 2; i++ {
req := <-reqCh
c.Assert(*req, DeepEquals, *batchResolveLockReq)
}
// Mixed
wg.Add(1)
go client.SendRequest(ctx, "", resolveLockReq, time.Second)
go client.SendRequest(ctx, "", resolveLockLiteReq, time.Second)
go client.SendRequest(ctx, "", batchResolveLockReq, time.Second)
time.Sleep(300 * time.Millisecond)
wg.Done()
for i := 0; i < 3; i++ {
<-reqCh
}
select {
case <-reqCh:
c.Fatal("unexpected request")
default:
}
}
func (s *testClientSerialSuite) TestForwardMetadataByUnaryCall(c *C) {
server, port := startMockTikvService()
c.Assert(port > 0, IsTrue)
defer server.Stop()
addr := fmt.Sprintf("%s:%d", "127.0.0.1", port)
// Disable batch.
defer config.UpdateGlobal(func(conf *config.Config) {
conf.TiKVClient.MaxBatchSize = 0
conf.TiKVClient.GrpcConnectionCount = 1
})()
rpcClient := NewRPCClient(config.Security{})
defer rpcClient.closeConns()
var checkCnt uint64
// Check no corresponding metadata if ForwardedHost is empty.
server.setMetaChecker(func(ctx context.Context) error {
atomic.AddUint64(&checkCnt, 1)
// gRPC may set some metadata by default, e.g. "context-type".
md, ok := metadata.FromIncomingContext(ctx)
if ok {
vals := md.Get(forwardMetadataKey)
c.Assert(len(vals), Equals, 0)
}
return nil
})
// Prewrite represents unary-unary call.
prewriteReq := tikvrpc.NewRequest(tikvrpc.CmdPrewrite, &kvrpcpb.PrewriteRequest{})
for i := 0; i < 3; i++ {
_, err := rpcClient.SendRequest(context.Background(), addr, prewriteReq, 10*time.Second)
c.Assert(err, IsNil)
}
c.Assert(atomic.LoadUint64(&checkCnt), Equals, uint64(3))
// CopStream represents unary-stream call.
copStreamReq := tikvrpc.NewRequest(tikvrpc.CmdCopStream, &coprocessor.Request{})
_, err := rpcClient.SendRequest(context.Background(), addr, copStreamReq, 10*time.Second)
c.Assert(err, IsNil)
c.Assert(atomic.LoadUint64(&checkCnt), Equals, uint64(4))
checkCnt = 0
forwardedHost := "127.0.0.1:6666"
// Check the metadata exists.
server.setMetaChecker(func(ctx context.Context) error {
atomic.AddUint64(&checkCnt, 1)
// gRPC may set some metadata by default, e.g. "context-type".
md, ok := metadata.FromIncomingContext(ctx)
c.Assert(ok, IsTrue)
vals := md.Get(forwardMetadataKey)
c.Assert(vals, DeepEquals, []string{forwardedHost})
return nil
})
prewriteReq.ForwardedHost = forwardedHost
for i := 0; i < 3; i++ {
_, err = rpcClient.SendRequest(context.Background(), addr, prewriteReq, 10*time.Second)
c.Assert(err, IsNil)
}
// checkCnt should be 3 because we don't use BatchCommands for redirection for now.
c.Assert(atomic.LoadUint64(&checkCnt), Equals, uint64(3))
copStreamReq.ForwardedHost = forwardedHost
_, err = rpcClient.SendRequest(context.Background(), addr, copStreamReq, 10*time.Second)
c.Assert(err, IsNil)
c.Assert(atomic.LoadUint64(&checkCnt), Equals, uint64(4))
}
func (s *testClientSerialSuite) TestForwardMetadataByBatchCommands(c *C) {
server, port := startMockTikvService()
c.Assert(port > 0, IsTrue)
defer server.Stop()
addr := fmt.Sprintf("%s:%d", "127.0.0.1", port)
// Enable batch and limit the connection count to 1 so that
// there is only one BatchCommands stream for each host or forwarded host.
defer config.UpdateGlobal(func(conf *config.Config) {
conf.TiKVClient.MaxBatchSize = 128
conf.TiKVClient.GrpcConnectionCount = 1
})()
rpcClient := NewRPCClient(config.Security{})
defer rpcClient.closeConns()
var checkCnt uint64
setCheckHandler := func(forwardedHost string) {
server.setMetaChecker(func(ctx context.Context) error {
atomic.AddUint64(&checkCnt, 1)
md, ok := metadata.FromIncomingContext(ctx)
if forwardedHost == "" {
if ok {
vals := md.Get(forwardMetadataKey)
c.Assert(len(vals), Equals, 0)
}
} else {
c.Assert(ok, IsTrue)
vals := md.Get(forwardMetadataKey)
c.Assert(vals, DeepEquals, []string{forwardedHost})
}
return nil
})
}
prewriteReq := tikvrpc.NewRequest(tikvrpc.CmdPrewrite, &kvrpcpb.PrewriteRequest{})
forwardedHosts := []string{"", "127.0.0.1:6666", "127.0.0.1:7777", "127.0.0.1:8888"}
for i, forwardedHost := range forwardedHosts {
setCheckHandler(forwardedHost)
prewriteReq.ForwardedHost = forwardedHost
for i := 0; i < 3; i++ {
_, err := rpcClient.SendRequest(context.Background(), addr, prewriteReq, 10*time.Second)
c.Assert(err, IsNil)
}
		// checkCnt should be i+1 because a new stream is created for each forwardedHost.
c.Assert(atomic.LoadUint64(&checkCnt), Equals, 1+uint64(i))
}
checkCnt = 0
// CopStream is a unary-stream call which doesn't support batch.
copStreamReq := tikvrpc.NewRequest(tikvrpc.CmdCopStream, &coprocessor.Request{})
// Check no corresponding metadata if forwardedHost is empty.
setCheckHandler("")
_, err := rpcClient.SendRequest(context.Background(), addr, copStreamReq, 10*time.Second)
c.Assert(err, IsNil)
c.Assert(atomic.LoadUint64(&checkCnt), Equals, uint64(1))
copStreamReq.ForwardedHost = "127.0.0.1:6666"
// Check the metadata exists.
setCheckHandler(copStreamReq.ForwardedHost)
_, err = rpcClient.SendRequest(context.Background(), addr, copStreamReq, 10*time.Second)
c.Assert(err, IsNil)
c.Assert(atomic.LoadUint64(&checkCnt), Equals, uint64(2))
}
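// TestBatchCommandsBuilder covers request batching, per-forwarded-host grouping, skipping of
// canceled entries, cancellation of pending entries, and reset of the builder.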
func (s *testClientSuite) TestBatchCommandsBuilder(c *C) {
builder := newBatchCommandsBuilder(128)
// Test no forwarding requests.
builder.reset()
req := new(tikvpb.BatchCommandsRequest_Request)
for i := 0; i < 10; i++ {
builder.push(&batchCommandsEntry{req: req})
c.Assert(builder.len(), Equals, i+1)
}
entryMap := make(map[uint64]*batchCommandsEntry)
batchedReq, forwardingReqs := builder.build(func(id uint64, e *batchCommandsEntry) {
entryMap[id] = e
})
c.Assert(len(batchedReq.GetRequests()), Equals, 10)
c.Assert(len(batchedReq.GetRequestIds()), Equals, 10)
c.Assert(len(entryMap), Equals, 10)
for i, id := range batchedReq.GetRequestIds() {
c.Assert(id, Equals, uint64(i))
c.Assert(entryMap[id].req, Equals, batchedReq.GetRequests()[i])
}
c.Assert(len(forwardingReqs), Equals, 0)
c.Assert(builder.idAlloc, Equals, uint64(10))
// Test collecting forwarding requests.
builder.reset()
forwardedHosts := []string{"", "127.0.0.1:6666", "127.0.0.1:7777", "127.0.0.1:8888"}
for i := range forwardedHosts {
for j, host := range forwardedHosts {
			// Each forwarded host gets an increasing number of requests,
			// interleaved with the others.
if i <= j {
builder.push(&batchCommandsEntry{req: req, forwardedHost: host})
}
}
}
entryMap = make(map[uint64]*batchCommandsEntry)
batchedReq, forwardingReqs = builder.build(func(id uint64, e *batchCommandsEntry) {
entryMap[id] = e
})
c.Assert(len(batchedReq.GetRequests()), Equals, 1)
c.Assert(len(batchedReq.GetRequestIds()), Equals, 1)
c.Assert(len(forwardingReqs), Equals, 3)
for i, host := range forwardedHosts[1:] {
c.Assert(len(forwardingReqs[host].GetRequests()), Equals, i+2)
c.Assert(len(forwardingReqs[host].GetRequestIds()), Equals, i+2)
}
c.Assert(builder.idAlloc, Equals, uint64(10+builder.len()))
c.Assert(len(entryMap), Equals, builder.len())
for host, forwardingReq := range forwardingReqs {
for i, id := range forwardingReq.GetRequestIds() {
c.Assert(entryMap[id].req, Equals, forwardingReq.GetRequests()[i])
c.Assert(entryMap[id].forwardedHost, Equals, host)
}
}
// Test not collecting canceled requests
builder.reset()
entries := []*batchCommandsEntry{
{canceled: 1, req: req},
{canceled: 0, req: req},
{canceled: 1, req: req},
{canceled: 1, req: req},
{canceled: 0, req: req},
}
for _, entry := range entries {
builder.push(entry)
}
entryMap = make(map[uint64]*batchCommandsEntry)
batchedReq, forwardingReqs = builder.build(func(id uint64, e *batchCommandsEntry) {
entryMap[id] = e
})
c.Assert(len(batchedReq.GetRequests()), Equals, 2)
c.Assert(len(batchedReq.GetRequestIds()), Equals, 2)
c.Assert(len(forwardingReqs), Equals, 0)
c.Assert(len(entryMap), Equals, 2)
for i, id := range batchedReq.GetRequestIds() {
c.Assert(entryMap[id].req, Equals, batchedReq.GetRequests()[i])
c.Assert(entryMap[id].isCanceled(), IsFalse)
}
// Test canceling all requests
builder.reset()
entries = entries[:0]
for i := 0; i < 3; i++ {
entry := &batchCommandsEntry{req: req, res: make(chan *tikvpb.BatchCommandsResponse_Response, 1)}
entries = append(entries, entry)
builder.push(entry)
}
err := errors.New("error")
builder.cancel(err)
for _, entry := range entries {
_, ok := <-entry.res
c.Assert(ok, IsFalse)
c.Assert(entry.err, Equals, err)
}
// Test reset
builder.reset()
c.Assert(builder.len(), Equals, 0)
c.Assert(len(builder.entries), Equals, 0)
c.Assert(len(builder.requests), Equals, 0)
c.Assert(len(builder.requestIDs), Equals, 0)
c.Assert(len(builder.forwardingReqs), Equals, 0)
c.Assert(builder.idAlloc, Not(Equals), 0)
}
| store/tikv/client_test.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0002598641440272331,
0.00017382620717398822,
0.00016172627510968596,
0.00017140866839326918,
0.0000174014967342373
] |
{
"id": 3,
"code_window": [
"\t\"github.com/pingcap/tidb/planner/property\"\n",
"\t\"github.com/pingcap/tidb/privilege\"\n",
"\t\"github.com/pingcap/tidb/sessionctx\"\n",
"\t\"github.com/pingcap/tidb/sessionctx/stmtctx\"\n",
"\t\"github.com/pingcap/tidb/table\"\n",
"\t\"github.com/pingcap/tidb/table/tables\"\n",
"\t\"github.com/pingcap/tidb/types\"\n",
"\tdriver \"github.com/pingcap/tidb/types/parser_driver\"\n",
"\ttidbutil \"github.com/pingcap/tidb/util\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/store/tikv\"\n"
],
"file_path": "planner/core/point_get_plan.go",
"type": "add",
"edit_start_line_idx": 35
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"math"
"github.com/pingcap/errors"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/planner/property"
"github.com/pingcap/tidb/planner/util"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/types"
tidbutil "github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/collate"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tidb/util/set"
"go.uber.org/zap"
"golang.org/x/tools/container/intsets"
)
const (
// SelectionFactor is the default factor of the selectivity.
// For example, If we have no idea how to estimate the selectivity
// of a Selection or a JoinCondition, we can use this default value.
SelectionFactor = 0.8
distinctFactor = 0.8
// If the actual row count is much more than the limit count, the unordered scan may cost much more than keep order.
// So when a limit exists, we don't apply the DescScanFactor.
smallScanThreshold = 10000
)
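// aggFuncFactor maps each aggregate function to a relative cost factor used in cost estimation;
// "default" covers functions that are not listed explicitly.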
var aggFuncFactor = map[string]float64{
ast.AggFuncCount: 1.0,
ast.AggFuncSum: 1.0,
ast.AggFuncAvg: 2.0,
ast.AggFuncFirstRow: 0.1,
ast.AggFuncMax: 1.0,
ast.AggFuncMin: 1.0,
ast.AggFuncGroupConcat: 1.0,
ast.AggFuncBitOr: 0.9,
ast.AggFuncBitXor: 0.9,
ast.AggFuncBitAnd: 0.9,
ast.AggFuncVarPop: 3.0,
ast.AggFuncVarSamp: 3.0,
ast.AggFuncStddevPop: 3.0,
ast.AggFuncStddevSamp: 3.0,
"default": 1.5,
}
// PlanCounterTp is used in hint nth_plan() to indicate which plan to use.
type PlanCounterTp int64
// PlanCounterDisabled is the default value of PlanCounterTp, indicating that optimizer needn't force a plan.
var PlanCounterDisabled PlanCounterTp = -1
// Dec minus PlanCounterTp value by x.
func (c *PlanCounterTp) Dec(x int64) {
if *c <= 0 {
return
}
*c = PlanCounterTp(int64(*c) - x)
if *c < 0 {
*c = 0
}
}
// Empty indicates whether the PlanCounterTp is clear now.
func (c *PlanCounterTp) Empty() bool {
return *c == 0
}
// IsForce indicates whether to force a plan.
func (c *PlanCounterTp) IsForce() bool {
return *c != -1
}
var invalidTask = &rootTask{cst: math.MaxFloat64}
// GetPropByOrderByItems will check if this sort property can be pushed or not. In order to simplify the problem, we only
// consider the case that all expressions are columns.
func GetPropByOrderByItems(items []*util.ByItems) (*property.PhysicalProperty, bool) {
propItems := make([]property.SortItem, 0, len(items))
for _, item := range items {
col, ok := item.Expr.(*expression.Column)
if !ok {
return nil, false
}
propItems = append(propItems, property.SortItem{Col: col, Desc: item.Desc})
}
return &property.PhysicalProperty{SortItems: propItems}, true
}
// GetPropByOrderByItemsContainScalarFunc will check if this sort property can be pushed or not. In order to simplify the
// problem, we only consider the case that all expressions are columns or some special scalar functions.
func GetPropByOrderByItemsContainScalarFunc(items []*util.ByItems) (*property.PhysicalProperty, bool, bool) {
propItems := make([]property.SortItem, 0, len(items))
onlyColumn := true
for _, item := range items {
switch expr := item.Expr.(type) {
case *expression.Column:
propItems = append(propItems, property.SortItem{Col: expr, Desc: item.Desc})
case *expression.ScalarFunction:
col, desc := expr.GetSingleColumn(item.Desc)
if col == nil {
return nil, false, false
}
propItems = append(propItems, property.SortItem{Col: col, Desc: desc})
onlyColumn = false
default:
return nil, false, false
}
}
return &property.PhysicalProperty{SortItems: propItems}, true, onlyColumn
}
func (p *LogicalTableDual) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp) (task, int64, error) {
// If the required property is not empty and the row count > 1,
// we cannot ensure this required property.
// But if the row count is 0 or 1, we don't need to care about the property.
if (!prop.IsEmpty() && p.RowCount > 1) || planCounter.Empty() {
return invalidTask, 0, nil
}
dual := PhysicalTableDual{
RowCount: p.RowCount,
}.Init(p.ctx, p.stats, p.blockOffset)
dual.SetSchema(p.schema)
planCounter.Dec(1)
return &rootTask{p: dual}, 1, nil
}
func (p *LogicalShow) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp) (task, int64, error) {
if !prop.IsEmpty() || planCounter.Empty() {
return invalidTask, 0, nil
}
pShow := PhysicalShow{ShowContents: p.ShowContents}.Init(p.ctx)
pShow.SetSchema(p.schema)
planCounter.Dec(1)
return &rootTask{p: pShow}, 1, nil
}
func (p *LogicalShowDDLJobs) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp) (task, int64, error) {
if !prop.IsEmpty() || planCounter.Empty() {
return invalidTask, 0, nil
}
pShow := PhysicalShowDDLJobs{JobNumber: p.JobNumber}.Init(p.ctx)
pShow.SetSchema(p.schema)
planCounter.Dec(1)
return &rootTask{p: pShow}, 1, nil
}
// rebuildChildTasks rebuilds the childTasks to produce the clock-th combination of child plans.
func (p *baseLogicalPlan) rebuildChildTasks(childTasks *[]task, pp PhysicalPlan, childCnts []int64, planCounter int64, TS uint64) error {
// The taskMap of children nodes should be rolled back first.
for _, child := range p.children {
child.rollBackTaskMap(TS)
}
multAll := int64(1)
var curClock PlanCounterTp
for _, x := range childCnts {
multAll *= x
}
*childTasks = (*childTasks)[:0]
for j, child := range p.children {
multAll /= childCnts[j]
curClock = PlanCounterTp((planCounter-1)/multAll + 1)
childTask, _, err := child.findBestTask(pp.GetChildReqProps(j), &curClock)
planCounter = (planCounter-1)%multAll + 1
if err != nil {
return err
}
if curClock != 0 {
return errors.Errorf("PlanCounterTp planCounter is not handled")
}
if childTask != nil && childTask.invalid() {
return errors.Errorf("The current plan is invalid, please skip this plan.")
}
*childTasks = append(*childTasks, childTask)
}
return nil
}
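// enumeratePhysicalPlans4Task finds the best child tasks for every candidate physical plan, attaches
// them to the plan, optionally adds an enforcer for the required property, and returns the cheapest
// resulting task together with the number of plans enumerated.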
func (p *baseLogicalPlan) enumeratePhysicalPlans4Task(physicalPlans []PhysicalPlan, prop *property.PhysicalProperty, addEnforcer bool, planCounter *PlanCounterTp) (task, int64, error) {
var bestTask task = invalidTask
var curCntPlan, cntPlan int64
childTasks := make([]task, 0, len(p.children))
childCnts := make([]int64, len(p.children))
cntPlan = 0
for _, pp := range physicalPlans {
// Find best child tasks firstly.
childTasks = childTasks[:0]
// The curCntPlan records the number of possible plans for pp
curCntPlan = 1
TimeStampNow := p.GetlogicalTS4TaskMap()
savedPlanID := p.ctx.GetSessionVars().PlanID
for j, child := range p.children {
childTask, cnt, err := child.findBestTask(pp.GetChildReqProps(j), &PlanCounterDisabled)
childCnts[j] = cnt
if err != nil {
return nil, 0, err
}
curCntPlan = curCntPlan * cnt
if childTask != nil && childTask.invalid() {
break
}
childTasks = append(childTasks, childTask)
}
// This check makes sure that there is no invalid child task.
if len(childTasks) != len(p.children) {
continue
}
// If the target plan can be found in this physicalPlan(pp), rebuild childTasks to build the corresponding combination.
if planCounter.IsForce() && int64(*planCounter) <= curCntPlan {
p.ctx.GetSessionVars().PlanID = savedPlanID
curCntPlan = int64(*planCounter)
err := p.rebuildChildTasks(&childTasks, pp, childCnts, int64(*planCounter), TimeStampNow)
if err != nil {
return nil, 0, err
}
}
// Combine best child tasks with parent physical plan.
curTask := pp.attach2Task(childTasks...)
if curTask.invalid() {
continue
}
// An optimal task could not satisfy the property, so it should be converted here.
if _, ok := curTask.(*rootTask); !ok && prop.TaskTp == property.RootTaskType {
curTask = curTask.convertToRootTask(p.ctx)
}
// Enforce curTask property
if addEnforcer {
curTask = enforceProperty(prop, curTask, p.basePlan.ctx)
}
// Optimize by shuffle executor to running in parallel manner.
if prop.IsEmpty() {
// Currently, we do not regard shuffled plan as a new plan.
curTask = optimizeByShuffle(curTask, p.basePlan.ctx)
}
cntPlan += curCntPlan
planCounter.Dec(curCntPlan)
if planCounter.Empty() {
bestTask = curTask
break
}
// Get the most efficient one.
if curTask.cost() < bestTask.cost() || (bestTask.invalid() && !curTask.invalid()) {
bestTask = curTask
}
}
return bestTask, cntPlan, nil
}
// findBestTask implements LogicalPlan interface.
func (p *baseLogicalPlan) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp) (bestTask task, cntPlan int64, err error) {
// If p is an inner plan in an IndexJoin, the IndexJoin will generate an inner plan by itself,
// and set inner child prop nil, so here we do nothing.
if prop == nil {
return nil, 1, nil
}
// Look up the task with this prop in the task map.
// It's used to reduce double counting.
bestTask = p.getTask(prop)
if bestTask != nil {
planCounter.Dec(1)
return bestTask, 1, nil
}
canAddEnforcer := prop.CanAddEnforcer
if prop.TaskTp != property.RootTaskType && !prop.IsFlashProp() {
		// Currently, no plan can be completely pushed down to TiKV.
p.storeTask(prop, invalidTask)
return invalidTask, 0, nil
}
bestTask = invalidTask
cntPlan = 0
// prop should be read only because its cached hashcode might be not consistent
// when it is changed. So we clone a new one for the temporary changes.
newProp := prop.CloneEssentialFields()
var plansFitsProp, plansNeedEnforce []PhysicalPlan
var hintWorksWithProp bool
// Maybe the plan can satisfy the required property,
// so we try to get the task without the enforced sort first.
plansFitsProp, hintWorksWithProp = p.self.exhaustPhysicalPlans(newProp)
if !hintWorksWithProp && !newProp.IsEmpty() {
// If there is a hint in the plan and the hint cannot satisfy the property,
// we enforce this property and try to generate the PhysicalPlan again to
// make sure the hint can work.
canAddEnforcer = true
}
if canAddEnforcer {
// Then, we use the empty property to get physicalPlans and
// try to get the task with an enforced sort.
newProp.SortItems = []property.SortItem{}
newProp.ExpectedCnt = math.MaxFloat64
newProp.PartitionCols = nil
newProp.PartitionTp = property.AnyType
var hintCanWork bool
plansNeedEnforce, hintCanWork = p.self.exhaustPhysicalPlans(newProp)
if hintCanWork && !hintWorksWithProp {
// If the hint can work with the empty property, but cannot work with
// the required property, we give up `plansFitProp` to make sure the hint
// can work.
plansFitsProp = nil
}
if !hintCanWork && !hintWorksWithProp && !prop.CanAddEnforcer {
// If the original property is not enforced and hint cannot
			// work anyway, we give up `plansNeedEnforce` for efficiency.
plansNeedEnforce = nil
}
newProp = prop
}
var cnt int64
var curTask task
if bestTask, cnt, err = p.enumeratePhysicalPlans4Task(plansFitsProp, newProp, false, planCounter); err != nil {
return nil, 0, err
}
cntPlan += cnt
if planCounter.Empty() {
goto END
}
curTask, cnt, err = p.enumeratePhysicalPlans4Task(plansNeedEnforce, newProp, true, planCounter)
if err != nil {
return nil, 0, err
}
cntPlan += cnt
if planCounter.Empty() {
bestTask = curTask
goto END
}
if curTask.cost() < bestTask.cost() || (bestTask.invalid() && !curTask.invalid()) {
bestTask = curTask
}
END:
p.storeTask(prop, bestTask)
return bestTask, cntPlan, nil
}
func (p *LogicalMemTable) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp) (t task, cntPlan int64, err error) {
if !prop.IsEmpty() || planCounter.Empty() {
return invalidTask, 0, nil
}
memTable := PhysicalMemTable{
DBName: p.DBName,
Table: p.TableInfo,
Columns: p.TableInfo.Columns,
Extractor: p.Extractor,
QueryTimeRange: p.QueryTimeRange,
}.Init(p.ctx, p.stats, p.blockOffset)
memTable.SetSchema(p.schema)
planCounter.Dec(1)
return &rootTask{p: memTable}, 1, nil
}
// tryToGetDualTask will check if the push down predicate has false constant. If so, it will return table dual.
func (ds *DataSource) tryToGetDualTask() (task, error) {
for _, cond := range ds.pushedDownConds {
if con, ok := cond.(*expression.Constant); ok && con.DeferredExpr == nil && con.ParamMarker == nil {
result, _, err := expression.EvalBool(ds.ctx, []expression.Expression{cond}, chunk.Row{})
if err != nil {
return nil, err
}
if !result {
dual := PhysicalTableDual{}.Init(ds.ctx, ds.stats, ds.blockOffset)
dual.SetSchema(ds.schema)
return &rootTask{
p: dual,
}, nil
}
}
}
return nil, nil
}
// candidatePath is used to maintain required info for skyline pruning.
type candidatePath struct {
path *util.AccessPath
columnSet *intsets.Sparse // columnSet is the set of columns that occurred in the access conditions.
isSingleScan bool
isMatchProp bool
}
// compareColumnSet compares the two column sets. The last return value indicates whether they are
// comparable; it is false when both sets have columns that do not occur in the other.
// When the second return value is true, the value of first:
// (1) -1 means that `l` is a strict subset of `r`;
// (2) 0 means that `l` equals to `r`;
// (3) 1 means that `l` is a strict superset of `r`.
func compareColumnSet(l, r *intsets.Sparse) (int, bool) {
lLen, rLen := l.Len(), r.Len()
if lLen < rLen {
// -1 is meaningful only when l.SubsetOf(r) is true.
return -1, l.SubsetOf(r)
}
if lLen == rLen {
// 0 is meaningful only when l.SubsetOf(r) is true.
return 0, l.SubsetOf(r)
}
// 1 is meaningful only when r.SubsetOf(l) is true.
return 1, r.SubsetOf(l)
}
func compareBool(l, r bool) int {
if l == r {
return 0
}
if !l {
return -1
}
return 1
}
// compareCandidates is the core of skyline pruning. It compares the two candidate paths on three dimensions:
// (1): the set of columns that occurred in the access condition,
// (2): whether or not it matches the physical property
// (3): whether it requires a double scan.
// If `x` is not worse than `y` on any factor,
// and there exists at least one factor on which `x` is better than `y`, then `x` is better than `y`.
func compareCandidates(lhs, rhs *candidatePath) int {
setsResult, comparable := compareColumnSet(lhs.columnSet, rhs.columnSet)
if !comparable {
return 0
}
scanResult := compareBool(lhs.isSingleScan, rhs.isSingleScan)
matchResult := compareBool(lhs.isMatchProp, rhs.isMatchProp)
sum := setsResult + scanResult + matchResult
if setsResult >= 0 && scanResult >= 0 && matchResult >= 0 && sum > 0 {
return 1
}
if setsResult <= 0 && scanResult <= 0 && matchResult <= 0 && sum < 0 {
return -1
}
return 0
}
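// getTableCandidate builds a candidatePath for a table access path and records whether the path can
// provide the required sort property.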
func (ds *DataSource) getTableCandidate(path *util.AccessPath, prop *property.PhysicalProperty) *candidatePath {
candidate := &candidatePath{path: path}
if path.IsIntHandlePath {
pkCol := ds.getPKIsHandleCol()
if len(prop.SortItems) == 1 && pkCol != nil {
candidate.isMatchProp = prop.SortItems[0].Col.Equal(nil, pkCol)
if path.StoreType == kv.TiFlash {
candidate.isMatchProp = candidate.isMatchProp && !prop.SortItems[0].Desc
}
}
} else {
all, _ := prop.AllSameOrder()
		// When the prop is empty or `all` is false, `isMatchProp` is better left `false` because
		// the index scan then does not need to keep order.
if !prop.IsEmpty() && all {
for i, col := range path.IdxCols {
if col.Equal(nil, prop.SortItems[0].Col) {
candidate.isMatchProp = matchIndicesProp(path.IdxCols[i:], path.IdxColLens[i:], prop.SortItems)
break
} else if i >= path.EqCondCount {
break
}
}
}
}
candidate.columnSet = expression.ExtractColumnSet(path.AccessConds)
candidate.isSingleScan = true
return candidate
}
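// getIndexCandidate builds a candidatePath for an index access path; isSingleScan reports whether the
// index covers all needed columns, so no extra table lookup is required.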
func (ds *DataSource) getIndexCandidate(path *util.AccessPath, prop *property.PhysicalProperty, isSingleScan bool) *candidatePath {
candidate := &candidatePath{path: path}
all, _ := prop.AllSameOrder()
	// When the prop is empty or `all` is false, `isMatchProp` is better left `false` because
	// the index scan then does not need to keep order.
if !prop.IsEmpty() && all {
for i, col := range path.IdxCols {
if col.Equal(nil, prop.SortItems[0].Col) {
candidate.isMatchProp = matchIndicesProp(path.IdxCols[i:], path.IdxColLens[i:], prop.SortItems)
break
} else if i >= path.EqCondCount {
break
}
}
}
candidate.columnSet = expression.ExtractColumnSet(path.AccessConds)
candidate.isSingleScan = isSingleScan
return candidate
}
func (ds *DataSource) getIndexMergeCandidate(path *util.AccessPath) *candidatePath {
candidate := &candidatePath{path: path}
return candidate
}
// skylinePruning prunes access paths according to different factors. An access path can be pruned only if
// there exists a path that is not worse than it at all factors and there is at least one better factor.
func (ds *DataSource) skylinePruning(prop *property.PhysicalProperty) []*candidatePath {
candidates := make([]*candidatePath, 0, 4)
for _, path := range ds.possibleAccessPaths {
if path.PartialIndexPaths != nil {
candidates = append(candidates, ds.getIndexMergeCandidate(path))
continue
}
// if we already know the range of the scan is empty, just return a TableDual
if len(path.Ranges) == 0 {
return []*candidatePath{{path: path}}
}
if path.StoreType != kv.TiFlash && prop.IsFlashProp() {
continue
}
var currentCandidate *candidatePath
if path.IsTablePath() {
if path.StoreType == kv.TiFlash {
if path.IsTiFlashGlobalRead && prop.TaskTp == property.CopTiFlashGlobalReadTaskType {
currentCandidate = ds.getTableCandidate(path, prop)
}
if !path.IsTiFlashGlobalRead && prop.TaskTp != property.CopTiFlashGlobalReadTaskType {
currentCandidate = ds.getTableCandidate(path, prop)
}
} else {
if !path.IsTiFlashGlobalRead && !prop.IsFlashProp() {
currentCandidate = ds.getTableCandidate(path, prop)
}
}
if currentCandidate == nil {
continue
}
} else {
coveredByIdx := ds.isCoveringIndex(ds.schema.Columns, path.FullIdxCols, path.FullIdxColLens, ds.tableInfo)
if len(path.AccessConds) > 0 || !prop.IsEmpty() || path.Forced || coveredByIdx {
// We will use index to generate physical plan if any of the following conditions is satisfied:
// 1. This path's access cond is not nil.
// 2. We have a non-empty prop to match.
// 3. This index is forced to choose.
// 4. The needed columns are all covered by index columns(and handleCol).
currentCandidate = ds.getIndexCandidate(path, prop, coveredByIdx)
} else {
continue
}
}
pruned := false
for i := len(candidates) - 1; i >= 0; i-- {
if candidates[i].path.StoreType == kv.TiFlash {
continue
}
result := compareCandidates(candidates[i], currentCandidate)
if result == 1 {
pruned = true
// We can break here because the current candidate cannot prune others anymore.
break
} else if result == -1 {
candidates = append(candidates[:i], candidates[i+1:]...)
}
}
if !pruned {
candidates = append(candidates, currentCandidate)
}
}
if ds.ctx.GetSessionVars().GetAllowPreferRangeScan() && len(candidates) > 1 {
// remove the table/index full scan path
for i, c := range candidates {
for _, ran := range c.path.Ranges {
if ran.IsFullRange() {
candidates = append(candidates[:i], candidates[i+1:]...)
return candidates
}
}
}
}
return candidates
}
// findBestTask implements the PhysicalPlan interface.
// It will enumerate all the available indices and choose a plan with least cost.
func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp) (t task, cntPlan int64, err error) {
// If ds is an inner plan in an IndexJoin, the IndexJoin will generate an inner plan by itself,
// and set inner child prop nil, so here we do nothing.
if prop == nil {
planCounter.Dec(1)
return nil, 1, nil
}
t = ds.getTask(prop)
if t != nil {
cntPlan = 1
planCounter.Dec(1)
return
}
var cnt int64
	// If prop.CanAddEnforcer is true, prop.SortItems need to be cleared for ds.findBestTask.
	// Before the function returns, restore them so the task property can be enforced and stored in map<prop,task>.
oldProp := prop.CloneEssentialFields()
if prop.CanAddEnforcer {
// First, get the bestTask without enforced prop
prop.CanAddEnforcer = false
t, cnt, err = ds.findBestTask(prop, planCounter)
if err != nil {
return nil, 0, err
}
prop.CanAddEnforcer = true
if t != invalidTask {
ds.storeTask(prop, t)
cntPlan = cnt
return
}
// Next, get the bestTask with enforced prop
prop.SortItems = []property.SortItem{}
prop.PartitionTp = property.AnyType
} else if prop.PartitionTp != property.AnyType {
return invalidTask, 0, nil
}
defer func() {
if err != nil {
return
}
if prop.CanAddEnforcer {
*prop = *oldProp
t = enforceProperty(prop, t, ds.basePlan.ctx)
prop.CanAddEnforcer = true
}
ds.storeTask(prop, t)
if ds.SampleInfo != nil && !t.invalid() {
if _, ok := t.plan().(*PhysicalTableSample); !ok {
warning := expression.ErrInvalidTableSample.GenWithStackByArgs("plan not supported")
ds.ctx.GetSessionVars().StmtCtx.AppendWarning(warning)
}
}
}()
t, err = ds.tryToGetDualTask()
if err != nil || t != nil {
planCounter.Dec(1)
return t, 1, err
}
t = invalidTask
candidates := ds.skylinePruning(prop)
cntPlan = 0
for _, candidate := range candidates {
path := candidate.path
if path.PartialIndexPaths != nil {
idxMergeTask, err := ds.convertToIndexMergeScan(prop, candidate)
if err != nil {
return nil, 0, err
}
if !idxMergeTask.invalid() {
cntPlan += 1
planCounter.Dec(1)
}
if idxMergeTask.cost() < t.cost() || planCounter.Empty() {
t = idxMergeTask
}
if planCounter.Empty() {
return t, cntPlan, nil
}
continue
}
// if we already know the range of the scan is empty, just return a TableDual
if len(path.Ranges) == 0 && !ds.ctx.GetSessionVars().StmtCtx.UseCache {
dual := PhysicalTableDual{}.Init(ds.ctx, ds.stats, ds.blockOffset)
dual.SetSchema(ds.schema)
cntPlan += 1
planCounter.Dec(1)
return &rootTask{
p: dual,
}, cntPlan, nil
}
canConvertPointGet := len(path.Ranges) > 0 && path.StoreType == kv.TiKV
if canConvertPointGet && !path.IsIntHandlePath {
// We simply do not build [batch] point get for prefix indexes. This can be optimized.
canConvertPointGet = path.Index.Unique && !path.Index.HasPrefixIndex()
// If any range cannot cover all columns of the index, we cannot build [batch] point get.
idxColsLen := len(path.Index.Columns)
for _, ran := range path.Ranges {
if len(ran.LowVal) != idxColsLen {
canConvertPointGet = false
break
}
}
}
var hashPartColName *ast.ColumnName
if tblInfo := ds.table.Meta(); canConvertPointGet && tblInfo.GetPartitionInfo() != nil {
// We do not build [batch] point get for dynamic table partitions now. This can be optimized.
if ds.ctx.GetSessionVars().UseDynamicPartitionPrune() {
canConvertPointGet = false
} else if len(path.Ranges) > 1 {
// We can only build batch point get for hash partitions on a simple column now. This is
// decided by the current implementation of `BatchPointGetExec::initialize()`, specifically,
// the `getPhysID()` function. Once we optimize that part, we can come back and enable
// BatchPointGet plan for more cases.
hashPartColName = getHashPartitionColumnName(ds.ctx, tblInfo)
if hashPartColName == nil {
canConvertPointGet = false
}
}
}
if canConvertPointGet {
allRangeIsPoint := true
for _, ran := range path.Ranges {
if !ran.IsPoint(ds.ctx.GetSessionVars().StmtCtx) {
allRangeIsPoint = false
break
}
}
if allRangeIsPoint {
var pointGetTask task
if len(path.Ranges) == 1 {
pointGetTask = ds.convertToPointGet(prop, candidate)
} else {
pointGetTask = ds.convertToBatchPointGet(prop, candidate, hashPartColName)
}
if !pointGetTask.invalid() {
cntPlan += 1
planCounter.Dec(1)
}
if pointGetTask.cost() < t.cost() || planCounter.Empty() {
t = pointGetTask
if planCounter.Empty() {
return
}
continue
}
}
}
if path.IsTablePath() {
if ds.preferStoreType&preferTiFlash != 0 && path.StoreType == kv.TiKV {
continue
}
if ds.preferStoreType&preferTiKV != 0 && path.StoreType == kv.TiFlash {
continue
}
var tblTask task
if ds.SampleInfo != nil {
tblTask, err = ds.convertToSampleTable(prop, candidate)
} else {
tblTask, err = ds.convertToTableScan(prop, candidate)
}
if err != nil {
return nil, 0, err
}
if !tblTask.invalid() {
cntPlan += 1
planCounter.Dec(1)
}
if tblTask.cost() < t.cost() || planCounter.Empty() {
t = tblTask
}
if planCounter.Empty() {
return t, cntPlan, nil
}
continue
}
// TiFlash storage do not support index scan.
if ds.preferStoreType&preferTiFlash != 0 {
continue
}
idxTask, err := ds.convertToIndexScan(prop, candidate)
if err != nil {
return nil, 0, err
}
if !idxTask.invalid() {
cntPlan += 1
planCounter.Dec(1)
}
if idxTask.cost() < t.cost() || planCounter.Empty() {
t = idxTask
}
if planCounter.Empty() {
return t, cntPlan, nil
}
}
return
}
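// convertToIndexMergeScan builds a cop task that runs the partial index/table scans of an IndexMerge
// path plus a table scan that applies the remaining table filters.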
func (ds *DataSource) convertToIndexMergeScan(prop *property.PhysicalProperty, candidate *candidatePath) (task task, err error) {
if prop.TaskTp != property.RootTaskType || !prop.IsEmpty() {
return invalidTask, nil
}
path := candidate.path
var totalCost float64
scans := make([]PhysicalPlan, 0, len(path.PartialIndexPaths))
cop := &copTask{
indexPlanFinished: true,
tblColHists: ds.TblColHists,
}
cop.partitionInfo = PartitionInfo{
PruningConds: ds.allConds,
PartitionNames: ds.partitionNames,
Columns: ds.TblCols,
ColumnNames: ds.names,
}
for _, partPath := range path.PartialIndexPaths {
var scan PhysicalPlan
var partialCost float64
if partPath.IsTablePath() {
scan, partialCost = ds.convertToPartialTableScan(prop, partPath)
} else {
scan, partialCost = ds.convertToPartialIndexScan(prop, partPath)
}
scans = append(scans, scan)
totalCost += partialCost
}
totalRowCount := path.CountAfterAccess
if prop.ExpectedCnt < ds.stats.RowCount {
totalRowCount *= prop.ExpectedCnt / ds.stats.RowCount
}
ts, partialCost, err := ds.buildIndexMergeTableScan(prop, path.TableFilters, totalRowCount)
if err != nil {
return nil, err
}
totalCost += partialCost
cop.tablePlan = ts
cop.idxMergePartPlans = scans
cop.cst = totalCost
task = cop.convertToRootTask(ds.ctx)
return task, nil
}
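// convertToPartialIndexScan builds one partial index scan of an IndexMerge reader, adding a selection
// for the index filters when present, and estimates its partial cost.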
func (ds *DataSource) convertToPartialIndexScan(prop *property.PhysicalProperty, path *util.AccessPath) (
indexPlan PhysicalPlan,
partialCost float64) {
idx := path.Index
is, partialCost, rowCount := ds.getOriginalPhysicalIndexScan(prop, path, false, false)
rowSize := is.indexScanRowSize(idx, ds, false)
// TODO: Consider using isCoveringIndex() to avoid another TableRead
indexConds := path.IndexFilters
sessVars := ds.ctx.GetSessionVars()
if indexConds != nil {
var selectivity float64
partialCost += rowCount * sessVars.CopCPUFactor
if path.CountAfterAccess > 0 {
selectivity = path.CountAfterIndex / path.CountAfterAccess
}
rowCount = is.stats.RowCount * selectivity
stats := &property.StatsInfo{RowCount: rowCount}
stats.StatsVersion = ds.statisticTable.Version
if ds.statisticTable.Pseudo {
stats.StatsVersion = statistics.PseudoVersion
}
indexPlan := PhysicalSelection{Conditions: indexConds}.Init(is.ctx, stats, ds.blockOffset)
indexPlan.SetChildren(is)
partialCost += rowCount * rowSize * sessVars.NetworkFactor
return indexPlan, partialCost
}
partialCost += rowCount * rowSize * sessVars.NetworkFactor
indexPlan = is
return indexPlan, partialCost
}
func (ds *DataSource) convertToPartialTableScan(prop *property.PhysicalProperty, path *util.AccessPath) (
tablePlan PhysicalPlan, partialCost float64) {
ts, partialCost, rowCount := ds.getOriginalPhysicalTableScan(prop, path, false)
overwritePartialTableScanSchema(ds, ts)
rowSize := ds.TblColHists.GetAvgRowSize(ds.ctx, ts.schema.Columns, false, false)
sessVars := ds.ctx.GetSessionVars()
if len(ts.filterCondition) > 0 {
selectivity, _, err := ds.tableStats.HistColl.Selectivity(ds.ctx, ts.filterCondition, nil)
if err != nil {
logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err))
selectivity = SelectionFactor
}
tablePlan = PhysicalSelection{Conditions: ts.filterCondition}.Init(ts.ctx, ts.stats.ScaleByExpectCnt(selectivity*rowCount), ds.blockOffset)
tablePlan.SetChildren(ts)
partialCost += rowCount * sessVars.CopCPUFactor
partialCost += selectivity * rowCount * rowSize * sessVars.NetworkFactor
return tablePlan, partialCost
}
partialCost += rowCount * rowSize * sessVars.NetworkFactor
tablePlan = ts
return tablePlan, partialCost
}
// overwritePartialTableScanSchema change the schema of partial table scan to handle columns.
func overwritePartialTableScanSchema(ds *DataSource, ts *PhysicalTableScan) {
handleCols := ds.handleCols
if handleCols == nil {
handleCols = NewIntHandleCols(ds.newExtraHandleSchemaCol())
}
hdColNum := handleCols.NumCols()
exprCols := make([]*expression.Column, 0, hdColNum)
infoCols := make([]*model.ColumnInfo, 0, hdColNum)
for i := 0; i < hdColNum; i++ {
col := handleCols.GetCol(i)
exprCols = append(exprCols, col)
infoCols = append(infoCols, col.ToInfo())
}
ts.schema = expression.NewSchema(exprCols...)
ts.Columns = infoCols
}
// setIndexMergeTableScanHandleCols set the handle columns of the table scan.
func setIndexMergeTableScanHandleCols(ds *DataSource, ts *PhysicalTableScan) (err error) {
handleCols := ds.handleCols
if handleCols == nil {
handleCols = NewIntHandleCols(ds.newExtraHandleSchemaCol())
}
hdColNum := handleCols.NumCols()
exprCols := make([]*expression.Column, 0, hdColNum)
for i := 0; i < hdColNum; i++ {
col := handleCols.GetCol(i)
exprCols = append(exprCols, col)
}
ts.HandleCols, err = handleCols.ResolveIndices(expression.NewSchema(exprCols...))
return
}
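// buildIndexMergeTableScan builds the table-side scan of an IndexMerge reader, which reads rows by the handles
// collected from the partial plans and applies the remaining table filters, returning the plan and its partial cost.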
func (ds *DataSource) buildIndexMergeTableScan(prop *property.PhysicalProperty, tableFilters []expression.Expression,
totalRowCount float64) (PhysicalPlan, float64, error) {
var partialCost float64
sessVars := ds.ctx.GetSessionVars()
ts := PhysicalTableScan{
Table: ds.tableInfo,
Columns: ds.Columns,
TableAsName: ds.TableAsName,
DBName: ds.DBName,
isPartition: ds.isPartition,
physicalTableID: ds.physicalTableID,
HandleCols: ds.handleCols,
}.Init(ds.ctx, ds.blockOffset)
ts.SetSchema(ds.schema.Clone())
err := setIndexMergeTableScanHandleCols(ds, ts)
if err != nil {
return nil, 0, err
}
if ts.Table.PKIsHandle {
if pkColInfo := ts.Table.GetPkColInfo(); pkColInfo != nil {
if ds.statisticTable.Columns[pkColInfo.ID] != nil {
ts.Hist = &ds.statisticTable.Columns[pkColInfo.ID].Histogram
}
}
}
rowSize := ds.TblColHists.GetTableAvgRowSize(ds.ctx, ds.TblCols, ts.StoreType, true)
partialCost += totalRowCount * rowSize * sessVars.ScanFactor
ts.stats = ds.tableStats.ScaleByExpectCnt(totalRowCount)
if ds.statisticTable.Pseudo {
ts.stats.StatsVersion = statistics.PseudoVersion
}
if len(tableFilters) > 0 {
partialCost += totalRowCount * sessVars.CopCPUFactor
selectivity, _, err := ds.tableStats.HistColl.Selectivity(ds.ctx, tableFilters, nil)
if err != nil {
logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err))
selectivity = SelectionFactor
}
sel := PhysicalSelection{Conditions: tableFilters}.Init(ts.ctx, ts.stats.ScaleByExpectCnt(selectivity*totalRowCount), ts.blockOffset)
sel.SetChildren(ts)
return sel, partialCost, nil
}
return ts, partialCost, nil
}
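// indexCoveringCol checks whether the column is covered by one of the index columns. A prefix index column whose
// length is shorter than the full column length does not cover it.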
func indexCoveringCol(col *expression.Column, indexCols []*expression.Column, idxColLens []int) bool {
for i, indexCol := range indexCols {
isFullLen := idxColLens[i] == types.UnspecifiedLength || idxColLens[i] == col.RetType.Flen
if indexCol != nil && col.Equal(nil, indexCol) && isFullLen {
return true
}
}
return false
}
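// isCoveringIndex checks whether all the given columns can be read from the index columns (or from the clustered
// common handle), so that no extra table lookup is needed.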
func (ds *DataSource) isCoveringIndex(columns, indexColumns []*expression.Column, idxColLens []int, tblInfo *model.TableInfo) bool {
for _, col := range columns {
if tblInfo.PKIsHandle && mysql.HasPriKeyFlag(col.RetType.Flag) {
continue
}
if col.ID == model.ExtraHandleID {
continue
}
coveredByPlainIndex := indexCoveringCol(col, indexColumns, idxColLens)
coveredByClusteredIndex := indexCoveringCol(col, ds.commonHandleCols, ds.commonHandleLens)
if !coveredByPlainIndex && !coveredByClusteredIndex {
return false
}
isClusteredNewCollationIdx := collate.NewCollationEnabled() &&
col.GetType().EvalType() == types.ETString &&
!mysql.HasBinaryFlag(col.GetType().Flag)
if !coveredByPlainIndex && coveredByClusteredIndex && isClusteredNewCollationIdx && ds.table.Meta().CommonHandleVersion == 0 {
return false
}
}
return true
}
// If there is a table reader which needs to keep order, we should append a pk to table scan.
func (ts *PhysicalTableScan) appendExtraHandleCol(ds *DataSource) (*expression.Column, bool) {
handleCols := ds.handleCols
if handleCols != nil {
return handleCols.GetCol(0), false
}
handleCol := ds.newExtraHandleSchemaCol()
ts.schema.Append(handleCol)
ts.Columns = append(ts.Columns, model.NewExtraHandleColInfo())
return handleCol, true
}
// convertToIndexScan converts the DataSource to index scan with idx.
func (ds *DataSource) convertToIndexScan(prop *property.PhysicalProperty, candidate *candidatePath) (task task, err error) {
if !candidate.isSingleScan {
		// If its parent requires a single read task, return max cost.
if prop.TaskTp == property.CopSingleReadTaskType {
return invalidTask, nil
}
} else if prop.TaskTp == property.CopDoubleReadTaskType {
		// If its parent requires a double read task, return max cost.
return invalidTask, nil
}
if !prop.IsEmpty() && !candidate.isMatchProp {
return invalidTask, nil
}
path := candidate.path
is, cost, _ := ds.getOriginalPhysicalIndexScan(prop, path, candidate.isMatchProp, candidate.isSingleScan)
cop := &copTask{
indexPlan: is,
tblColHists: ds.TblColHists,
tblCols: ds.TblCols,
}
cop.partitionInfo = PartitionInfo{
PruningConds: ds.allConds,
PartitionNames: ds.partitionNames,
Columns: ds.TblCols,
ColumnNames: ds.names,
}
if !candidate.isSingleScan {
		// This is the double read case, so a table scan is needed besides the index scan.
ts := PhysicalTableScan{
Columns: ds.Columns,
Table: is.Table,
TableAsName: ds.TableAsName,
isPartition: ds.isPartition,
physicalTableID: ds.physicalTableID,
}.Init(ds.ctx, is.blockOffset)
ts.SetSchema(ds.schema.Clone())
ts.SetCost(cost)
cop.tablePlan = ts
}
cop.cst = cost
task = cop
if cop.tablePlan != nil && ds.tableInfo.IsCommonHandle {
cop.commonHandleCols = ds.commonHandleCols
commonHandle := ds.handleCols.(*CommonHandleCols)
for _, col := range commonHandle.columns {
if ds.schema.ColumnIndex(col) == -1 {
ts := cop.tablePlan.(*PhysicalTableScan)
ts.Schema().Append(col)
ts.Columns = append(ts.Columns, col.ToInfo())
cop.needExtraProj = true
}
}
}
if candidate.isMatchProp {
if cop.tablePlan != nil && !ds.tableInfo.IsCommonHandle {
col, isNew := cop.tablePlan.(*PhysicalTableScan).appendExtraHandleCol(ds)
cop.extraHandleCol = col
cop.needExtraProj = cop.needExtraProj || isNew
}
cop.keepOrder = true
// IndexScan on partition table can't keep order.
if ds.tableInfo.GetPartitionInfo() != nil {
return invalidTask, nil
}
}
if cop.needExtraProj {
cop.originSchema = ds.schema
}
// prop.IsEmpty() would always return true when coming to here,
// so we can just use prop.ExpectedCnt as parameter of addPushedDownSelection.
finalStats := ds.stats.ScaleByExpectCnt(prop.ExpectedCnt)
is.addPushedDownSelection(cop, ds, path, finalStats)
if prop.TaskTp == property.RootTaskType {
task = task.convertToRootTask(ds.ctx)
} else if _, ok := task.(*rootTask); ok {
return invalidTask, nil
}
return task, nil
}
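// indexScanRowSize estimates the average row size of the index scan, based on the columns in its schema
// (plus the handle column when the schema does not already contain it).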
func (is *PhysicalIndexScan) indexScanRowSize(idx *model.IndexInfo, ds *DataSource, isForScan bool) float64 {
scanCols := make([]*expression.Column, 0, len(idx.Columns)+1)
// If `initSchema` has already appended the handle column in schema, just use schema columns, otherwise, add extra handle column.
if len(idx.Columns) == len(is.schema.Columns) {
scanCols = append(scanCols, is.schema.Columns...)
handleCol := ds.getPKIsHandleCol()
if handleCol != nil {
scanCols = append(scanCols, handleCol)
}
} else {
scanCols = is.schema.Columns
}
if isForScan {
return ds.TblColHists.GetIndexAvgRowSize(is.ctx, scanCols, is.Index.Unique)
}
return ds.TblColHists.GetAvgRowSize(is.ctx, scanCols, true, false)
}
// initSchema is used to set the schema of PhysicalIndexScan. Before calling this,
// make sure the following fields of PhysicalIndexScan are initialized:
// PhysicalIndexScan.Table *model.TableInfo
// PhysicalIndexScan.Index *model.IndexInfo
// PhysicalIndexScan.Index.Columns []*IndexColumn
// PhysicalIndexScan.IdxCols []*expression.Column
// PhysicalIndexScan.Columns []*model.ColumnInfo
func (is *PhysicalIndexScan) initSchema(idxExprCols []*expression.Column, isDoubleRead bool) {
indexCols := make([]*expression.Column, len(is.IdxCols), len(is.Index.Columns)+1)
copy(indexCols, is.IdxCols)
for i := len(is.IdxCols); i < len(is.Index.Columns); i++ {
if idxExprCols[i] != nil {
indexCols = append(indexCols, idxExprCols[i])
} else {
// TODO: try to reuse the col generated when building the DataSource.
indexCols = append(indexCols, &expression.Column{
ID: is.Table.Columns[is.Index.Columns[i].Offset].ID,
RetType: &is.Table.Columns[is.Index.Columns[i].Offset].FieldType,
UniqueID: is.ctx.GetSessionVars().AllocPlanColumnID(),
})
}
}
is.NeedCommonHandle = is.Table.IsCommonHandle
if is.NeedCommonHandle {
for i := len(is.Index.Columns); i < len(idxExprCols); i++ {
indexCols = append(indexCols, idxExprCols[i])
}
}
setHandle := len(indexCols) > len(is.Index.Columns)
if !setHandle {
for i, col := range is.Columns {
if (mysql.HasPriKeyFlag(col.Flag) && is.Table.PKIsHandle) || col.ID == model.ExtraHandleID {
indexCols = append(indexCols, is.dataSourceSchema.Columns[i])
setHandle = true
break
}
}
}
if isDoubleRead {
		// In the double read case, the index side must return the handle. So we should add an extra handle column
// if there isn't a handle column.
if !setHandle {
if !is.Table.IsCommonHandle {
indexCols = append(indexCols, &expression.Column{
RetType: types.NewFieldType(mysql.TypeLonglong),
ID: model.ExtraHandleID,
UniqueID: is.ctx.GetSessionVars().AllocPlanColumnID(),
})
}
}
// If index is global, we should add extra column for pid.
if is.Index.Global {
indexCols = append(indexCols, &expression.Column{
RetType: types.NewFieldType(mysql.TypeLonglong),
ID: model.ExtraPidColID,
UniqueID: is.ctx.GetSessionVars().AllocPlanColumnID(),
})
}
}
is.SetSchema(expression.NewSchema(indexCols...))
}
func (is *PhysicalIndexScan) addPushedDownSelection(copTask *copTask, p *DataSource, path *util.AccessPath, finalStats *property.StatsInfo) {
// Add filter condition to table plan now.
indexConds, tableConds := path.IndexFilters, path.TableFilters
tableConds, copTask.rootTaskConds = SplitSelCondsWithVirtualColumn(tableConds)
var newRootConds []expression.Expression
indexConds, newRootConds = expression.PushDownExprs(is.ctx.GetSessionVars().StmtCtx, indexConds, is.ctx.GetClient(), kv.TiKV)
copTask.rootTaskConds = append(copTask.rootTaskConds, newRootConds...)
tableConds, newRootConds = expression.PushDownExprs(is.ctx.GetSessionVars().StmtCtx, tableConds, is.ctx.GetClient(), kv.TiKV)
copTask.rootTaskConds = append(copTask.rootTaskConds, newRootConds...)
sessVars := is.ctx.GetSessionVars()
if indexConds != nil {
copTask.cst += copTask.count() * sessVars.CopCPUFactor
var selectivity float64
if path.CountAfterAccess > 0 {
selectivity = path.CountAfterIndex / path.CountAfterAccess
}
count := is.stats.RowCount * selectivity
stats := p.tableStats.ScaleByExpectCnt(count)
indexSel := PhysicalSelection{Conditions: indexConds}.Init(is.ctx, stats, is.blockOffset)
indexSel.SetChildren(is)
copTask.indexPlan = indexSel
}
if len(tableConds) > 0 {
copTask.finishIndexPlan()
copTask.cst += copTask.count() * sessVars.CopCPUFactor
tableSel := PhysicalSelection{Conditions: tableConds}.Init(is.ctx, finalStats, is.blockOffset)
tableSel.SetChildren(copTask.tablePlan)
copTask.tablePlan = tableSel
}
}
// SplitSelCondsWithVirtualColumn splits the selection conditions, returning the conditions without virtual columns and the conditions that contain virtual columns.
func SplitSelCondsWithVirtualColumn(conds []expression.Expression) ([]expression.Expression, []expression.Expression) {
var filterConds []expression.Expression
for i := len(conds) - 1; i >= 0; i-- {
if expression.ContainVirtualColumn(conds[i : i+1]) {
filterConds = append(filterConds, conds[i])
conds = append(conds[:i], conds[i+1:]...)
}
}
return conds, filterConds
}
func matchIndicesProp(idxCols []*expression.Column, colLens []int, propItems []property.SortItem) bool {
if len(idxCols) < len(propItems) {
return false
}
for i, item := range propItems {
if colLens[i] != types.UnspecifiedLength || !item.Col.Equal(nil, idxCols[i]) {
return false
}
}
return true
}
func (ds *DataSource) splitIndexFilterConditions(conditions []expression.Expression, indexColumns []*expression.Column, idxColLens []int,
table *model.TableInfo) (indexConds, tableConds []expression.Expression) {
var indexConditions, tableConditions []expression.Expression
for _, cond := range conditions {
if ds.isCoveringIndex(expression.ExtractColumns(cond), indexColumns, idxColLens, table) {
indexConditions = append(indexConditions, cond)
} else {
tableConditions = append(tableConditions, cond)
}
}
return indexConditions, tableConditions
}
// getMostCorrCol4Handle checks if the column in the condition is correlated enough with the handle. If the condition
// contains multiple columns, it returns nil together with the max correlation, which would be used in the heuristic estimation.
func getMostCorrCol4Handle(exprs []expression.Expression, histColl *statistics.Table, threshold float64) (*expression.Column, float64) {
var cols []*expression.Column
cols = expression.ExtractColumnsFromExpressions(cols, exprs, nil)
if len(cols) == 0 {
return nil, 0
}
colSet := set.NewInt64Set()
var corr float64
var corrCol *expression.Column
for _, col := range cols {
if colSet.Exist(col.UniqueID) {
continue
}
colSet.Insert(col.UniqueID)
hist, ok := histColl.Columns[col.ID]
if !ok {
continue
}
curCorr := hist.Correlation
if corrCol == nil || math.Abs(corr) < math.Abs(curCorr) {
corrCol = col
corr = curCorr
}
}
if len(colSet) == 1 && math.Abs(corr) >= threshold {
return corrCol, corr
}
return nil, corr
}
// getColumnRangeCounts estimates row count for each range respectively.
func getColumnRangeCounts(sc *stmtctx.StatementContext, colID int64, ranges []*ranger.Range, histColl *statistics.Table, idxID int64) ([]float64, bool) {
var err error
var count float64
rangeCounts := make([]float64, len(ranges))
for i, ran := range ranges {
if idxID >= 0 {
idxHist := histColl.Indices[idxID]
if idxHist == nil || idxHist.IsInvalid(false) {
return nil, false
}
count, err = histColl.GetRowCountByIndexRanges(sc, idxID, []*ranger.Range{ran})
} else {
colHist, ok := histColl.Columns[colID]
if !ok || colHist.IsInvalid(sc, false) {
return nil, false
}
count, err = histColl.GetRowCountByColumnRanges(sc, colID, []*ranger.Range{ran})
}
if err != nil {
return nil, false
}
rangeCounts[i] = count
}
return rangeCounts, true
}
// convertRangeFromExpectedCnt builds new ranges used to estimate row count we need to scan in table scan before finding specified
// number of tuples which fall into input ranges.
func convertRangeFromExpectedCnt(ranges []*ranger.Range, rangeCounts []float64, expectedCnt float64, desc bool) ([]*ranger.Range, float64, bool) {
var i int
var count float64
var convertedRanges []*ranger.Range
if desc {
for i = len(ranges) - 1; i >= 0; i-- {
if count+rangeCounts[i] >= expectedCnt {
break
}
count += rangeCounts[i]
}
if i < 0 {
return nil, 0, true
}
convertedRanges = []*ranger.Range{{LowVal: ranges[i].HighVal, HighVal: []types.Datum{types.MaxValueDatum()}, LowExclude: !ranges[i].HighExclude}}
} else {
for i = 0; i < len(ranges); i++ {
if count+rangeCounts[i] >= expectedCnt {
break
}
count += rangeCounts[i]
}
if i == len(ranges) {
return nil, 0, true
}
convertedRanges = []*ranger.Range{{LowVal: []types.Datum{{}}, HighVal: ranges[i].LowVal, HighExclude: !ranges[i].LowExclude}}
}
return convertedRanges, count, false
}
// crossEstimateTableRowCount estimates row count of table scan using histogram of another column which is in TableFilters
// and has high order correlation with handle column. For example, if the query is like:
// `select * from tbl where a = 1 order by pk limit 1`
// if order of column `a` is strictly correlated with column `pk`, the row count of table scan should be:
// `1 + row_count(a < 1 or a is null)`
func (ds *DataSource) crossEstimateTableRowCount(path *util.AccessPath, expectedCnt float64, desc bool) (float64, bool, float64) {
if ds.statisticTable.Pseudo || len(path.TableFilters) == 0 {
return 0, false, 0
}
col, corr := getMostCorrCol4Handle(path.TableFilters, ds.statisticTable, ds.ctx.GetSessionVars().CorrelationThreshold)
return ds.crossEstimateRowCount(path, path.TableFilters, col, corr, expectedCnt, desc)
}
// crossEstimateRowCount is the common logic of crossEstimateTableRowCount and crossEstimateIndexRowCount.
func (ds *DataSource) crossEstimateRowCount(path *util.AccessPath, conds []expression.Expression, col *expression.Column, corr, expectedCnt float64, desc bool) (float64, bool, float64) {
// If the scan is not full range scan, we cannot use histogram of other columns for estimation, because
// the histogram reflects value distribution in the whole table level.
if col == nil || len(path.AccessConds) > 0 {
return 0, false, corr
}
colInfoID, colID := col.ID, col.UniqueID
if corr < 0 {
desc = !desc
}
accessConds, remained := ranger.DetachCondsForColumn(ds.ctx, conds, col)
if len(accessConds) == 0 {
return 0, false, corr
}
sc := ds.ctx.GetSessionVars().StmtCtx
ranges, err := ranger.BuildColumnRange(accessConds, sc, col.RetType, types.UnspecifiedLength)
if len(ranges) == 0 || err != nil {
return 0, err == nil, corr
}
idxID, idxExists := ds.stats.HistColl.ColID2IdxID[colID]
if !idxExists {
idxID = -1
}
rangeCounts, ok := getColumnRangeCounts(sc, colInfoID, ranges, ds.statisticTable, idxID)
if !ok {
return 0, false, corr
}
convertedRanges, count, isFull := convertRangeFromExpectedCnt(ranges, rangeCounts, expectedCnt, desc)
if isFull {
return path.CountAfterAccess, true, 0
}
var rangeCount float64
if idxExists {
rangeCount, err = ds.statisticTable.GetRowCountByIndexRanges(sc, idxID, convertedRanges)
} else {
rangeCount, err = ds.statisticTable.GetRowCountByColumnRanges(sc, colInfoID, convertedRanges)
}
if err != nil {
return 0, false, corr
}
scanCount := rangeCount + expectedCnt - count
if len(remained) > 0 {
scanCount = scanCount / SelectionFactor
}
scanCount = math.Min(scanCount, path.CountAfterAccess)
return scanCount, true, 0
}
// crossEstimateIndexRowCount estimates row count of index scan using histogram of another column which is in TableFilters/IndexFilters
// and has high order correlation with the first index column. For example, if the query is like:
// `select * from tbl where a = 1 order by b limit 1`
// if order of column `a` is strictly correlated with column `b`, the row count of IndexScan(b) should be:
// `1 + row_count(a < 1 or a is null)`
func (ds *DataSource) crossEstimateIndexRowCount(path *util.AccessPath, expectedCnt float64, desc bool) (float64, bool, float64) {
filtersLen := len(path.TableFilters) + len(path.IndexFilters)
sessVars := ds.ctx.GetSessionVars()
if ds.statisticTable.Pseudo || filtersLen == 0 || !sessVars.EnableExtendedStats {
return 0, false, 0
}
col, corr := getMostCorrCol4Index(path, ds.statisticTable, sessVars.CorrelationThreshold)
filters := make([]expression.Expression, 0, filtersLen)
filters = append(filters, path.TableFilters...)
filters = append(filters, path.IndexFilters...)
return ds.crossEstimateRowCount(path, filters, col, corr, expectedCnt, desc)
}
// getMostCorrCol4Index checks if the column in the condition is correlated enough with the first index column. If the condition
// contains multiple columns, it returns nil together with the max correlation, which would be used in the heuristic estimation.
func getMostCorrCol4Index(path *util.AccessPath, histColl *statistics.Table, threshold float64) (*expression.Column, float64) {
if histColl.ExtendedStats == nil || len(histColl.ExtendedStats.Stats) == 0 {
return nil, 0
}
var cols []*expression.Column
cols = expression.ExtractColumnsFromExpressions(cols, path.TableFilters, nil)
cols = expression.ExtractColumnsFromExpressions(cols, path.IndexFilters, nil)
if len(cols) == 0 {
return nil, 0
}
colSet := set.NewInt64Set()
var corr float64
var corrCol *expression.Column
for _, col := range cols {
if colSet.Exist(col.UniqueID) {
continue
}
colSet.Insert(col.UniqueID)
curCorr := float64(0)
for _, item := range histColl.ExtendedStats.Stats {
if (col.ID == item.ColIDs[0] && path.FullIdxCols[0].ID == item.ColIDs[1]) ||
(col.ID == item.ColIDs[1] && path.FullIdxCols[0].ID == item.ColIDs[0]) {
curCorr = item.ScalarVals
break
}
}
if corrCol == nil || math.Abs(corr) < math.Abs(curCorr) {
corrCol = col
corr = curCorr
}
}
if len(colSet) == 1 && math.Abs(corr) >= threshold {
return corrCol, corr
}
return nil, corr
}
// GetPhysicalScan returns PhysicalTableScan for the LogicalTableScan.
func (s *LogicalTableScan) GetPhysicalScan(schema *expression.Schema, stats *property.StatsInfo) *PhysicalTableScan {
ds := s.Source
ts := PhysicalTableScan{
Table: ds.tableInfo,
Columns: ds.Columns,
TableAsName: ds.TableAsName,
DBName: ds.DBName,
isPartition: ds.isPartition,
physicalTableID: ds.physicalTableID,
Ranges: s.Ranges,
AccessCondition: s.AccessConds,
}.Init(s.ctx, s.blockOffset)
ts.stats = stats
ts.SetSchema(schema.Clone())
if ts.Table.PKIsHandle {
if pkColInfo := ts.Table.GetPkColInfo(); pkColInfo != nil {
if ds.statisticTable.Columns[pkColInfo.ID] != nil {
ts.Hist = &ds.statisticTable.Columns[pkColInfo.ID].Histogram
}
}
}
return ts
}
// GetPhysicalIndexScan returns PhysicalIndexScan for the logical IndexScan.
func (s *LogicalIndexScan) GetPhysicalIndexScan(schema *expression.Schema, stats *property.StatsInfo) *PhysicalIndexScan {
ds := s.Source
is := PhysicalIndexScan{
Table: ds.tableInfo,
TableAsName: ds.TableAsName,
DBName: ds.DBName,
Columns: s.Columns,
Index: s.Index,
IdxCols: s.IdxCols,
IdxColLens: s.IdxColLens,
AccessCondition: s.AccessConds,
Ranges: s.Ranges,
dataSourceSchema: ds.schema,
isPartition: ds.isPartition,
physicalTableID: ds.physicalTableID,
}.Init(ds.ctx, ds.blockOffset)
is.stats = stats
is.initSchema(s.FullIdxCols, s.IsDoubleRead)
return is
}
// convertToTableScan converts the DataSource to table scan.
func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candidate *candidatePath) (task task, err error) {
// It will be handled in convertToIndexScan.
if prop.TaskTp == property.CopDoubleReadTaskType {
return invalidTask, nil
}
if !prop.IsEmpty() && !candidate.isMatchProp {
return invalidTask, nil
}
ts, cost, _ := ds.getOriginalPhysicalTableScan(prop, candidate.path, candidate.isMatchProp)
if ts.KeepOrder && ts.Desc && ts.StoreType == kv.TiFlash {
return invalidTask, nil
}
if prop.TaskTp == property.MppTaskType {
if ts.KeepOrder {
return &mppTask{}, nil
}
if prop.PartitionTp != property.AnyType || ts.isPartition {
			// If ts is a single partition, then this partition table is in static-only pruning mode, so we should not choose MPP execution.
return &mppTask{}, nil
}
for _, col := range ts.schema.Columns {
if col.VirtualExpr != nil {
return &mppTask{}, nil
}
}
mppTask := &mppTask{
p: ts,
cst: cost,
partTp: property.AnyType,
}
ts.PartitionInfo = PartitionInfo{
PruningConds: ds.allConds,
PartitionNames: ds.partitionNames,
Columns: ds.TblCols,
ColumnNames: ds.names,
}
ts.cost = cost
mppTask = ts.addPushedDownSelectionToMppTask(mppTask, ds.stats)
return mppTask, nil
}
copTask := &copTask{
tablePlan: ts,
indexPlanFinished: true,
tblColHists: ds.TblColHists,
cst: cost,
}
copTask.partitionInfo = PartitionInfo{
PruningConds: ds.allConds,
PartitionNames: ds.partitionNames,
Columns: ds.TblCols,
ColumnNames: ds.names,
}
ts.PartitionInfo = copTask.partitionInfo
task = copTask
if candidate.isMatchProp {
copTask.keepOrder = true
// TableScan on partition table can't keep order.
if ds.tableInfo.GetPartitionInfo() != nil {
return invalidTask, nil
}
}
ts.cost = task.cost()
ts.addPushedDownSelection(copTask, ds.stats.ScaleByExpectCnt(prop.ExpectedCnt))
if prop.IsFlashProp() && len(copTask.rootTaskConds) != 0 {
return invalidTask, nil
}
if prop.TaskTp == property.RootTaskType {
task = task.convertToRootTask(ds.ctx)
} else if _, ok := task.(*rootTask); ok {
return invalidTask, nil
}
return task, nil
}
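// convertToSampleTable converts the DataSource to the physical table sample plan, which is used by the TABLESAMPLE clause.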
func (ds *DataSource) convertToSampleTable(prop *property.PhysicalProperty, candidate *candidatePath) (task task, err error) {
if prop.TaskTp == property.CopDoubleReadTaskType {
return invalidTask, nil
}
if !prop.IsEmpty() && !candidate.isMatchProp {
return invalidTask, nil
}
p := PhysicalTableSample{
TableSampleInfo: ds.SampleInfo,
TableInfo: ds.table,
Desc: candidate.isMatchProp && prop.SortItems[0].Desc,
}.Init(ds.ctx, ds.SelectBlockOffset())
p.schema = ds.schema
return &rootTask{
p: p,
}, nil
}
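// convertToPointGet converts the DataSource to a point get plan, adding a selection on top for the filters that are
// not covered by the point access path.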
func (ds *DataSource) convertToPointGet(prop *property.PhysicalProperty, candidate *candidatePath) task {
if !prop.IsEmpty() && !candidate.isMatchProp {
return invalidTask
}
if prop.TaskTp == property.CopDoubleReadTaskType && candidate.isSingleScan ||
prop.TaskTp == property.CopSingleReadTaskType && !candidate.isSingleScan {
return invalidTask
}
if tidbutil.IsMemDB(ds.DBName.L) {
return invalidTask
}
accessCnt := math.Min(candidate.path.CountAfterAccess, float64(1))
pointGetPlan := PointGetPlan{
ctx: ds.ctx,
AccessConditions: candidate.path.AccessConds,
schema: ds.schema.Clone(),
dbName: ds.DBName.L,
TblInfo: ds.TableInfo(),
outputNames: ds.OutputNames(),
LockWaitTime: ds.ctx.GetSessionVars().LockWaitTimeout,
Columns: ds.Columns,
}.Init(ds.ctx, ds.tableStats.ScaleByExpectCnt(accessCnt), ds.blockOffset)
var partitionInfo *model.PartitionDefinition
if ds.isPartition {
if pi := ds.tableInfo.GetPartitionInfo(); pi != nil {
for _, def := range pi.Definitions {
if def.ID == ds.physicalTableID {
partitionInfo = &def
break
}
}
}
if partitionInfo == nil {
return invalidTask
}
}
rTsk := &rootTask{p: pointGetPlan}
var cost float64
if candidate.path.IsIntHandlePath {
pointGetPlan.Handle = kv.IntHandle(candidate.path.Ranges[0].LowVal[0].GetInt64())
pointGetPlan.UnsignedHandle = mysql.HasUnsignedFlag(ds.handleCols.GetCol(0).RetType.Flag)
pointGetPlan.PartitionInfo = partitionInfo
cost = pointGetPlan.GetCost(ds.TblCols)
// Add filter condition to table plan now.
if len(candidate.path.TableFilters) > 0 {
sessVars := ds.ctx.GetSessionVars()
cost += pointGetPlan.stats.RowCount * sessVars.CPUFactor
sel := PhysicalSelection{
Conditions: candidate.path.TableFilters,
}.Init(ds.ctx, ds.stats.ScaleByExpectCnt(prop.ExpectedCnt), ds.blockOffset)
sel.SetChildren(pointGetPlan)
rTsk.p = sel
}
} else {
pointGetPlan.IndexInfo = candidate.path.Index
pointGetPlan.IdxCols = candidate.path.IdxCols
pointGetPlan.IdxColLens = candidate.path.IdxColLens
pointGetPlan.IndexValues = candidate.path.Ranges[0].LowVal
pointGetPlan.PartitionInfo = partitionInfo
if candidate.isSingleScan {
cost = pointGetPlan.GetCost(candidate.path.IdxCols)
} else {
cost = pointGetPlan.GetCost(ds.TblCols)
}
// Add index condition to table plan now.
if len(candidate.path.IndexFilters)+len(candidate.path.TableFilters) > 0 {
sessVars := ds.ctx.GetSessionVars()
cost += pointGetPlan.stats.RowCount * sessVars.CPUFactor
sel := PhysicalSelection{
Conditions: append(candidate.path.IndexFilters, candidate.path.TableFilters...),
}.Init(ds.ctx, ds.stats.ScaleByExpectCnt(prop.ExpectedCnt), ds.blockOffset)
sel.SetChildren(pointGetPlan)
rTsk.p = sel
}
}
rTsk.cst = cost
pointGetPlan.SetCost(cost)
return rTsk
}
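// convertToBatchPointGet converts the DataSource to a batch point get plan, adding a selection on top for the filters
// that are not covered by the access path.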
func (ds *DataSource) convertToBatchPointGet(prop *property.PhysicalProperty, candidate *candidatePath, hashPartColName *ast.ColumnName) task {
if !prop.IsEmpty() && !candidate.isMatchProp {
return invalidTask
}
if prop.TaskTp == property.CopDoubleReadTaskType && candidate.isSingleScan ||
prop.TaskTp == property.CopSingleReadTaskType && !candidate.isSingleScan {
return invalidTask
}
accessCnt := math.Min(candidate.path.CountAfterAccess, float64(len(candidate.path.Ranges)))
batchPointGetPlan := BatchPointGetPlan{
ctx: ds.ctx,
AccessConditions: candidate.path.AccessConds,
TblInfo: ds.TableInfo(),
KeepOrder: !prop.IsEmpty(),
Columns: ds.Columns,
SinglePart: ds.isPartition,
PartTblID: ds.physicalTableID,
}.Init(ds.ctx, ds.tableStats.ScaleByExpectCnt(accessCnt), ds.schema.Clone(), ds.names, ds.blockOffset)
if batchPointGetPlan.KeepOrder {
batchPointGetPlan.Desc = prop.SortItems[0].Desc
}
rTsk := &rootTask{p: batchPointGetPlan}
var cost float64
if candidate.path.IsIntHandlePath {
for _, ran := range candidate.path.Ranges {
batchPointGetPlan.Handles = append(batchPointGetPlan.Handles, kv.IntHandle(ran.LowVal[0].GetInt64()))
}
cost = batchPointGetPlan.GetCost(ds.TblCols)
// Add filter condition to table plan now.
if len(candidate.path.TableFilters) > 0 {
sessVars := ds.ctx.GetSessionVars()
cost += batchPointGetPlan.stats.RowCount * sessVars.CPUFactor
sel := PhysicalSelection{
Conditions: candidate.path.TableFilters,
}.Init(ds.ctx, ds.stats.ScaleByExpectCnt(prop.ExpectedCnt), ds.blockOffset)
sel.SetChildren(batchPointGetPlan)
rTsk.p = sel
}
} else {
batchPointGetPlan.IndexInfo = candidate.path.Index
batchPointGetPlan.IdxCols = candidate.path.IdxCols
batchPointGetPlan.IdxColLens = candidate.path.IdxColLens
batchPointGetPlan.PartitionColPos = getPartitionColumnPos(candidate.path.Index, hashPartColName)
for _, ran := range candidate.path.Ranges {
batchPointGetPlan.IndexValues = append(batchPointGetPlan.IndexValues, ran.LowVal)
}
if !prop.IsEmpty() {
batchPointGetPlan.KeepOrder = true
batchPointGetPlan.Desc = prop.SortItems[0].Desc
}
if candidate.isSingleScan {
cost = batchPointGetPlan.GetCost(candidate.path.IdxCols)
} else {
cost = batchPointGetPlan.GetCost(ds.TblCols)
}
// Add index condition to table plan now.
if len(candidate.path.IndexFilters)+len(candidate.path.TableFilters) > 0 {
sessVars := ds.ctx.GetSessionVars()
cost += batchPointGetPlan.stats.RowCount * sessVars.CPUFactor
sel := PhysicalSelection{
Conditions: append(candidate.path.IndexFilters, candidate.path.TableFilters...),
}.Init(ds.ctx, ds.stats.ScaleByExpectCnt(prop.ExpectedCnt), ds.blockOffset)
sel.SetChildren(batchPointGetPlan)
rTsk.p = sel
}
}
rTsk.cst = cost
batchPointGetPlan.SetCost(cost)
return rTsk
}
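// addPushedDownSelectionToMppTask pushes the table scan's filter conditions down into the MPP task. Conditions that
// cannot be pushed down (e.g. those containing virtual columns) make the MPP task invalid.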
func (ts *PhysicalTableScan) addPushedDownSelectionToMppTask(mpp *mppTask, stats *property.StatsInfo) *mppTask {
filterCondition, rootTaskConds := SplitSelCondsWithVirtualColumn(ts.filterCondition)
var newRootConds []expression.Expression
filterCondition, newRootConds = expression.PushDownExprs(ts.ctx.GetSessionVars().StmtCtx, filterCondition, ts.ctx.GetClient(), ts.StoreType)
rootTaskConds = append(rootTaskConds, newRootConds...)
if len(rootTaskConds) > 0 {
return &mppTask{}
}
ts.filterCondition = filterCondition
// Add filter condition to table plan now.
sessVars := ts.ctx.GetSessionVars()
if len(ts.filterCondition) > 0 {
mpp.cst += mpp.count() * sessVars.CopCPUFactor
sel := PhysicalSelection{Conditions: ts.filterCondition}.Init(ts.ctx, stats, ts.blockOffset)
sel.SetChildren(ts)
sel.cost = mpp.cst
mpp.p = sel
}
return mpp
}
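// addPushedDownSelection pushes the table scan's filter conditions down to the cop task; conditions that cannot be
// pushed down (e.g. those containing virtual columns) are kept as root task conditions.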
func (ts *PhysicalTableScan) addPushedDownSelection(copTask *copTask, stats *property.StatsInfo) {
ts.filterCondition, copTask.rootTaskConds = SplitSelCondsWithVirtualColumn(ts.filterCondition)
var newRootConds []expression.Expression
ts.filterCondition, newRootConds = expression.PushDownExprs(ts.ctx.GetSessionVars().StmtCtx, ts.filterCondition, ts.ctx.GetClient(), ts.StoreType)
copTask.rootTaskConds = append(copTask.rootTaskConds, newRootConds...)
// Add filter condition to table plan now.
sessVars := ts.ctx.GetSessionVars()
if len(ts.filterCondition) > 0 {
copTask.cst += copTask.count() * sessVars.CopCPUFactor
sel := PhysicalSelection{Conditions: ts.filterCondition}.Init(ts.ctx, stats, ts.blockOffset)
sel.SetChildren(ts)
sel.cost = copTask.cst
copTask.tablePlan = sel
}
}
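// getOriginalPhysicalTableScan builds a PhysicalTableScan for the access path and returns it together with its
// estimated cost and row count.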
func (ds *DataSource) getOriginalPhysicalTableScan(prop *property.PhysicalProperty, path *util.AccessPath, isMatchProp bool) (*PhysicalTableScan, float64, float64) {
ts := PhysicalTableScan{
Table: ds.tableInfo,
Columns: ds.Columns,
TableAsName: ds.TableAsName,
DBName: ds.DBName,
isPartition: ds.isPartition,
physicalTableID: ds.physicalTableID,
Ranges: path.Ranges,
AccessCondition: path.AccessConds,
StoreType: path.StoreType,
IsGlobalRead: path.IsTiFlashGlobalRead,
}.Init(ds.ctx, ds.blockOffset)
ts.filterCondition = make([]expression.Expression, len(path.TableFilters))
copy(ts.filterCondition, path.TableFilters)
ts.SetSchema(ds.schema.Clone())
if ts.Table.PKIsHandle {
if pkColInfo := ts.Table.GetPkColInfo(); pkColInfo != nil {
if ds.statisticTable.Columns[pkColInfo.ID] != nil {
ts.Hist = &ds.statisticTable.Columns[pkColInfo.ID].Histogram
}
}
}
rowCount := path.CountAfterAccess
if prop.ExpectedCnt < ds.stats.RowCount {
count, ok, corr := ds.crossEstimateTableRowCount(path, prop.ExpectedCnt, isMatchProp && prop.SortItems[0].Desc)
if ok {
// TODO: actually, before using this count as the estimated row count of table scan, we need additionally
// check if count < row_count(first_region | last_region), and use the larger one since we build one copTask
// for one region now, so even if it is `limit 1`, we have to scan at least one region in table scan.
// Currently, we can use `tikvrpc.CmdDebugGetRegionProperties` interface as `getSampRegionsRowCount()` does
// to get the row count in a region, but that result contains MVCC old version rows, so it is not that accurate.
// Considering that when this scenario happens, the execution time is close between IndexScan and TableScan,
// we do not add this check temporarily.
rowCount = count
} else if abs := math.Abs(corr); abs < 1 {
correlationFactor := math.Pow(1-abs, float64(ds.ctx.GetSessionVars().CorrelationExpFactor))
selectivity := ds.stats.RowCount / rowCount
rowCount = math.Min(prop.ExpectedCnt/selectivity/correlationFactor, rowCount)
}
}
// We need NDV of columns since it may be used in cost estimation of join. Precisely speaking,
// we should track NDV of each histogram bucket, and sum up the NDV of buckets we actually need
// to scan, but this would only help improve accuracy of NDV for one column, for other columns,
// we still need to assume values are uniformly distributed. For simplicity, we use uniform-assumption
// for all columns now, as we do in `deriveStatsByFilter`.
ts.stats = ds.tableStats.ScaleByExpectCnt(rowCount)
var rowSize float64
if ts.StoreType == kv.TiKV {
rowSize = ds.TblColHists.GetTableAvgRowSize(ds.ctx, ds.TblCols, ts.StoreType, true)
} else {
// If `ds.handleCol` is nil, then the schema of tableScan doesn't have handle column.
// This logic can be ensured in column pruning.
rowSize = ds.TblColHists.GetTableAvgRowSize(ds.ctx, ts.Schema().Columns, ts.StoreType, ds.handleCols != nil)
}
sessVars := ds.ctx.GetSessionVars()
cost := rowCount * rowSize * sessVars.ScanFactor
if ts.IsGlobalRead {
cost += rowCount * sessVars.NetworkFactor * rowSize
}
if isMatchProp {
ts.Desc = prop.SortItems[0].Desc
if prop.SortItems[0].Desc && prop.ExpectedCnt >= smallScanThreshold {
cost = rowCount * rowSize * sessVars.DescScanFactor
}
ts.KeepOrder = true
}
switch ts.StoreType {
case kv.TiKV:
cost += float64(len(ts.Ranges)) * sessVars.SeekFactor
case kv.TiFlash:
cost += float64(len(ts.Ranges)) * float64(len(ts.Columns)) * sessVars.SeekFactor
}
return ts, cost, rowCount
}
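// getOriginalPhysicalIndexScan builds a PhysicalIndexScan for the access path and returns it together with its
// estimated cost and row count.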
func (ds *DataSource) getOriginalPhysicalIndexScan(prop *property.PhysicalProperty, path *util.AccessPath, isMatchProp bool, isSingleScan bool) (*PhysicalIndexScan, float64, float64) {
idx := path.Index
is := PhysicalIndexScan{
Table: ds.tableInfo,
TableAsName: ds.TableAsName,
DBName: ds.DBName,
Columns: ds.Columns,
Index: idx,
IdxCols: path.IdxCols,
IdxColLens: path.IdxColLens,
AccessCondition: path.AccessConds,
Ranges: path.Ranges,
dataSourceSchema: ds.schema,
isPartition: ds.isPartition,
physicalTableID: ds.physicalTableID,
}.Init(ds.ctx, ds.blockOffset)
statsTbl := ds.statisticTable
if statsTbl.Indices[idx.ID] != nil {
is.Hist = &statsTbl.Indices[idx.ID].Histogram
}
rowCount := path.CountAfterAccess
is.initSchema(append(path.FullIdxCols, ds.commonHandleCols...), !isSingleScan)
if (isMatchProp || prop.IsEmpty()) && prop.ExpectedCnt < ds.stats.RowCount {
count, ok, corr := ds.crossEstimateIndexRowCount(path, prop.ExpectedCnt, isMatchProp && prop.SortItems[0].Desc)
if ok {
rowCount = count
} else if abs := math.Abs(corr); abs < 1 {
correlationFactor := math.Pow(1-abs, float64(ds.ctx.GetSessionVars().CorrelationExpFactor))
selectivity := ds.stats.RowCount / rowCount
rowCount = math.Min(prop.ExpectedCnt/selectivity/correlationFactor, rowCount)
}
}
is.stats = ds.tableStats.ScaleByExpectCnt(rowCount)
rowSize := is.indexScanRowSize(idx, ds, true)
sessVars := ds.ctx.GetSessionVars()
cost := rowCount * rowSize * sessVars.ScanFactor
if isMatchProp {
is.Desc = prop.SortItems[0].Desc
if prop.SortItems[0].Desc && prop.ExpectedCnt >= smallScanThreshold {
cost = rowCount * rowSize * sessVars.DescScanFactor
}
is.KeepOrder = true
}
cost += float64(len(is.Ranges)) * sessVars.SeekFactor
is.cost = cost
return is, cost, rowCount
}
| planner/core/find_best_task.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0058073862455785275,
0.00023206137120723724,
0.0001612669148016721,
0.0001695999235380441,
0.000445679877884686
] |
{
"id": 3,
"code_window": [
"\t\"github.com/pingcap/tidb/planner/property\"\n",
"\t\"github.com/pingcap/tidb/privilege\"\n",
"\t\"github.com/pingcap/tidb/sessionctx\"\n",
"\t\"github.com/pingcap/tidb/sessionctx/stmtctx\"\n",
"\t\"github.com/pingcap/tidb/table\"\n",
"\t\"github.com/pingcap/tidb/table/tables\"\n",
"\t\"github.com/pingcap/tidb/types\"\n",
"\tdriver \"github.com/pingcap/tidb/types/parser_driver\"\n",
"\ttidbutil \"github.com/pingcap/tidb/util\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/store/tikv\"\n"
],
"file_path": "planner/core/point_get_plan.go",
"type": "add",
"edit_start_line_idx": 35
} | # Proposal: Infer the System Timezone of a TiDB cluster via TZ environment variable
- Author(s): [Zhexuan Yang](www.github.com/zhexuany)
- Last updated: 2018/09/09
- Discussion at: Not applicable
## Abstract
When it comes to time-related calculation, it is hard to get things right in a distributed system. This proposal tries to resolve two problems: 1. the timezone may be inconsistent across multiple `TiDB` instances; 2. the performance degradation caused by pushing `System` down to `TiKV`. The impact of this proposal is changing the way `TiDB` infers the system timezone name. Before this proposal, the default timezone name pushed down to `TiKV` is `System` when the session's timezone is not set. After this change, TiDB evaluates the system timezone name via the `TZ` environment variable and the path of the soft link of `/etc/localtime`. If both of them fail, `TiDB` pushes `UTC` down to `TiKV`.
## Background
After we solved the daylight saving time issue, we found a performance degradation on the TiKV side. Thanks to the investigation done by the TiKV engineers, we know the root cause of this degradation: TiKV infers the `System` timezone name via a third-party library, which makes a syscall and costs a lot. In our internal benchmark system, after [this PR](https://github.com/pingcap/tidb/pull/6823), our codebase is 1000 times slower than before. We have to address this.
Another problem that also needs to be addressed is the potentially inconsistent timezone name across multiple `TiDB` instances. `TiDB` instances may reside in different timezones, which could cause incorrect results for time-related calculations. Simply reading each instance's own system timezone is therefore not reliable. We need to find a way to ensure a unique global timezone name across multiple `TiDB` instances, and to leverage it to resolve the performance degradation as well.
## Proposal
Firstly, we need to introduce the `TZ` environment variable. On a POSIX system, the value of the `TZ` variable can be in one of the following three formats. A detailed description can be found in [this link](http://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html).
* std offset
* std offset dst [offset], start[/time], end[/time]
* :characters
The `std` part means the IANA timezone name; the `offset` means the timezone offset; the `dst` indicates that the preceding timezone has daylight saving time.
In our case, which covers both `TiDB` and `TiKV`, we only need to care about the first and third formats. To explain why we do not need the second format, we need to review how Golang evaluates timezones. In the `time` package, the method [LoadLocation](https://golang.org/pkg/time/#LoadLocation) reads tzdata from pre-specified sources (directories that may contain tzdata) and then builds a `time.Location` from that tzdata, which already contains the daylight saving time information.
In this proposal, we suggest setting `TZ` to a valid IANA timezone name which can be read by `TiDB` later. If `TiDB` can't get `TZ`, or the supplied `TZ` is invalid, `TiDB` falls back to evaluating the path of the soft link of `/etc/localtime`. In addition, a warning message telling the user to set `TZ` properly will be printed. Setting `TZ` can be done in our `tidb-ansible` project; it can also be done on the user side by `export TZ="Asia/Shanghai"`. If both of them fail, `TiDB` will use `UTC` as the timezone name.
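The inference order can be sketched as follows. This is only a minimal illustration of the proposal, not the code in the referenced PR; the function name `inferSystemTZ` and the warning messages are made up for this sketch.
```go
package main
import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"
)
// inferSystemTZ follows the proposed order: the TZ environment variable first,
// then the target path of the /etc/localtime soft link, and finally UTC.
func inferSystemTZ() string {
	if tz := os.Getenv("TZ"); tz != "" {
		if _, err := time.LoadLocation(tz); err == nil {
			return tz
		}
		fmt.Fprintf(os.Stderr, "warning: invalid TZ %q, please set TZ properly\n", tz)
	}
	// Evaluate the soft link, e.g. /usr/share/zoneinfo/Asia/Shanghai -> Asia/Shanghai.
	if target, err := filepath.EvalSymlinks("/etc/localtime"); err == nil {
		if idx := strings.Index(target, "zoneinfo/"); idx != -1 {
			name := target[idx+len("zoneinfo/"):]
			if _, err := time.LoadLocation(name); err == nil {
				return name
			}
		}
	}
	fmt.Fprintln(os.Stderr, "warning: cannot infer the system timezone, fall back to UTC")
	return "UTC"
}
func main() {
	fmt.Println(inferSystemTZ())
}
```
The handling of the leading colon in the third POSIX format is omitted here for brevity.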
The positive side of this change is resolving the performance degradation issue and ensuring the uniqueness of the global timezone name across multiple `TiDB` instances.
The negative side is just adding a config item, which is a very small matter, and the user probably does not care about it as long as we take care of it and, more importantly, guarantee the correctness.
## Rationale
We tried to read the system timezone name by checking the path of the soft link of `/etc/localtime`, but, sadly, this failed in a corner case: docker. A docker image copies the real timezone file but links `/etc/localtime` to `/usr/share/zoneinfo/utc`. The timezone data is correct but the path is not. Regarding `UTC`, Golang just returns the `UTC` instance and will not further read tzdata from sources. This leads to a fallback solution: when we cannot evaluate the timezone from the path, we fall back to `UTC`.
## Compatibility
It does not have a compatibility issue as long as the user deploys with `tidb-ansible`. We may mention this in our release note and in the message printed before tidb quits, which must be easy to understand.
The upgrading process needs to be handled with particular care: the `TZ` environment variable has to be set before we start the new `TiDB` binary. In this way, the following bootstrap process can benefit from it and avoid any hazard.
## Implementation
The implementation is relatively easy. We just get the `TZ` environment variable from the system and check whether it is valid or not. If it is invalid, TiDB evaluates the path of the soft link of `/etc/localtime`. In addition, a warning message needs to be printed indicating that the user has to set the `TZ` variable properly. For example, if `/etc/localtime` links to `/usr/share/zoneinfo/Asia/Shanghai`, then the timezone name `TiDB` gets should be `Asia/Shanghai`.
In order to ensure the uniqueness of the global timezone across multiple `TiDB` instances, we need to write the timezone name into `variable_value` with the variable name `system_tz` in `mysql.tidb`. This cached value can be read once `TiDB` finishes its bootstrap stage. A method `loadLocalStr` can do this job.
## Open issues (if applicable)
PR of this proposal: https://github.com/pingcap/tidb/pull/7638/files
PR changing the TZ loading logic of golang: https://github.com/golang/go/pull/27570
| docs/design/2018-09-10-adding-tz-env.md | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.00023084630083758384,
0.00017703959019854665,
0.000162969168741256,
0.00016704024164937437,
0.0000241946672758786
] |
{
"id": 4,
"code_window": [
"\t\t\t\tlock = true\n",
"\t\t\t\twaitTime = sessVars.LockWaitTimeout\n",
"\t\t\t\tif lockInfo.LockType == ast.SelectLockForUpdateWaitN {\n",
"\t\t\t\t\twaitTime = int64(lockInfo.WaitSec * 1000)\n",
"\t\t\t\t} else if lockInfo.LockType == ast.SelectLockForUpdateNoWait {\n",
"\t\t\t\t\twaitTime = kv.LockNoWait\n",
"\t\t\t\t}\n",
"\t\t\t}\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\twaitTime = tikv.LockNoWait\n"
],
"file_path": "planner/core/point_get_plan.go",
"type": "replace",
"edit_start_line_idx": 518
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"encoding/hex"
"math/rand"
"strings"
"sync/atomic"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
tidbkv "github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/logutil"
"github.com/pingcap/tidb/store/tikv/metrics"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
)
type actionPessimisticLock struct {
*kv.LockCtx
}
type actionPessimisticRollback struct{}
var (
_ twoPhaseCommitAction = actionPessimisticLock{}
_ twoPhaseCommitAction = actionPessimisticRollback{}
)
func (actionPessimisticLock) String() string {
return "pessimistic_lock"
}
func (actionPessimisticLock) tiKVTxnRegionsNumHistogram() prometheus.Observer {
return metrics.TxnRegionsNumHistogramPessimisticLock
}
func (actionPessimisticRollback) String() string {
return "pessimistic_rollback"
}
func (actionPessimisticRollback) tiKVTxnRegionsNumHistogram() prometheus.Observer {
return metrics.TxnRegionsNumHistogramPessimisticRollback
}
func (action actionPessimisticLock) handleSingleBatch(c *twoPhaseCommitter, bo *Backoffer, batch batchMutations) error {
m := batch.mutations
mutations := make([]*pb.Mutation, m.Len())
for i := 0; i < m.Len(); i++ {
mut := &pb.Mutation{
Op: pb.Op_PessimisticLock,
Key: m.GetKey(i),
}
if c.txn.us.HasPresumeKeyNotExists(m.GetKey(i)) || (c.doingAmend && m.GetOp(i) == pb.Op_Insert) {
mut.Assertion = pb.Assertion_NotExist
}
mutations[i] = mut
}
elapsed := uint64(time.Since(c.txn.startTime) / time.Millisecond)
ttl := elapsed + atomic.LoadUint64(&ManagedLockTTL)
failpoint.Inject("shortPessimisticLockTTL", func() {
ttl = 1
keys := make([]string, 0, len(mutations))
for _, m := range mutations {
keys = append(keys, hex.EncodeToString(m.Key))
}
logutil.BgLogger().Info("[failpoint] injected lock ttl = 1 on pessimistic lock",
zap.Uint64("txnStartTS", c.startTS), zap.Strings("keys", keys))
})
req := tikvrpc.NewRequest(tikvrpc.CmdPessimisticLock, &pb.PessimisticLockRequest{
Mutations: mutations,
PrimaryLock: c.primary(),
StartVersion: c.startTS,
ForUpdateTs: c.forUpdateTS,
LockTtl: ttl,
IsFirstLock: c.isFirstLock,
WaitTimeout: action.LockWaitTime,
ReturnValues: action.ReturnValues,
MinCommitTs: c.forUpdateTS + 1,
}, pb.Context{Priority: c.priority, SyncLog: c.syncLog})
lockWaitStartTime := action.WaitStartTime
for {
// if lockWaitTime set, refine the request `WaitTimeout` field based on timeout limit
if action.LockWaitTime > 0 {
timeLeft := action.LockWaitTime - (time.Since(lockWaitStartTime)).Milliseconds()
if timeLeft <= 0 {
req.PessimisticLock().WaitTimeout = tidbkv.LockNoWait
} else {
req.PessimisticLock().WaitTimeout = timeLeft
}
}
failpoint.Inject("PessimisticLockErrWriteConflict", func() error {
time.Sleep(300 * time.Millisecond)
return &kv.ErrWriteConflict{WriteConflict: nil}
})
startTime := time.Now()
resp, err := c.store.SendReq(bo, req, batch.region, ReadTimeoutShort)
if action.LockCtx.Stats != nil {
atomic.AddInt64(&action.LockCtx.Stats.LockRPCTime, int64(time.Since(startTime)))
atomic.AddInt64(&action.LockCtx.Stats.LockRPCCount, 1)
}
if err != nil {
return errors.Trace(err)
}
regionErr, err := resp.GetRegionError()
if err != nil {
return errors.Trace(err)
}
if regionErr != nil {
err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String()))
if err != nil {
return errors.Trace(err)
}
err = c.pessimisticLockMutations(bo, action.LockCtx, batch.mutations)
return errors.Trace(err)
}
if resp.Resp == nil {
return errors.Trace(kv.ErrBodyMissing)
}
lockResp := resp.Resp.(*pb.PessimisticLockResponse)
keyErrs := lockResp.GetErrors()
if len(keyErrs) == 0 {
if action.ReturnValues {
action.ValuesLock.Lock()
for i, mutation := range mutations {
action.Values[string(mutation.Key)] = kv.ReturnedValue{Value: lockResp.Values[i]}
}
action.ValuesLock.Unlock()
}
return nil
}
var locks []*Lock
for _, keyErr := range keyErrs {
// Check already exists error
if alreadyExist := keyErr.GetAlreadyExist(); alreadyExist != nil {
e := &kv.ErrKeyExist{AlreadyExist: alreadyExist}
return c.extractKeyExistsErr(e)
}
if deadlock := keyErr.Deadlock; deadlock != nil {
return &kv.ErrDeadlock{Deadlock: deadlock}
}
// Extract lock from key error
lock, err1 := extractLockFromKeyErr(keyErr)
if err1 != nil {
return errors.Trace(err1)
}
locks = append(locks, lock)
}
// Because we already waited on tikv, no need to Backoff here.
// tikv default will wait 3s(also the maximum wait value) when lock error occurs
startTime = time.Now()
msBeforeTxnExpired, _, err := c.store.lockResolver.ResolveLocks(bo, 0, locks)
if err != nil {
return errors.Trace(err)
}
if action.LockCtx.Stats != nil {
atomic.AddInt64(&action.LockCtx.Stats.ResolveLockTime, int64(time.Since(startTime)))
}
// If msBeforeTxnExpired is not zero, it means there are still locks blocking us acquiring
// the pessimistic lock. We should return acquire fail with nowait set or timeout error if necessary.
if msBeforeTxnExpired > 0 {
if action.LockWaitTime == tidbkv.LockNoWait {
return kv.ErrLockAcquireFailAndNoWaitSet
} else if action.LockWaitTime == tidbkv.LockAlwaysWait {
// do nothing but keep wait
} else {
// the lockWaitTime is set, we should return wait timeout if we are still blocked by a lock
if time.Since(lockWaitStartTime).Milliseconds() >= action.LockWaitTime {
return errors.Trace(kv.ErrLockWaitTimeout)
}
}
if action.LockCtx.PessimisticLockWaited != nil {
atomic.StoreInt32(action.LockCtx.PessimisticLockWaited, 1)
}
}
// Handle the killed flag when waiting for the pessimistic lock.
// When a txn runs into LockKeys() and backoff here, it has no chance to call
// executor.Next() and check the killed flag.
if action.Killed != nil {
// Do not reset the killed flag here!
			// actionPessimisticLock runs on each region in parallel, so we have to consider that
// the error may be dropped.
if atomic.LoadUint32(action.Killed) == 1 {
return errors.Trace(kv.ErrQueryInterrupted)
}
}
}
}
func (actionPessimisticRollback) handleSingleBatch(c *twoPhaseCommitter, bo *Backoffer, batch batchMutations) error {
req := tikvrpc.NewRequest(tikvrpc.CmdPessimisticRollback, &pb.PessimisticRollbackRequest{
StartVersion: c.startTS,
ForUpdateTs: c.forUpdateTS,
Keys: batch.mutations.GetKeys(),
})
resp, err := c.store.SendReq(bo, req, batch.region, ReadTimeoutShort)
if err != nil {
return errors.Trace(err)
}
regionErr, err := resp.GetRegionError()
if err != nil {
return errors.Trace(err)
}
if regionErr != nil {
err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String()))
if err != nil {
return errors.Trace(err)
}
err = c.pessimisticRollbackMutations(bo, batch.mutations)
return errors.Trace(err)
}
return nil
}
func (c *twoPhaseCommitter) pessimisticLockMutations(bo *Backoffer, lockCtx *kv.LockCtx, mutations CommitterMutations) error {
if c.sessionID > 0 {
failpoint.Inject("beforePessimisticLock", func(val failpoint.Value) {
// Pass multiple instructions in one string, delimited by commas, to trigger multiple behaviors, like
// `return("delay,fail")`. Then they will be executed sequentially at once.
if v, ok := val.(string); ok {
for _, action := range strings.Split(v, ",") {
if action == "delay" {
duration := time.Duration(rand.Int63n(int64(time.Second) * 5))
logutil.Logger(bo.ctx).Info("[failpoint] injected delay at pessimistic lock",
zap.Uint64("txnStartTS", c.startTS), zap.Duration("duration", duration))
time.Sleep(duration)
} else if action == "fail" {
logutil.Logger(bo.ctx).Info("[failpoint] injected failure at pessimistic lock",
zap.Uint64("txnStartTS", c.startTS))
failpoint.Return(errors.New("injected failure at pessimistic lock"))
}
}
}
})
}
return c.doActionOnMutations(bo, actionPessimisticLock{lockCtx}, mutations)
}
func (c *twoPhaseCommitter) pessimisticRollbackMutations(bo *Backoffer, mutations CommitterMutations) error {
return c.doActionOnMutations(bo, actionPessimisticRollback{}, mutations)
}
| store/tikv/pessimistic.go | 1 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.007973898202180862,
0.0013367527863010764,
0.0001623280404601246,
0.00017592048970982432,
0.0021019577980041504
] |
{
"id": 4,
"code_window": [
"\t\t\t\tlock = true\n",
"\t\t\t\twaitTime = sessVars.LockWaitTimeout\n",
"\t\t\t\tif lockInfo.LockType == ast.SelectLockForUpdateWaitN {\n",
"\t\t\t\t\twaitTime = int64(lockInfo.WaitSec * 1000)\n",
"\t\t\t\t} else if lockInfo.LockType == ast.SelectLockForUpdateNoWait {\n",
"\t\t\t\t\twaitTime = kv.LockNoWait\n",
"\t\t\t\t}\n",
"\t\t\t}\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\twaitTime = tikv.LockNoWait\n"
],
"file_path": "planner/core/point_get_plan.go",
"type": "replace",
"edit_start_line_idx": 518
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package property
import (
"fmt"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/util/codec"
)
// wholeTaskTypes records all possible kinds of task that a plan can return. For Agg, TopN and Limit, we will try to get
// these tasks one by one.
var wholeTaskTypes = []TaskType{CopSingleReadTaskType, CopDoubleReadTaskType, RootTaskType}
// SortItem wraps the column and its order.
type SortItem struct {
Col *expression.Column
Desc bool
}
// PartitionType is the way to partition during mpp data exchanging.
type PartitionType int
const (
// AnyType will not require any special partition types.
AnyType PartitionType = iota
// BroadcastType requires current task to broadcast its data.
BroadcastType
// HashType requires current task to shuffle its data according to some columns.
HashType
)
// PhysicalProperty stands for the required physical property by parents.
// It contains the orders and the task types.
type PhysicalProperty struct {
// SortItems contains the required sort attributes.
SortItems []SortItem
// TaskTp means the type of task that an operator requires.
//
// It needs to be specified because two different tasks can't be compared
	// with cost directly. e.g. If a copTask takes less cost than a rootTask,
	// we can't be sure that we should choose the former one, because the copTask
	// must be finished eventually and its cost will increase at that point, but we
	// can't know the finishing time in advance. So the best way to make the comparison fair is
// to add TaskType to required property.
TaskTp TaskType
// ExpectedCnt means this operator may be closed after fetching ExpectedCnt
// records.
ExpectedCnt float64
// hashcode stores the hash code of a PhysicalProperty, will be lazily
// calculated when function "HashCode()" being called.
hashcode []byte
// indicates that whether we are allowed to add an enforcer.
CanAddEnforcer bool
// If the partition type is hash, the data should be reshuffled by partition cols.
PartitionCols []*expression.Column
	// PartitionTp indicates which type the exchange sender belongs to; it only takes effect when it's an mpp task.
PartitionTp PartitionType
}
// NewPhysicalProperty builds property from columns.
func NewPhysicalProperty(taskTp TaskType, cols []*expression.Column, desc bool, expectCnt float64, enforced bool) *PhysicalProperty {
return &PhysicalProperty{
SortItems: SortItemsFromCols(cols, desc),
TaskTp: taskTp,
ExpectedCnt: expectCnt,
CanAddEnforcer: enforced,
}
}
// SortItemsFromCols builds property items from columns.
func SortItemsFromCols(cols []*expression.Column, desc bool) []SortItem {
items := make([]SortItem, 0, len(cols))
for _, col := range cols {
items = append(items, SortItem{Col: col, Desc: desc})
}
return items
}
// IsSubsetOf check if the keys can match the needs of partition.
func (p *PhysicalProperty) IsSubsetOf(keys []*expression.Column) []int {
if len(p.PartitionCols) > len(keys) {
return nil
}
matches := make([]int, 0, len(keys))
for _, partCol := range p.PartitionCols {
found := false
for i, key := range keys {
if partCol.Equal(nil, key) {
found = true
matches = append(matches, i)
break
}
}
if !found {
return nil
}
}
return matches
}
// AllColsFromSchema checks whether all the columns needed by this physical
// property can be found in the given schema.
func (p *PhysicalProperty) AllColsFromSchema(schema *expression.Schema) bool {
for _, col := range p.SortItems {
if schema.ColumnIndex(col.Col) == -1 {
return false
}
}
return true
}
// IsFlashProp return true if this physical property is only allowed to generate flash related task
func (p *PhysicalProperty) IsFlashProp() bool {
return p.TaskTp == CopTiFlashLocalReadTaskType || p.TaskTp == CopTiFlashGlobalReadTaskType || p.TaskTp == MppTaskType
}
// GetAllPossibleChildTaskTypes enumerates the possible types of tasks for children.
func (p *PhysicalProperty) GetAllPossibleChildTaskTypes() []TaskType {
if p.TaskTp == RootTaskType {
return wholeTaskTypes
}
// TODO: For CopSingleReadTaskType and CopDoubleReadTaskType, this function should never be called
return []TaskType{p.TaskTp}
}
// IsPrefix checks whether the order property is the prefix of another.
func (p *PhysicalProperty) IsPrefix(prop *PhysicalProperty) bool {
if len(p.SortItems) > len(prop.SortItems) {
return false
}
for i := range p.SortItems {
if !p.SortItems[i].Col.Equal(nil, prop.SortItems[i].Col) || p.SortItems[i].Desc != prop.SortItems[i].Desc {
return false
}
}
return true
}
// IsEmpty checks whether the order property is empty.
func (p *PhysicalProperty) IsEmpty() bool {
return len(p.SortItems) == 0
}
// HashCode calculates hash code for a PhysicalProperty object.
func (p *PhysicalProperty) HashCode() []byte {
if p.hashcode != nil {
return p.hashcode
}
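	// hashcodeSize is only a capacity estimate for pre-allocation; the appends below determine the actual length.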
hashcodeSize := 8 + 8 + 8 + (16+8)*len(p.SortItems) + 8
p.hashcode = make([]byte, 0, hashcodeSize)
if p.CanAddEnforcer {
p.hashcode = codec.EncodeInt(p.hashcode, 1)
} else {
p.hashcode = codec.EncodeInt(p.hashcode, 0)
}
p.hashcode = codec.EncodeInt(p.hashcode, int64(p.TaskTp))
p.hashcode = codec.EncodeFloat(p.hashcode, p.ExpectedCnt)
for _, item := range p.SortItems {
p.hashcode = append(p.hashcode, item.Col.HashCode(nil)...)
if item.Desc {
p.hashcode = codec.EncodeInt(p.hashcode, 1)
} else {
p.hashcode = codec.EncodeInt(p.hashcode, 0)
}
}
if p.TaskTp == MppTaskType {
p.hashcode = codec.EncodeInt(p.hashcode, int64(p.PartitionTp))
for _, col := range p.PartitionCols {
p.hashcode = append(p.hashcode, col.HashCode(nil)...)
}
}
return p.hashcode
}
// String implements fmt.Stringer interface. Just for test.
func (p *PhysicalProperty) String() string {
return fmt.Sprintf("Prop{cols: %v, TaskTp: %s, expectedCount: %v}", p.SortItems, p.TaskTp, p.ExpectedCnt)
}
// CloneEssentialFields returns a copy of PhysicalProperty. We only copy the essential fields that really indicate the
// property, specifically, `CanAddEnforcer` should not be included.
func (p *PhysicalProperty) CloneEssentialFields() *PhysicalProperty {
prop := &PhysicalProperty{
SortItems: p.SortItems,
TaskTp: p.TaskTp,
ExpectedCnt: p.ExpectedCnt,
PartitionTp: p.PartitionTp,
PartitionCols: p.PartitionCols,
}
return prop
}
// AllSameOrder checks if all the sort items share the same order. It returns whether they do and, if so, whether
// that order is descending.
func (p *PhysicalProperty) AllSameOrder() (bool, bool) {
if len(p.SortItems) == 0 {
return true, false
}
for i := 1; i < len(p.SortItems); i++ {
if p.SortItems[i].Desc != p.SortItems[i-1].Desc {
return false, false
}
}
return true, p.SortItems[0].Desc
}
| planner/property/physical_property.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.00047920175711624324,
0.00018879806157201529,
0.00016199189121834934,
0.00017177368863485754,
0.000066527230956126
] |
{
"id": 4,
"code_window": [
"\t\t\t\tlock = true\n",
"\t\t\t\twaitTime = sessVars.LockWaitTimeout\n",
"\t\t\t\tif lockInfo.LockType == ast.SelectLockForUpdateWaitN {\n",
"\t\t\t\t\twaitTime = int64(lockInfo.WaitSec * 1000)\n",
"\t\t\t\t} else if lockInfo.LockType == ast.SelectLockForUpdateNoWait {\n",
"\t\t\t\t\twaitTime = kv.LockNoWait\n",
"\t\t\t\t}\n",
"\t\t\t}\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\twaitTime = tikv.LockNoWait\n"
],
"file_path": "planner/core/point_get_plan.go",
"type": "replace",
"edit_start_line_idx": 518
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package expression
import (
"math"
"strings"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/testutil"
)
func (s *testEvaluatorSuite) TestInetAton(c *C) {
tbl := []struct {
Input interface{}
Expected interface{}
}{
{"", nil},
{nil, nil},
{"255.255.255.255", 4294967295},
{"0.0.0.0", 0},
{"127.0.0.1", 2130706433},
{"0.0.0.256", nil},
{"113.14.22.3", 1896748547},
{"127", 127},
{"127.255", 2130706687},
{"127,256", nil},
{"127.2.1", 2130837505},
{"123.2.1.", nil},
{"127.0.0.1.1", nil},
}
dtbl := tblToDtbl(tbl)
fc := funcs[ast.InetAton]
for _, t := range dtbl {
f, err := fc.getFunction(s.ctx, s.datumsToConstants(t["Input"]))
c.Assert(err, IsNil)
d, err := evalBuiltinFunc(f, chunk.Row{})
c.Assert(err, IsNil)
c.Assert(d, testutil.DatumEquals, t["Expected"][0])
}
}
func (s *testEvaluatorSuite) TestIsIPv4(c *C) {
tests := []struct {
ip string
expect interface{}
}{
{"192.168.1.1", 1},
{"255.255.255.255", 1},
{"10.t.255.255", 0},
{"10.1.2.3.4", 0},
{"2001:250:207:0:0:eef2::1", 0},
{"::ffff:1.2.3.4", 0},
{"1...1", 0},
{"192.168.1.", 0},
{".168.1.2", 0},
{"168.1.2", 0},
{"1.2.3.4.5", 0},
}
fc := funcs[ast.IsIPv4]
for _, test := range tests {
ip := types.NewStringDatum(test.ip)
f, err := fc.getFunction(s.ctx, s.datumsToConstants([]types.Datum{ip}))
c.Assert(err, IsNil)
result, err := evalBuiltinFunc(f, chunk.Row{})
c.Assert(err, IsNil)
c.Assert(result, testutil.DatumEquals, types.NewDatum(test.expect))
}
// test NULL input for is_ipv4
var argNull types.Datum
f, _ := fc.getFunction(s.ctx, s.datumsToConstants([]types.Datum{argNull}))
r, err := evalBuiltinFunc(f, chunk.Row{})
c.Assert(err, IsNil)
c.Assert(r, testutil.DatumEquals, types.NewDatum(0))
}
func (s *testEvaluatorSuite) TestUUID(c *C) {
f, err := newFunctionForTest(s.ctx, ast.UUID)
c.Assert(err, IsNil)
d, err := f.Eval(chunk.Row{})
c.Assert(err, IsNil)
parts := strings.Split(d.GetString(), "-")
c.Assert(len(parts), Equals, 5)
for i, p := range parts {
switch i {
case 0:
c.Assert(len(p), Equals, 8)
case 1:
c.Assert(len(p), Equals, 4)
case 2:
c.Assert(len(p), Equals, 4)
case 3:
c.Assert(len(p), Equals, 4)
case 4:
c.Assert(len(p), Equals, 12)
}
}
_, err = funcs[ast.UUID].getFunction(s.ctx, s.datumsToConstants(nil))
c.Assert(err, IsNil)
}
func (s *testEvaluatorSuite) TestAnyValue(c *C) {
tbl := []struct {
arg interface{}
ret interface{}
}{
{nil, nil},
{1234, 1234},
{-0x99, -0x99},
{3.1415926, 3.1415926},
{"Hello, World", "Hello, World"},
}
for _, t := range tbl {
fc := funcs[ast.AnyValue]
f, err := fc.getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(t.arg)))
c.Assert(err, IsNil)
r, err := evalBuiltinFunc(f, chunk.Row{})
c.Assert(err, IsNil)
c.Assert(r, testutil.DatumEquals, types.NewDatum(t.ret))
}
}
func (s *testEvaluatorSuite) TestIsIPv6(c *C) {
tests := []struct {
ip string
expect interface{}
}{
{"2001:250:207:0:0:eef2::1", 1},
{"2001:0250:0207:0001:0000:0000:0000:ff02", 1},
{"2001:250:207::eff2::1,", 0},
{"192.168.1.1", 0},
{"::ffff:1.2.3.4", 1},
}
fc := funcs[ast.IsIPv6]
for _, test := range tests {
ip := types.NewStringDatum(test.ip)
f, err := fc.getFunction(s.ctx, s.datumsToConstants([]types.Datum{ip}))
c.Assert(err, IsNil)
result, err := evalBuiltinFunc(f, chunk.Row{})
c.Assert(err, IsNil)
c.Assert(result, testutil.DatumEquals, types.NewDatum(test.expect))
}
// test NULL input for is_ipv6
var argNull types.Datum
f, _ := fc.getFunction(s.ctx, s.datumsToConstants([]types.Datum{argNull}))
r, err := evalBuiltinFunc(f, chunk.Row{})
c.Assert(err, IsNil)
c.Assert(r, testutil.DatumEquals, types.NewDatum(0))
}
func (s *testEvaluatorSuite) TestInetNtoa(c *C) {
tests := []struct {
ip int
expect interface{}
}{
{167773449, "10.0.5.9"},
{2063728641, "123.2.0.1"},
{0, "0.0.0.0"},
{545460846593, nil},
{-1, nil},
{math.MaxUint32, "255.255.255.255"},
}
fc := funcs[ast.InetNtoa]
for _, test := range tests {
ip := types.NewDatum(test.ip)
f, err := fc.getFunction(s.ctx, s.datumsToConstants([]types.Datum{ip}))
c.Assert(err, IsNil)
result, err := evalBuiltinFunc(f, chunk.Row{})
c.Assert(err, IsNil)
c.Assert(result, testutil.DatumEquals, types.NewDatum(test.expect))
}
var argNull types.Datum
f, _ := fc.getFunction(s.ctx, s.datumsToConstants([]types.Datum{argNull}))
r, err := evalBuiltinFunc(f, chunk.Row{})
c.Assert(err, IsNil)
c.Assert(r.IsNull(), IsTrue)
}
func (s *testEvaluatorSuite) TestInet6NtoA(c *C) {
tests := []struct {
ip []byte
expect interface{}
}{
// Success cases
{[]byte{0x00, 0x00, 0x00, 0x00}, "0.0.0.0"},
{[]byte{0x0A, 0x00, 0x05, 0x09}, "10.0.5.9"},
{[]byte{0xFD, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5A, 0x55, 0xCA, 0xFF, 0xFE,
0xFA, 0x90, 0x89}, "fdfe::5a55:caff:fefa:9089"},
{[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x01,
0x02, 0x03, 0x04}, "::ffff:1.2.3.4"},
{[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF}, "::ffff:255.255.255.255"},
// Fail cases
{[]byte{}, nil}, // missing bytes
{[]byte{0x0A, 0x00, 0x05}, nil}, // missing a byte ipv4
{[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF}, nil}, // missing a byte ipv6
}
fc := funcs[ast.Inet6Ntoa]
for _, test := range tests {
ip := types.NewDatum(test.ip)
f, err := fc.getFunction(s.ctx, s.datumsToConstants([]types.Datum{ip}))
c.Assert(err, IsNil)
result, err := evalBuiltinFunc(f, chunk.Row{})
c.Assert(err, IsNil)
c.Assert(result, testutil.DatumEquals, types.NewDatum(test.expect))
}
var argNull types.Datum
f, _ := fc.getFunction(s.ctx, s.datumsToConstants([]types.Datum{argNull}))
r, err := evalBuiltinFunc(f, chunk.Row{})
c.Assert(err, IsNil)
c.Assert(r.IsNull(), IsTrue)
}
func (s *testEvaluatorSuite) TestInet6AtoN(c *C) {
tests := []struct {
ip string
expect interface{}
}{
{"0.0.0.0", []byte{0x00, 0x00, 0x00, 0x00}},
{"10.0.5.9", []byte{0x0A, 0x00, 0x05, 0x09}},
{"fdfe::5a55:caff:fefa:9089", []byte{0xFD, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5A, 0x55, 0xCA, 0xFF, 0xFE, 0xFA, 0x90, 0x89}},
{"::ffff:1.2.3.4", []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x01, 0x02, 0x03, 0x04}},
{"", nil},
{"Not IP address", nil},
{"::ffff:255.255.255.255", []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}},
}
fc := funcs[ast.Inet6Aton]
for _, test := range tests {
ip := types.NewDatum(test.ip)
f, err := fc.getFunction(s.ctx, s.datumsToConstants([]types.Datum{ip}))
c.Assert(err, IsNil)
result, err := evalBuiltinFunc(f, chunk.Row{})
c.Assert(err, IsNil)
c.Assert(result, testutil.DatumEquals, types.NewDatum(test.expect))
}
var argNull types.Datum
f, _ := fc.getFunction(s.ctx, s.datumsToConstants([]types.Datum{argNull}))
r, err := evalBuiltinFunc(f, chunk.Row{})
c.Assert(err, IsNil)
c.Assert(r.IsNull(), IsTrue)
}
func (s *testEvaluatorSuite) TestIsIPv4Mapped(c *C) {
tests := []struct {
ip []byte
expect interface{}
}{
{[]byte{}, 0},
{[]byte{0x10, 0x10, 0x10, 0x10}, 0},
{[]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x1, 0x2, 0x3, 0x4}, 1},
{[]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xff, 0xff, 0x1, 0x2, 0x3, 0x4}, 0},
{[]byte{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6}, 0},
}
fc := funcs[ast.IsIPv4Mapped]
for _, test := range tests {
ip := types.NewDatum(test.ip)
f, err := fc.getFunction(s.ctx, s.datumsToConstants([]types.Datum{ip}))
c.Assert(err, IsNil)
result, err := evalBuiltinFunc(f, chunk.Row{})
c.Assert(err, IsNil)
c.Assert(result, testutil.DatumEquals, types.NewDatum(test.expect))
}
var argNull types.Datum
f, _ := fc.getFunction(s.ctx, s.datumsToConstants([]types.Datum{argNull}))
r, err := evalBuiltinFunc(f, chunk.Row{})
c.Assert(err, IsNil)
c.Assert(r, testutil.DatumEquals, types.NewDatum(int64(0)))
}
func (s *testEvaluatorSuite) TestIsIPv4Compat(c *C) {
tests := []struct {
ip []byte
expect interface{}
}{
{[]byte{}, 0},
{[]byte{0x10, 0x10, 0x10, 0x10}, 0},
{[]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x2, 0x3, 0x4}, 1},
{[]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x1, 0x2, 0x3, 0x4}, 0},
{[]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xff, 0xff, 0x1, 0x2, 0x3, 0x4}, 0},
{[]byte{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6}, 0},
}
fc := funcs[ast.IsIPv4Compat]
for _, test := range tests {
ip := types.NewDatum(test.ip)
f, err := fc.getFunction(s.ctx, s.datumsToConstants([]types.Datum{ip}))
c.Assert(err, IsNil)
result, err := evalBuiltinFunc(f, chunk.Row{})
c.Assert(err, IsNil)
c.Assert(result, testutil.DatumEquals, types.NewDatum(test.expect))
}
var argNull types.Datum
f, _ := fc.getFunction(s.ctx, s.datumsToConstants([]types.Datum{argNull}))
r, err := evalBuiltinFunc(f, chunk.Row{})
c.Assert(err, IsNil)
c.Assert(r, testutil.DatumEquals, types.NewDatum(0))
}
func (s *testEvaluatorSuite) TestNameConst(c *C) {
dec := types.NewDecFromFloatForTest(123.123)
tm := types.NewTime(types.FromGoTime(time.Now()), mysql.TypeDatetime, 6)
du := types.Duration{Duration: 12*time.Hour + 1*time.Minute + 1*time.Second, Fsp: types.DefaultFsp}
cases := []struct {
colName string
arg interface{}
isNil bool
asserts func(d types.Datum)
}{
{"test_int", 3, false, func(d types.Datum) {
c.Assert(d.GetInt64(), Equals, int64(3))
}},
{"test_float", 3.14159, false, func(d types.Datum) {
c.Assert(d.GetFloat64(), Equals, 3.14159)
}},
{"test_string", "TiDB", false, func(d types.Datum) {
c.Assert(d.GetString(), Equals, "TiDB")
}},
{"test_null", nil, true, func(d types.Datum) {
c.Assert(d.Kind(), Equals, types.KindNull)
}},
{"test_decimal", dec, false, func(d types.Datum) {
c.Assert(d.GetMysqlDecimal().String(), Equals, dec.String())
}},
{"test_time", tm, false, func(d types.Datum) {
c.Assert(d.GetMysqlTime().String(), Equals, tm.String())
}},
{"test_duration", du, false, func(d types.Datum) {
c.Assert(d.GetMysqlDuration().String(), Equals, du.String())
}},
}
for _, t := range cases {
f, err := newFunctionForTest(s.ctx, ast.NameConst, s.primitiveValsToConstants([]interface{}{t.colName, t.arg})...)
c.Assert(err, IsNil)
d, err := f.Eval(chunk.Row{})
c.Assert(err, IsNil)
t.asserts(d)
}
}
| expression/builtin_miscellaneous_test.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.00017799154738895595,
0.00017161265714094043,
0.00016498849436175078,
0.00017174388631246984,
0.0000034694048736128025
] |
{
"id": 4,
"code_window": [
"\t\t\t\tlock = true\n",
"\t\t\t\twaitTime = sessVars.LockWaitTimeout\n",
"\t\t\t\tif lockInfo.LockType == ast.SelectLockForUpdateWaitN {\n",
"\t\t\t\t\twaitTime = int64(lockInfo.WaitSec * 1000)\n",
"\t\t\t\t} else if lockInfo.LockType == ast.SelectLockForUpdateNoWait {\n",
"\t\t\t\t\twaitTime = kv.LockNoWait\n",
"\t\t\t\t}\n",
"\t\t\t}\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\twaitTime = tikv.LockNoWait\n"
],
"file_path": "planner/core/point_get_plan.go",
"type": "replace",
"edit_start_line_idx": 518
} | [
{
"Name": "TestGroupNDVs",
"Cases": [
{
"SQL": "select count(1) from t1 group by a, b",
"AggInput": "[{[1 2] 4}]",
"JoinInput": ""
},
{
"SQL": "select * from t1, t2 where t1.a = t2.a and t1.b = t2.b",
"AggInput": "",
"JoinInput": "[{[5 6] 4}];[{[8 9] 9}]"
},
{
"SQL": "select count(1) from t1 where a > 0 group by a, b",
"AggInput": "[{[11 12] 4}]",
"JoinInput": ""
},
{
"SQL": "select count(1) from t1 where b > 0 group by a, b",
"AggInput": "[{[15 16] 4}]",
"JoinInput": ""
},
{
"SQL": "select count(1) from t1 where cos(a) > 0 group by a, b",
"AggInput": "[]",
"JoinInput": ""
},
{
"SQL": "select count(c3) from (select a as c1, b as c2, a+1 as c3 from t1) as tmp group by c2, c1",
"AggInput": "[{[23 24] 4}]",
"JoinInput": ""
},
{
"SQL": "select count(c3) from (select a+b as c1, b as c2, a+1 as c3 from t1) as tmp group by c2, c1",
"AggInput": "[]",
"JoinInput": ""
},
{
"SQL": "select count(tmp.cmp) from (select t1.a as a, t1.b as b, (t1.b > (select t2.b from t2 where t2.a = t1.a)) as cmp from t1) tmp group by tmp.a, tmp.b",
"AggInput": "[{[37 38] 4}]",
"JoinInput": ""
},
{
"SQL": "select count(1) from (select t1.a as a, t1.b as b from t1 where t1.b > (select t2.b from t2 where t2.a = t1.a)) tmp group by tmp.a, tmp.b",
"AggInput": "[]",
"JoinInput": ""
},
{
"SQL": "select count(tmp.cmp) from (select t1.a as a, t1.b as b, (t1.b in (select t2.b from t2 where t2.a = t1.a limit 3)) as cmp from t1) tmp group by tmp.a, tmp.b",
"AggInput": "[{[56 57] 4}]",
"JoinInput": ""
},
{
"SQL": "select count(tmp.cmp) from (select t1.a as a, t1.b as b, (t1.b not in (select t2.b from t2 where t2.a = t1.a limit 3)) as cmp from t1) tmp group by tmp.a, tmp.b",
"AggInput": "[{[67 68] 4}]",
"JoinInput": ""
},
{
"SQL": "select count(1) from (select t1.a as a, t1.b as b from t1 where t1.b in (select t2.b from t2 where t2.a = t1.a limit 3)) tmp group by tmp.a, tmp.b",
"AggInput": "[]",
"JoinInput": ""
},
{
"SQL": "select count(1) from (select t1.a as a, t1.b as b from t1 where t1.b not in (select t2.b from t2 where t2.a = t1.a limit 3)) tmp group by tmp.a, tmp.b",
"AggInput": "[]",
"JoinInput": ""
},
{
"SQL": "select count(1) from t1, t2 where t1.a = t2.a group by t1.a, t1.b",
"AggInput": "[]",
"JoinInput": "[];[]"
},
{
"SQL": "select count(1) from t1 left join t2 on t1.a = t2.a group by t1.a, t1.b",
"AggInput": "[{[99 100] 4}]",
"JoinInput": "[{[99 100] 4}];[]"
},
{
"SQL": "select count(1) from t1 left join t2 on t1.a = t2.a group by t2.a, t2.b",
"AggInput": "[]",
"JoinInput": "[];[]"
},
{
"SQL": "select count(1) from t1 right join t2 on t1.a = t2.a group by t1.a, t1.b",
"AggInput": "[]",
"JoinInput": "[];[]"
},
{
"SQL": "select count(1) from t1 right join t2 on t1.a = t2.a group by t2.a, t2.b",
"AggInput": "[{[123 124] 9}]",
"JoinInput": "[];[{[123 124] 9}]"
},
{
"SQL": "select count(tmp.cmp) from (select t1.a as a, t1.b as b, (t1.b in (select t2.b from t2 where t2.a > t1.a)) as cmp from t1) tmp group by tmp.a, tmp.b",
"AggInput": "[{[127 128] 4}]",
"JoinInput": "[{[127 128] 4}];[]"
},
{
"SQL": "select count(tmp.cmp) from (select t1.a as a, t1.b as b, (t1.b not in (select t2.b from t2 where t2.a > t1.a)) as cmp from t1) tmp group by tmp.a, tmp.b",
"AggInput": "[{[138 139] 4}]",
"JoinInput": "[{[138 139] 4}];[]"
},
{
"SQL": "select count(1) from (select t1.a as a, t1.b as b from t1 where t1.b in (select t2.b from t2 where t2.a > t1.a)) tmp group by tmp.a, tmp.b",
"AggInput": "[]",
"JoinInput": "[];[]"
},
{
"SQL": "select count(1) from (select t1.a as a, t1.b as b from t1 where t1.b not in (select t2.b from t2 where t2.a > t1.a)) tmp group by tmp.a, tmp.b",
"AggInput": "[]",
"JoinInput": "[];[]"
},
{
"SQL": "select * from t1 left join (select t2.a as a, t2.b as b, count(1) as cnt from t2 group by t2.a, t2.b) as tmp on t1.a = tmp.a and t1.b = tmp.b",
"AggInput": "[{[166 167] 9}]",
"JoinInput": "[{[163 164] 4}];[{[166 167] 9}]"
},
{
"SQL": "select count(1) from (select t1.a as a, t1.b as b from t1 limit 3) tmp group by tmp.a, tmp.b",
"AggInput": "[]",
"JoinInput": ""
},
{
"SQL": "select count(tmp.a_sum) from (select t1.a as a, t1.b as b, sum(a) over() as a_sum from t1) tmp group by tmp.a, tmp.b",
"AggInput": "[{[174 175] 4}]",
"JoinInput": ""
}
]
},
{
"Name": "TestCardinalityGroupCols",
"Cases": [
{
"SQL": "select count(1) from t1 group by a, b",
"Plan": [
"StreamAgg 4.00 root group by:test.t1.a, test.t1.b, funcs:count(1)->Column#4",
"└─IndexReader 4.00 root index:IndexFullScan",
" └─IndexFullScan 4.00 cop[tikv] table:t1, index:a(a, b) keep order:true"
]
},
{
"SQL": "select * from t1, t2 where t1.a = t2.a and t1.b = t2.b",
"Plan": [
"MergeJoin 4.00 root inner join, left key:test.t1.a, test.t1.b, right key:test.t2.a, test.t2.b",
"├─IndexReader(Build) 9.00 root index:IndexFullScan",
"│ └─IndexFullScan 9.00 cop[tikv] table:t2, index:a(a, b) keep order:true",
"└─IndexReader(Probe) 4.00 root index:IndexFullScan",
" └─IndexFullScan 4.00 cop[tikv] table:t1, index:a(a, b) keep order:true"
]
},
{
"SQL": "select count(1) from t1 where a > 0 group by a, b",
"Plan": [
"StreamAgg 4.00 root group by:test.t1.a, test.t1.b, funcs:count(1)->Column#4",
"└─IndexReader 4.00 root index:IndexRangeScan",
" └─IndexRangeScan 4.00 cop[tikv] table:t1, index:a(a, b) range:(0,+inf], keep order:true"
]
},
{
"SQL": "select count(1) from t1 where b > 0 group by a, b",
"Plan": [
"StreamAgg 4.00 root group by:test.t1.a, test.t1.b, funcs:count(1)->Column#4",
"└─IndexReader 4.00 root index:Selection",
" └─Selection 4.00 cop[tikv] gt(test.t1.b, 0)",
" └─IndexFullScan 4.00 cop[tikv] table:t1, index:a(a, b) keep order:true"
]
},
{
"SQL": "select count(c3) from (select a as c1, b as c2, a+1 as c3 from t1) as tmp group by c2, c1",
"Plan": [
"StreamAgg 4.00 root group by:Column#10, Column#11, funcs:count(Column#9)->Column#5",
"└─Projection 4.00 root plus(test.t1.a, 1)->Column#9, test.t1.b, test.t1.a",
" └─IndexReader 4.00 root index:IndexFullScan",
" └─IndexFullScan 4.00 cop[tikv] table:t1, index:a(a, b) keep order:true"
]
},
{
"SQL": "select count(tmp.cmp) from (select t1.a as a, t1.b as b, (t1.b > (select t2.b from t2 where t2.a = t1.a)) as cmp from t1) tmp group by tmp.a, tmp.b",
"Plan": [
"StreamAgg 4.00 root group by:Column#14, Column#15, funcs:count(Column#13)->Column#11",
"└─Projection 4.00 root gt(test.t1.b, test.t2.b)->Column#13, test.t1.a, test.t1.b",
" └─Apply 4.00 root CARTESIAN left outer join",
" ├─IndexReader(Build) 4.00 root index:IndexFullScan",
" │ └─IndexFullScan 4.00 cop[tikv] table:t1, index:a(a, b) keep order:true",
" └─MaxOneRow(Probe) 1.00 root ",
" └─IndexReader 2.00 root index:IndexRangeScan",
" └─IndexRangeScan 2.00 cop[tikv] table:t2, index:a(a, b) range: decided by [eq(test.t2.a, test.t1.a)], keep order:false"
]
},
{
"SQL": "select count(tmp.cmp) from (select t1.a as a, t1.b as b, (t1.b in (select t2.b from t2 where t2.a = t1.a limit 3)) as cmp from t1) tmp group by tmp.a, tmp.b",
"Plan": [
"StreamAgg 4.00 root group by:test.t1.a, test.t1.b, funcs:count(Column#10)->Column#11",
"└─Apply 4.00 root left outer semi join, equal:[eq(test.t1.b, test.t2.b)]",
" ├─IndexReader(Build) 4.00 root index:IndexFullScan",
" │ └─IndexFullScan 4.00 cop[tikv] table:t1, index:a(a, b) keep order:true",
" └─Limit(Probe) 3.00 root offset:0, count:3",
" └─IndexReader 3.00 root index:Limit",
" └─Limit 3.00 cop[tikv] offset:0, count:3",
" └─IndexRangeScan 3.00 cop[tikv] table:t2, index:a(a, b) range: decided by [eq(test.t2.a, test.t1.a)], keep order:false"
]
},
{
"SQL": "select count(tmp.cmp) from (select t1.a as a, t1.b as b, (t1.b not in (select t2.b from t2 where t2.a = t1.a limit 3)) as cmp from t1) tmp group by tmp.a, tmp.b",
"Plan": [
"StreamAgg 4.00 root group by:test.t1.a, test.t1.b, funcs:count(Column#10)->Column#11",
"└─Apply 4.00 root anti left outer semi join, equal:[eq(test.t1.b, test.t2.b)]",
" ├─IndexReader(Build) 4.00 root index:IndexFullScan",
" │ └─IndexFullScan 4.00 cop[tikv] table:t1, index:a(a, b) keep order:true",
" └─Limit(Probe) 3.00 root offset:0, count:3",
" └─IndexReader 3.00 root index:Limit",
" └─Limit 3.00 cop[tikv] offset:0, count:3",
" └─IndexRangeScan 3.00 cop[tikv] table:t2, index:a(a, b) range: decided by [eq(test.t2.a, test.t1.a)], keep order:false"
]
},
{
"SQL": "select count(1) from t1 left join t2 on t1.a = t2.a group by t1.a, t1.b",
"Plan": [
"HashAgg 4.00 root group by:test.t1.a, test.t1.b, funcs:count(1)->Column#7",
"└─HashJoin 12.00 root left outer join, equal:[eq(test.t1.a, test.t2.a)]",
" ├─TableReader(Build) 4.00 root data:TableFullScan",
" │ └─TableFullScan 4.00 cop[tikv] table:t1 keep order:false",
" └─TableReader(Probe) 9.00 root data:TableFullScan",
" └─TableFullScan 9.00 cop[tikv] table:t2 keep order:false"
]
},
{
"SQL": "select count(1) from t1 right join t2 on t1.a = t2.a group by t2.a, t2.b",
"Plan": [
"HashAgg 9.00 root group by:test.t2.a, test.t2.b, funcs:count(1)->Column#7",
"└─HashJoin 12.00 root right outer join, equal:[eq(test.t1.a, test.t2.a)]",
" ├─TableReader(Build) 4.00 root data:TableFullScan",
" │ └─TableFullScan 4.00 cop[tikv] table:t1 keep order:false",
" └─TableReader(Probe) 9.00 root data:TableFullScan",
" └─TableFullScan 9.00 cop[tikv] table:t2 keep order:false"
]
},
{
"SQL": "select count(tmp.cmp) from (select t1.a as a, t1.b as b, (t1.b in (select t2.b from t2 where t2.a > t1.a)) as cmp from t1) tmp group by tmp.a, tmp.b",
"Plan": [
"HashAgg 4.00 root group by:test.t1.a, test.t1.b, funcs:count(Column#10)->Column#11",
"└─HashJoin 4.00 root left outer semi join, equal:[eq(test.t1.b, test.t2.b)], other cond:gt(test.t2.a, test.t1.a)",
" ├─TableReader(Build) 9.00 root data:TableFullScan",
" │ └─TableFullScan 9.00 cop[tikv] table:t2 keep order:false",
" └─TableReader(Probe) 4.00 root data:TableFullScan",
" └─TableFullScan 4.00 cop[tikv] table:t1 keep order:false"
]
},
{
"SQL": "select count(tmp.cmp) from (select t1.a as a, t1.b as b, (t1.b not in (select t2.b from t2 where t2.a > t1.a)) as cmp from t1) tmp group by tmp.a, tmp.b",
"Plan": [
"HashAgg 4.00 root group by:test.t1.a, test.t1.b, funcs:count(Column#10)->Column#11",
"└─HashJoin 4.00 root anti left outer semi join, equal:[eq(test.t1.b, test.t2.b)], other cond:gt(test.t2.a, test.t1.a)",
" ├─TableReader(Build) 9.00 root data:TableFullScan",
" │ └─TableFullScan 9.00 cop[tikv] table:t2 keep order:false",
" └─TableReader(Probe) 4.00 root data:TableFullScan",
" └─TableFullScan 4.00 cop[tikv] table:t1 keep order:false"
]
},
{
"SQL": "select * from t1 left join (select t2.a as a, t2.b as b, count(1) as cnt from t2 group by t2.a, t2.b) as tmp on t1.a = tmp.a and t1.b = tmp.b",
"Plan": [
"Projection 4.00 root test.t1.a, test.t1.b, test.t2.a, test.t2.b, Column#7",
"└─MergeJoin 4.00 root left outer join, left key:test.t1.a, test.t1.b, right key:test.t2.a, test.t2.b",
" ├─StreamAgg(Build) 9.00 root group by:test.t2.a, test.t2.b, funcs:count(1)->Column#7, funcs:firstrow(test.t2.a)->test.t2.a, funcs:firstrow(test.t2.b)->test.t2.b",
" │ └─IndexReader 9.00 root index:IndexFullScan",
" │ └─IndexFullScan 9.00 cop[tikv] table:t2, index:a(a, b) keep order:true",
" └─IndexReader(Probe) 4.00 root index:IndexFullScan",
" └─IndexFullScan 4.00 cop[tikv] table:t1, index:a(a, b) keep order:true"
]
},
{
"SQL": "select count(tmp.a_sum) from (select t1.a as a, t1.b as b, sum(a) over() as a_sum from t1) tmp group by tmp.a, tmp.b",
"Plan": [
"HashAgg 4.00 root group by:test.t1.a, test.t1.b, funcs:count(Column#5)->Column#6",
"└─Window 4.00 root sum(cast(test.t1.a, decimal(32,0) BINARY))->Column#5 over()",
" └─TableReader 4.00 root data:TableFullScan",
" └─TableFullScan 4.00 cop[tikv] table:t1 keep order:false"
]
}
]
}
]
| planner/core/testdata/stats_suite_out.json | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0001757070276653394,
0.00016986610717140138,
0.00016221044643316418,
0.00016952982696238905,
0.0000031013012176117627
] |
{
"id": 5,
"code_window": [
"\n",
"\t\"github.com/pingcap/errors\"\n",
"\t\"github.com/pingcap/failpoint\"\n",
"\tpb \"github.com/pingcap/kvproto/pkg/kvrpcpb\"\n",
"\ttidbkv \"github.com/pingcap/tidb/kv\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/kv\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/logutil\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/metrics\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/tikvrpc\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "store/tikv/pessimistic.go",
"type": "replace",
"edit_start_line_idx": 25
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv_test
import (
"bytes"
"context"
"fmt"
"math"
"math/rand"
"strings"
"sync"
"sync/atomic"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
tidbkv "github.com/pingcap/tidb/kv"
drivertxn "github.com/pingcap/tidb/store/driver/txn"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/config"
"github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
"github.com/pingcap/tidb/store/tikv/mockstore/mocktikv"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/tablecodec"
)
var (
txnCommitBatchSize = tikv.ConfigProbe{}.GetTxnCommitBatchSize()
bigTxnThreshold = tikv.ConfigProbe{}.GetBigTxnThreshold()
)
type testCommitterSuite struct {
OneByOneSuite
cluster cluster.Cluster
store tikv.StoreProbe
}
var _ = SerialSuites(&testCommitterSuite{})
func (s *testCommitterSuite) SetUpSuite(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // 3s
s.OneByOneSuite.SetUpSuite(c)
atomic.StoreUint64(&tikv.CommitMaxBackoff, 1000)
}
func (s *testCommitterSuite) SetUpTest(c *C) {
mvccStore, err := mocktikv.NewMVCCLevelDB("")
c.Assert(err, IsNil)
cluster := mocktikv.NewCluster(mvccStore)
mocktikv.BootstrapWithMultiRegions(cluster, []byte("a"), []byte("b"), []byte("c"))
s.cluster = cluster
client := mocktikv.NewRPCClient(cluster, mvccStore, nil)
pdCli := &tikv.CodecPDClient{Client: mocktikv.NewPDClient(cluster)}
spkv := tikv.NewMockSafePointKV()
store, err := tikv.NewKVStore("mocktikv-store", pdCli, spkv, client)
store.EnableTxnLocalLatches(1024000)
c.Assert(err, IsNil)
// TODO: make it possible
// store, err := mockstore.NewMockStore(
// mockstore.WithStoreType(mockstore.MockTiKV),
// mockstore.WithClusterInspector(func(c cluster.Cluster) {
// mockstore.BootstrapWithMultiRegions(c, []byte("a"), []byte("b"), []byte("c"))
// s.cluster = c
// }),
// mockstore.WithPDClientHijacker(func(c pd.Client) pd.Client {
// return &codecPDClient{c}
// }),
// mockstore.WithTxnLocalLatches(1024000),
// )
// c.Assert(err, IsNil)
s.store = tikv.StoreProbe{KVStore: store}
}
func (s *testCommitterSuite) TearDownSuite(c *C) {
atomic.StoreUint64(&tikv.CommitMaxBackoff, 20000)
s.store.Close()
s.OneByOneSuite.TearDownSuite(c)
}
func (s *testCommitterSuite) begin(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
return txn
}
func (s *testCommitterSuite) beginAsyncCommit(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.SetOption(kv.EnableAsyncCommit, true)
return txn
}
func (s *testCommitterSuite) checkValues(c *C, m map[string]string) {
txn := s.begin(c)
for k, v := range m {
val, err := txn.Get(context.TODO(), []byte(k))
c.Assert(err, IsNil)
c.Assert(string(val), Equals, v)
}
}
func (s *testCommitterSuite) mustCommit(c *C, m map[string]string) {
txn := s.begin(c)
for k, v := range m {
err := txn.Set([]byte(k), []byte(v))
c.Assert(err, IsNil)
}
err := txn.Commit(context.Background())
c.Assert(err, IsNil)
s.checkValues(c, m)
}
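// randKV returns a random key and value of the given lengths, drawn from a small alphabet so that collisions are likely.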
func randKV(keyLen, valLen int) (string, string) {
const letters = "abc"
k, v := make([]byte, keyLen), make([]byte, valLen)
for i := range k {
k[i] = letters[rand.Intn(len(letters))]
}
for i := range v {
v[i] = letters[rand.Intn(len(letters))]
}
return string(k), string(v)
}
func (s *testCommitterSuite) TestDeleteYourWritesTTL(c *C) {
conf := *config.GetGlobalConfig()
oldConf := conf
defer config.StoreGlobalConfig(&oldConf)
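	// Set TTLRefreshedTxnSize to 0 so that even a tiny transaction is expected to keep its TTL manager running after prewrite.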
conf.TiKVClient.TTLRefreshedTxnSize = 0
config.StoreGlobalConfig(&conf)
{
txn := s.begin(c)
err := txn.GetMemBuffer().SetWithFlags([]byte("bb"), []byte{0}, kv.SetPresumeKeyNotExists)
c.Assert(err, IsNil)
err = txn.Set([]byte("ba"), []byte{1})
c.Assert(err, IsNil)
err = txn.Delete([]byte("bb"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
c.Check(committer.IsTTLRunning(), IsTrue)
}
{
txn := s.begin(c)
err := txn.GetMemBuffer().SetWithFlags([]byte("dd"), []byte{0}, kv.SetPresumeKeyNotExists)
c.Assert(err, IsNil)
err = txn.Set([]byte("de"), []byte{1})
c.Assert(err, IsNil)
err = txn.Delete([]byte("dd"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
c.Check(committer.IsTTLRunning(), IsTrue)
}
}
func (s *testCommitterSuite) TestCommitRollback(c *C) {
s.mustCommit(c, map[string]string{
"a": "a",
"b": "b",
"c": "c",
})
txn := s.begin(c)
txn.Set([]byte("a"), []byte("a1"))
txn.Set([]byte("b"), []byte("b1"))
txn.Set([]byte("c"), []byte("c1"))
s.mustCommit(c, map[string]string{
"c": "c2",
})
err := txn.Commit(context.Background())
c.Assert(err, NotNil)
s.checkValues(c, map[string]string{
"a": "a",
"b": "b",
"c": "c2",
})
}
func (s *testCommitterSuite) TestPrewriteRollback(c *C) {
s.mustCommit(c, map[string]string{
"a": "a0",
"b": "b0",
})
ctx := context.Background()
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
txn2 := s.begin(c)
v, err := txn2.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a0"))
err = committer.PrewriteAllMutations(ctx)
if err != nil {
// Retry.
txn1 = s.begin(c)
err = txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err = txn1.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
}
commitTS, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
committer.SetCommitTS(commitTS)
err = committer.CommitMutations(ctx)
c.Assert(err, IsNil)
txn3 := s.begin(c)
v, err = txn3.Get(context.TODO(), []byte("b"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("b1"))
}
func (s *testCommitterSuite) TestContextCancel(c *C) {
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
ctx, cancel := context.WithCancel(context.Background())
cancel() // cancel the context
err = committer.PrewriteAllMutations(ctx)
c.Assert(errors.Cause(err), Equals, context.Canceled)
}
func (s *testCommitterSuite) TestContextCancel2(c *C) {
txn := s.begin(c)
err := txn.Set([]byte("a"), []byte("a"))
c.Assert(err, IsNil)
err = txn.Set([]byte("b"), []byte("b"))
c.Assert(err, IsNil)
ctx, cancel := context.WithCancel(context.Background())
err = txn.Commit(ctx)
c.Assert(err, IsNil)
cancel()
// Secondary keys should not be canceled.
time.Sleep(time.Millisecond * 20)
c.Assert(s.isKeyLocked(c, []byte("b")), IsFalse)
}
func (s *testCommitterSuite) TestContextCancelRetryable(c *C) {
txn1, txn2, txn3 := s.begin(c), s.begin(c), s.begin(c)
// txn1 locks "b"
err := txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
// txn3 writes "c"
err = txn3.Set([]byte("c"), []byte("c3"))
c.Assert(err, IsNil)
err = txn3.Commit(context.Background())
c.Assert(err, IsNil)
// txn2 writes "a"(PK), "b", "c" on different regions.
// "c" will return a retryable error.
// "b" will get a Locked error first, then the context must be canceled after backoff for lock.
err = txn2.Set([]byte("a"), []byte("a2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("c"), []byte("c2"))
c.Assert(err, IsNil)
err = txn2.Commit(context.Background())
c.Assert(err, NotNil)
c.Assert(tidbkv.ErrWriteConflictInTiDB.Equal(err), IsTrue, Commentf("err: %s", err))
}
func (s *testCommitterSuite) TestContextCancelCausingUndetermined(c *C) {
// For a normal transaction, if RPC returns context.Canceled error while sending commit
// requests, the transaction should go to the undetermined state.
txn := s.begin(c)
err := txn.Set([]byte("a"), []byte("va"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/rpcContextCancelErr", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/rpcContextCancelErr"), IsNil)
}()
err = committer.CommitMutations(context.Background())
c.Assert(committer.GetUndeterminedErr(), NotNil)
c.Assert(errors.Cause(err), Equals, context.Canceled)
}
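// mustGetRegionID returns the ID of the region that currently contains the given key.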
func (s *testCommitterSuite) mustGetRegionID(c *C, key []byte) uint64 {
loc, err := s.store.GetRegionCache().LocateKey(tikv.NewBackofferWithVars(context.Background(), 500, nil), key)
c.Assert(err, IsNil)
return loc.Region.GetID()
}
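// isKeyLocked sends a point-get request for the key and reports whether the response carries a Locked error.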
func (s *testCommitterSuite) isKeyLocked(c *C, key []byte) bool {
ver, err := s.store.CurrentTimestamp(oracle.GlobalTxnScope)
c.Assert(err, IsNil)
bo := tikv.NewBackofferWithVars(context.Background(), 500, nil)
req := tikvrpc.NewRequest(tikvrpc.CmdGet, &kvrpcpb.GetRequest{
Key: key,
Version: ver,
})
loc, err := s.store.GetRegionCache().LocateKey(bo, key)
c.Assert(err, IsNil)
resp, err := s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
keyErr := (resp.Resp.(*kvrpcpb.GetResponse)).GetError()
return keyErr.GetLocked() != nil
}
func (s *testCommitterSuite) TestPrewriteCancel(c *C) {
	// Set up region delays for keys "b" and "c".
delays := map[uint64]time.Duration{
s.mustGetRegionID(c, []byte("b")): time.Millisecond * 10,
s.mustGetRegionID(c, []byte("c")): time.Millisecond * 20,
}
s.store.SetTiKVClient(&slowClient{
Client: s.store.GetTiKVClient(),
regionDelays: delays,
})
txn1, txn2 := s.begin(c), s.begin(c)
// txn2 writes "b"
err := txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
err = txn2.Commit(context.Background())
c.Assert(err, IsNil)
// txn1 writes "a"(PK), "b", "c" on different regions.
// "b" will return an error and cancel commit.
err = txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("c"), []byte("c1"))
c.Assert(err, IsNil)
err = txn1.Commit(context.Background())
c.Assert(err, NotNil)
// "c" should be cleaned up in reasonable time.
for i := 0; i < 50; i++ {
if !s.isKeyLocked(c, []byte("c")) {
return
}
time.Sleep(time.Millisecond * 10)
}
c.Fail()
}
// slowClient wraps rpcClient and makes some regions respond with delay.
type slowClient struct {
tikv.Client
regionDelays map[uint64]time.Duration
}
func (c *slowClient) SendReq(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
for id, delay := range c.regionDelays {
reqCtx := &req.Context
if reqCtx.GetRegionId() == id {
time.Sleep(delay)
}
}
return c.Client.SendRequest(ctx, addr, req, timeout)
}
func (s *testCommitterSuite) TestIllegalTso(c *C) {
txn := s.begin(c)
data := map[string]string{
"name": "aa",
"age": "12",
}
for k, v := range data {
err := txn.Set([]byte(k), []byte(v))
c.Assert(err, IsNil)
}
// make start ts bigger.
txn.SetStartTS(math.MaxUint64)
err := txn.Commit(context.Background())
c.Assert(err, NotNil)
errMsgMustContain(c, err, "invalid txnStartTS")
}
func errMsgMustContain(c *C, err error, msg string) {
c.Assert(strings.Contains(err.Error(), msg), IsTrue)
}
func (s *testCommitterSuite) TestCommitBeforePrewrite(c *C) {
txn := s.begin(c)
err := txn.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
ctx := context.Background()
committer.Cleanup(ctx)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, NotNil)
errMsgMustContain(c, err, "already rolled back")
}
func (s *testCommitterSuite) TestPrewritePrimaryKeyFailed(c *C) {
// commit (a,a1)
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Commit(context.Background())
c.Assert(err, IsNil)
// check a
txn := s.begin(c)
v, err := txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
// set txn2's startTs before txn1's
txn2 := s.begin(c)
txn2.SetStartTS(txn1.StartTS() - 1)
err = txn2.Set([]byte("a"), []byte("a2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
	// prewrite: primary a fails, b succeeds
err = txn2.Commit(context.Background())
c.Assert(err, NotNil)
// txn2 failed with a rollback for record a.
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
_, err = txn.Get(context.TODO(), []byte("b"))
errMsgMustContain(c, err, "key not exist")
	// clean up again; it shouldn't fail when a rollback record already exists.
ctx := context.Background()
committer, err := txn2.NewCommitter(0)
c.Assert(err, IsNil)
committer.Cleanup(ctx)
// check the data after rollback twice.
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
	// update data in a new txn; it should succeed.
err = txn.Set([]byte("a"), []byte("a3"))
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
// check value
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a3"))
}
func (s *testCommitterSuite) TestWrittenKeysOnConflict(c *C) {
	// This test checks that when there is a write conflict, the written keys are collected,
	// so we can use them to clean up the keys.
region, _ := s.cluster.GetRegionByKey([]byte("x"))
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(region.Id, newRegionID, []byte("y"), []uint64{newPeerID}, newPeerID)
var totalTime time.Duration
for i := 0; i < 10; i++ {
txn1 := s.begin(c)
txn2 := s.begin(c)
txn2.Set([]byte("x1"), []byte("1"))
committer2, err := txn2.NewCommitter(2)
c.Assert(err, IsNil)
err = committer2.Execute(context.Background())
c.Assert(err, IsNil)
txn1.Set([]byte("x1"), []byte("1"))
txn1.Set([]byte("y1"), []byte("2"))
committer1, err := txn1.NewCommitter(2)
c.Assert(err, IsNil)
err = committer1.Execute(context.Background())
c.Assert(err, NotNil)
committer1.WaitCleanup()
txn3 := s.begin(c)
start := time.Now()
txn3.Get(context.TODO(), []byte("y1"))
totalTime += time.Since(start)
txn3.Commit(context.Background())
}
c.Assert(totalTime, Less, time.Millisecond*200)
}
func (s *testCommitterSuite) TestPrewriteTxnSize(c *C) {
// Prepare two regions first: (, 100) and [100, )
region, _ := s.cluster.GetRegionByKey([]byte{50})
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(region.Id, newRegionID, []byte{100}, []uint64{newPeerID}, newPeerID)
txn := s.begin(c)
var val [1024]byte
for i := byte(50); i < 120; i++ {
err := txn.Set([]byte{i}, val[:])
c.Assert(err, IsNil)
}
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
ctx := context.Background()
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
// Check the written locks in the first region (50 keys)
for i := byte(50); i < 100; i++ {
lock := s.getLockInfo(c, []byte{i})
c.Assert(int(lock.TxnSize), Equals, 50)
}
// Check the written locks in the second region (20 keys)
for i := byte(100); i < 120; i++ {
lock := s.getLockInfo(c, []byte{i})
c.Assert(int(lock.TxnSize), Equals, 20)
}
}
func (s *testCommitterSuite) TestRejectCommitTS(c *C) {
txn := s.begin(c)
c.Assert(txn.Set([]byte("x"), []byte("v")), IsNil)
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, []byte("x"))
c.Assert(err, IsNil)
mutations := []*kvrpcpb.Mutation{
{
Op: committer.GetMutations().GetOp(0),
Key: committer.GetMutations().GetKey(0),
Value: committer.GetMutations().GetValue(0),
},
}
prewrite := &kvrpcpb.PrewriteRequest{
Mutations: mutations,
PrimaryLock: committer.GetPrimaryKey(),
StartVersion: committer.GetStartTS(),
LockTtl: committer.GetLockTTL(),
MinCommitTs: committer.GetStartTS() + 100, // Set minCommitTS
}
req := tikvrpc.NewRequest(tikvrpc.CmdPrewrite, prewrite)
_, err = s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
// Make commitTS less than minCommitTS.
committer.SetCommitTS(committer.GetStartTS() + 1)
	// Ensure that the new commit ts is greater than minCommitTS when retrying
time.Sleep(3 * time.Millisecond)
err = committer.CommitMutations(context.Background())
c.Assert(err, IsNil)
	// Use startTS+2 to read the data and get nothing.
	// Use math.MaxUint64 to read the data and succeed.
	// That means the final commitTS > startTS+2, so it's not the one we provided.
	// In this way we cover the retried commitTS logic.
txn1, err := s.store.BeginWithStartTS(oracle.GlobalTxnScope, committer.GetStartTS()+2)
c.Assert(err, IsNil)
_, err = txn1.Get(bo.GetCtx(), []byte("x"))
c.Assert(tidbkv.IsErrNotFound(err), IsTrue)
txn2, err := s.store.BeginWithStartTS(oracle.GlobalTxnScope, math.MaxUint64)
c.Assert(err, IsNil)
val, err := txn2.Get(bo.GetCtx(), []byte("x"))
c.Assert(err, IsNil)
c.Assert(bytes.Equal(val, []byte("v")), IsTrue)
}
func (s *testCommitterSuite) TestPessimisticPrewriteRequest(c *C) {
	// This test checks that the isPessimisticLock field is set in the request even when no key is a pessimistic lock.
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
err := txn.Set([]byte("t1"), []byte("v1"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
committer.SetForUpdateTS(100)
req := committer.BuildPrewriteRequest(1, 1, 1, committer.GetMutations().Slice(0, 1), 1)
c.Assert(len(req.Prewrite().IsPessimisticLock), Greater, 0)
c.Assert(req.Prewrite().ForUpdateTs, Equals, uint64(100))
}
func (s *testCommitterSuite) TestUnsetPrimaryKey(c *C) {
	// This test checks that the isPessimisticLock field is set in the request even when no key is a pessimistic lock.
key := []byte("key")
txn := s.begin(c)
c.Assert(txn.Set(key, key), IsNil)
c.Assert(txn.Commit(context.Background()), IsNil)
txn = s.begin(c)
txn.SetOption(kv.Pessimistic, true)
_, _ = txn.GetUnionStore().Get(context.TODO(), key)
c.Assert(txn.GetMemBuffer().SetWithFlags(key, key, kv.SetPresumeKeyNotExists), IsNil)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, NotNil)
c.Assert(txn.Delete(key), IsNil)
key2 := []byte("key2")
c.Assert(txn.Set(key2, key2), IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
}
func (s *testCommitterSuite) TestPessimisticLockedKeysDedup(c *C) {
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
lockCtx := &kv.LockCtx{ForUpdateTS: 100, WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, []byte("abc"), []byte("def"))
c.Assert(err, IsNil)
lockCtx = &kv.LockCtx{ForUpdateTS: 100, WaitStartTime: time.Now()}
err = txn.LockKeys(context.Background(), lockCtx, []byte("abc"), []byte("def"))
c.Assert(err, IsNil)
c.Assert(txn.CollectLockedKeys(), HasLen, 2)
}
func (s *testCommitterSuite) TestPessimisticTTL(c *C) {
key := []byte("key")
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
time.Sleep(time.Millisecond * 100)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, IsNil)
time.Sleep(time.Millisecond * 100)
key2 := []byte("key2")
lockCtx = &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
err = txn.LockKeys(context.Background(), lockCtx, key2)
c.Assert(err, IsNil)
lockInfo := s.getLockInfo(c, key)
msBeforeLockExpired := s.store.GetOracle().UntilExpired(txn.StartTS(), lockInfo.LockTtl, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(msBeforeLockExpired, GreaterEqual, int64(100))
lr := s.store.NewLockResolver()
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
status, err := lr.GetTxnStatus(bo, txn.StartTS(), key2, 0, txn.StartTS(), true, false, nil)
c.Assert(err, IsNil)
c.Assert(status.TTL(), GreaterEqual, lockInfo.LockTtl)
// Check primary lock TTL is auto increasing while the pessimistic txn is ongoing.
for i := 0; i < 50; i++ {
lockInfoNew := s.getLockInfo(c, key)
if lockInfoNew.LockTtl > lockInfo.LockTtl {
currentTS, err := s.store.GetOracle().GetTimestamp(bo.GetCtx(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
			// Check that the TTL is updated to a reasonable range.
expire := oracle.ExtractPhysical(txn.StartTS()) + int64(lockInfoNew.LockTtl)
now := oracle.ExtractPhysical(currentTS)
c.Assert(expire > now, IsTrue)
c.Assert(uint64(expire-now) <= atomic.LoadUint64(&tikv.ManagedLockTTL), IsTrue)
return
}
time.Sleep(100 * time.Millisecond)
}
c.Assert(false, IsTrue, Commentf("update pessimistic ttl fail"))
}
func (s *testCommitterSuite) TestPessimisticLockReturnValues(c *C) {
key := []byte("key")
key2 := []byte("key2")
txn := s.begin(c)
c.Assert(txn.Set(key, key), IsNil)
c.Assert(txn.Set(key2, key2), IsNil)
c.Assert(txn.Commit(context.Background()), IsNil)
txn = s.begin(c)
txn.SetOption(kv.Pessimistic, true)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
lockCtx.ReturnValues = true
lockCtx.Values = map[string]kv.ReturnedValue{}
c.Assert(txn.LockKeys(context.Background(), lockCtx, key, key2), IsNil)
c.Assert(lockCtx.Values, HasLen, 2)
c.Assert(lockCtx.Values[string(key)].Value, BytesEquals, key)
c.Assert(lockCtx.Values[string(key2)].Value, BytesEquals, key2)
}
// TestElapsedTTL tests that elapsed time is correct even if ts physical time is greater than local time.
func (s *testCommitterSuite) TestElapsedTTL(c *C) {
key := []byte("key")
txn := s.begin(c)
txn.SetStartTS(oracle.ComposeTS(oracle.GetPhysical(time.Now().Add(time.Second*10)), 1))
txn.SetOption(kv.Pessimistic, true)
time.Sleep(time.Millisecond * 100)
lockCtx := &kv.LockCtx{
ForUpdateTS: oracle.ComposeTS(oracle.ExtractPhysical(txn.StartTS())+100, 1),
WaitStartTime: time.Now(),
}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, IsNil)
lockInfo := s.getLockInfo(c, key)
c.Assert(lockInfo.LockTtl-atomic.LoadUint64(&tikv.ManagedLockTTL), GreaterEqual, uint64(100))
c.Assert(lockInfo.LockTtl-atomic.LoadUint64(&tikv.ManagedLockTTL), Less, uint64(150))
}
func (s *testCommitterSuite) TestDeleteYourWriteCauseGhostPrimary(c *C) {
s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
k1 := []byte("a") // insert but deleted key at first pos in txn1
k2 := []byte("b") // insert key at second pos in txn1
k3 := []byte("c") // insert key in txn1 and will be conflict read by txn2
// insert k1, k2, k3 and delete k1
txn1 := s.begin(c)
txn1.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
txn1.Get(context.Background(), k1)
txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
txn1.Set(k2, []byte{1})
txn1.Set(k3, []byte{2})
txn1.Delete(k1)
committer1, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
// setup test knob in txn's committer
ac, bk := make(chan struct{}), make(chan struct{})
committer1.SetPrimaryKeyBlocker(ac, bk)
txn1.SetCommitter(committer1)
var txn1Done sync.WaitGroup
txn1Done.Add(1)
go func() {
err1 := txn1.Commit(context.Background())
c.Assert(err1, IsNil)
txn1Done.Done()
}()
	// resume after the primary key has been committed
<-ac
	// start txn2 to read k3 (prewrite succeeded and the primary key should be committed)
txn2 := s.begin(c)
txn2.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
v, err := txn2.Get(context.Background(), k3)
	c.Assert(err, IsNil) // should resolve the lock and read txn1's k3 result instead of rolling it back.
c.Assert(v[0], Equals, byte(2))
bk <- struct{}{}
txn1Done.Wait()
}
func (s *testCommitterSuite) TestDeleteAllYourWrites(c *C) {
s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
k1 := []byte("a")
k2 := []byte("b")
k3 := []byte("c")
// insert k1, k2, k3 and delete k1, k2, k3
txn1 := s.begin(c)
txn1.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
txn1.Delete(k1)
txn1.GetMemBuffer().SetWithFlags(k2, []byte{1}, kv.SetPresumeKeyNotExists)
txn1.Delete(k2)
txn1.GetMemBuffer().SetWithFlags(k3, []byte{2}, kv.SetPresumeKeyNotExists)
txn1.Delete(k3)
err1 := txn1.Commit(context.Background())
c.Assert(err1, IsNil)
}
func (s *testCommitterSuite) TestDeleteAllYourWritesWithSFU(c *C) {
s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
k1 := []byte("a")
k2 := []byte("b")
k3 := []byte("c")
	// insert k1 and delete it, then lock k2 and k3 with select for update
txn1 := s.begin(c)
txn1.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
txn1.Delete(k1)
err := txn1.LockKeys(context.Background(), &kv.LockCtx{}, k2, k3) // select * from t where x in (k2, k3) for update
c.Assert(err, IsNil)
committer1, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
// setup test knob in txn's committer
ac, bk := make(chan struct{}), make(chan struct{})
committer1.SetPrimaryKeyBlocker(ac, bk)
txn1.SetCommitter(committer1)
var txn1Done sync.WaitGroup
txn1Done.Add(1)
go func() {
err1 := txn1.Commit(context.Background())
c.Assert(err1, IsNil)
txn1Done.Done()
}()
	// resume after the primary key has been committed
<-ac
// start txn2 to read k3
txn2 := s.begin(c)
txn2.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
err = txn2.Set(k3, []byte{33})
c.Assert(err, IsNil)
var meetLocks []*tikv.Lock
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
resolver.SetMeetLockCallback(func(locks []*tikv.Lock) {
meetLocks = append(meetLocks, locks...)
})
err = txn2.Commit(context.Background())
c.Assert(err, IsNil)
bk <- struct{}{}
txn1Done.Wait()
c.Assert(meetLocks[0].Primary[0], Equals, k2[0])
}
// TestAcquireFalseTimeoutLock tests acquiring a key which is a secondary key of another transaction.
// The lock's own TTL is expired but the primary key is still alive due to heartbeats.
func (s *testCommitterSuite) TestAcquireFalseTimeoutLock(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 1000) // 1s
defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default test value
// k1 is the primary lock of txn1
k1 := []byte("k1")
// k2 is a secondary lock of txn1 and a key txn2 wants to lock
k2 := []byte("k2")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// lock the primary key
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1)
c.Assert(err, IsNil)
// lock the secondary key
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err = txn1.LockKeys(context.Background(), lockCtx, k2)
c.Assert(err, IsNil)
// Heartbeats will increase the TTL of the primary key
	// wait until the secondary lock's own TTL has expired
time.Sleep(time.Duration(atomic.LoadUint64(&tikv.ManagedLockTTL)) * time.Millisecond)
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
// test no wait
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: tidbkv.LockNoWait, WaitStartTime: time.Now()}
err = txn2.LockKeys(context.Background(), lockCtx, k2)
// cannot acquire lock immediately thus error
c.Assert(err.Error(), Equals, kv.ErrLockAcquireFailAndNoWaitSet.Error())
	// test waiting for a limited time (200ms)
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: 200, WaitStartTime: time.Now()}
err = txn2.LockKeys(context.Background(), lockCtx, k2)
// cannot acquire lock in time thus error
c.Assert(err.Error(), Equals, kv.ErrLockWaitTimeout.Error())
}
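// getLockInfo prewrites the key from a fresh transaction and returns the lock information carried by the expected KeyIsLocked error.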
func (s *testCommitterSuite) getLockInfo(c *C, key []byte) *kvrpcpb.LockInfo {
txn := s.begin(c)
err := txn.Set(key, key)
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, key)
c.Assert(err, IsNil)
req := committer.BuildPrewriteRequest(loc.Region.GetID(), loc.Region.GetConfVer(), loc.Region.GetVer(), committer.GetMutations().Slice(0, 1), 1)
resp, err := s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
keyErrs := (resp.Resp.(*kvrpcpb.PrewriteResponse)).Errors
c.Assert(keyErrs, HasLen, 1)
locked := keyErrs[0].Locked
c.Assert(locked, NotNil)
return locked
}
func (s *testCommitterSuite) TestPkNotFound(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 100) // 100ms
defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default value
ctx := context.Background()
// k1 is the primary lock of txn1.
k1 := []byte("k1")
// k2 is a secondary lock of txn1 and a key txn2 wants to lock.
k2 := []byte("k2")
k3 := []byte("k3")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// lock the primary key.
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(ctx, lockCtx, k1)
c.Assert(err, IsNil)
// lock the secondary key.
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err = txn1.LockKeys(ctx, lockCtx, k2, k3)
c.Assert(err, IsNil)
	// Stop the txn ttl manager and remove the primary key, as if the tidb server crashed and the primary key lock
	// does not actually exist, while the secondary lock operation succeeded.
txn1.GetCommitter().CloseTTLManager()
var status tikv.TxnStatus
bo := tikv.NewBackofferWithVars(ctx, 5000, nil)
lockKey2 := &tikv.Lock{
Key: k2,
Primary: k1,
TxnID: txn1.StartTS(),
		TTL:             0, // let the primary lock k1 expire during the check.
TxnSize: txnCommitBatchSize,
LockType: kvrpcpb.Op_PessimisticLock,
LockForUpdateTS: txn1.StartTS(),
}
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
status, err = resolver.GetTxnStatusFromLock(bo, lockKey2, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
c.Assert(err, IsNil)
c.Assert(status.Action(), Equals, kvrpcpb.Action_TTLExpirePessimisticRollback)
	// Txn2 tries to lock the secondary key k2; there should be no dead loop.
	// Since the resolving key k2 is a pessimistic lock, no rollback record should be written, so locking k2 later
	// and locking the other secondary key k3 should succeed if no failpoint is enabled.
status, err = resolver.GetTxnStatusFromLock(bo, lockKey2, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
c.Assert(err, IsNil)
c.Assert(status.Action(), Equals, kvrpcpb.Action_LockNotExistDoNothing)
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now()}
err = txn2.LockKeys(ctx, lockCtx, k2)
c.Assert(err, IsNil)
// Pessimistic rollback using smaller forUpdateTS does not take effect.
lockKey3 := &tikv.Lock{
Key: k3,
Primary: k1,
TxnID: txn1.StartTS(),
TTL: tikv.ManagedLockTTL,
TxnSize: txnCommitBatchSize,
LockType: kvrpcpb.Op_PessimisticLock,
LockForUpdateTS: txn1.StartTS() - 1,
}
err = resolver.ResolvePessimisticLock(ctx, lockKey3)
c.Assert(err, IsNil)
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err = txn1.LockKeys(ctx, lockCtx, k3)
c.Assert(err, IsNil)
	// After disabling the failpoint, the rollbackIfNotExist flag will be set, and the resolve should succeed. In this
	// case, the returned action of TxnStatus should be LockNotExistDoNothing, and the lock on k3 can be resolved.
txn3 := s.begin(c)
txn3.SetOption(kv.Pessimistic, true)
lockCtx = &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
err = txn3.LockKeys(ctx, lockCtx, k3)
c.Assert(err, IsNil)
status, err = resolver.GetTxnStatusFromLock(bo, lockKey3, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
c.Assert(err, IsNil)
c.Assert(status.Action(), Equals, kvrpcpb.Action_LockNotExistDoNothing)
}
func (s *testCommitterSuite) TestPessimisticLockPrimary(c *C) {
// a is the primary lock of txn1
k1 := []byte("a")
// b is a secondary lock of txn1 and a key txn2 wants to lock, b is on another region
k2 := []byte("b")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// txn1 lock k1
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1)
c.Assert(err, IsNil)
	// txn2 wants to lock k1 and k2; k1 (the pk) is blocked by txn1. Since pessimisticLockKeys has been changed to
	// lock the primary key first and then the secondary keys concurrently, k2 should not be locked by txn2.
doneCh := make(chan error)
go func() {
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
lockCtx2 := &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: 200}
waitErr := txn2.LockKeys(context.Background(), lockCtx2, k1, k2)
doneCh <- waitErr
}()
time.Sleep(50 * time.Millisecond)
	// txn3 should lock k2 successfully using no wait
txn3 := s.begin(c)
txn3.SetOption(kv.Pessimistic, true)
lockCtx3 := &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL", "return"), IsNil)
err = txn3.LockKeys(context.Background(), lockCtx3, k2)
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL"), IsNil)
c.Assert(err, IsNil)
waitErr := <-doneCh
c.Assert(kv.ErrLockWaitTimeout.Equal(waitErr), IsTrue)
}
func (s *testCommitterSuite) TestResolvePessimisticLock(c *C) {
untouchedIndexKey := []byte("t00000001_i000000001")
untouchedIndexValue := []byte{0, 0, 0, 0, 0, 0, 0, 1, 49}
noValueIndexKey := []byte("t00000001_i000000002")
c.Assert(tablecodec.IsUntouchedIndexKValue(untouchedIndexKey, untouchedIndexValue), IsTrue)
txn := s.begin(c)
txn.SetOption(kv.KVFilter, drivertxn.TiDBKVFilter{})
err := txn.Set(untouchedIndexKey, untouchedIndexValue)
c.Assert(err, IsNil)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
err = txn.LockKeys(context.Background(), lockCtx, untouchedIndexKey, noValueIndexKey)
c.Assert(err, IsNil)
commit, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
mutation := commit.MutationsOfKeys([][]byte{untouchedIndexKey, noValueIndexKey})
c.Assert(mutation.Len(), Equals, 2)
c.Assert(mutation.GetOp(0), Equals, pb.Op_Lock)
c.Assert(mutation.GetKey(0), BytesEquals, untouchedIndexKey)
c.Assert(mutation.GetValue(0), BytesEquals, untouchedIndexValue)
c.Assert(mutation.GetOp(1), Equals, pb.Op_Lock)
c.Assert(mutation.GetKey(1), BytesEquals, noValueIndexKey)
c.Assert(mutation.GetValue(1), BytesEquals, []byte{})
}
func (s *testCommitterSuite) TestCommitDeadLock(c *C) {
	// Split into two regions so that k1 and k2 are in different regions.
s.cluster.SplitKeys([]byte("z"), []byte("a"), 2)
k1 := []byte("a_deadlock_k1")
k2 := []byte("y_deadlock_k2")
region1, _ := s.cluster.GetRegionByKey(k1)
region2, _ := s.cluster.GetRegionByKey(k2)
c.Assert(region1.Id != region2.Id, IsTrue)
txn1 := s.begin(c)
txn1.Set(k1, []byte("t1"))
txn1.Set(k2, []byte("t1"))
commit1, err := txn1.NewCommitter(1)
c.Assert(err, IsNil)
commit1.SetPrimaryKey(k1)
commit1.SetTxnSize(1000 * 1024 * 1024)
txn2 := s.begin(c)
txn2.Set(k1, []byte("t2"))
txn2.Set(k2, []byte("t2"))
commit2, err := txn2.NewCommitter(2)
c.Assert(err, IsNil)
commit2.SetPrimaryKey(k2)
commit2.SetTxnSize(1000 * 1024 * 1024)
s.cluster.ScheduleDelay(txn2.StartTS(), region1.Id, 5*time.Millisecond)
s.cluster.ScheduleDelay(txn1.StartTS(), region2.Id, 5*time.Millisecond)
	// Txn1 prewrites k1, k2 and txn2 prewrites k2, k1; the large txn
	// protocol runs ttlManager and updates their TTLs, which can cause a deadlock.
ch := make(chan error, 2)
var wg sync.WaitGroup
wg.Add(1)
go func() {
ch <- commit2.Execute(context.Background())
wg.Done()
}()
ch <- commit1.Execute(context.Background())
wg.Wait()
close(ch)
res := 0
for e := range ch {
if e != nil {
res++
}
}
c.Assert(res, Equals, 1)
}
// TestPushPessimisticLock tests pushing forward the minCommitTS of pessimistic locks.
func (s *testCommitterSuite) TestPushPessimisticLock(c *C) {
// k1 is the primary key.
k1, k2 := []byte("a"), []byte("b")
ctx := context.Background()
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1, k2)
c.Assert(err, IsNil)
txn1.Set(k2, []byte("v2"))
committer := txn1.GetCommitter()
err = committer.InitKeysAndMutations()
c.Assert(err, IsNil)
// Strip the prewrite of the primary key.
committer.SetMutations(committer.GetMutations().Slice(1, 2))
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
	// The primary lock is a pessimistic lock and the secondary lock is an optimistic lock.
lock1 := s.getLockInfo(c, k1)
c.Assert(lock1.LockType, Equals, kvrpcpb.Op_PessimisticLock)
c.Assert(lock1.PrimaryLock, BytesEquals, k1)
lock2 := s.getLockInfo(c, k2)
c.Assert(lock2.LockType, Equals, kvrpcpb.Op_Put)
c.Assert(lock2.PrimaryLock, BytesEquals, k1)
txn2 := s.begin(c)
start := time.Now()
_, err = txn2.Get(ctx, k2)
elapsed := time.Since(start)
// The optimistic lock shouldn't block reads.
c.Assert(elapsed, Less, 500*time.Millisecond)
c.Assert(tidbkv.IsErrNotFound(err), IsTrue)
txn1.Rollback()
txn2.Rollback()
}
// TestResolveMixed tests mixed resolving of left-behind optimistic and pessimistic locks,
// using the clean-whole-region resolve path.
func (s *testCommitterSuite) TestResolveMixed(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 100) // 100ms
defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default value
ctx := context.Background()
// pk is the primary lock of txn1
pk := []byte("pk")
secondaryLockkeys := make([][]byte, 0, bigTxnThreshold)
for i := 0; i < bigTxnThreshold; i++ {
optimisticLock := []byte(fmt.Sprintf("optimisticLockKey%d", i))
secondaryLockkeys = append(secondaryLockkeys, optimisticLock)
}
pessimisticLockKey := []byte("pessimisticLockKey")
	// create optimistic and pessimistic locks that will be left behind with the primary lock missing
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// lock the primary key
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, pk)
c.Assert(err, IsNil)
// lock the optimistic keys
for i := 0; i < bigTxnThreshold; i++ {
txn1.Set(secondaryLockkeys[i], []byte(fmt.Sprintf("v%d", i)))
}
committer := txn1.GetCommitter()
err = committer.InitKeysAndMutations()
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
// lock the pessimistic keys
err = txn1.LockKeys(context.Background(), lockCtx, pessimisticLockKey)
c.Assert(err, IsNil)
lock1 := s.getLockInfo(c, pessimisticLockKey)
c.Assert(lock1.LockType, Equals, kvrpcpb.Op_PessimisticLock)
c.Assert(lock1.PrimaryLock, BytesEquals, pk)
optimisticLockKey := secondaryLockkeys[0]
lock2 := s.getLockInfo(c, optimisticLockKey)
c.Assert(lock2.LockType, Equals, kvrpcpb.Op_Put)
c.Assert(lock2.PrimaryLock, BytesEquals, pk)
	// stop the txn ttl manager and remove the primary key, leaving the other keys behind
committer.CloseTTLManager()
muts := tikv.NewPlainMutations(1)
muts.Push(kvrpcpb.Op_Lock, pk, nil, true)
err = committer.PessimisticRollbackMutations(context.Background(), &muts)
c.Assert(err, IsNil)
	// try to resolve the left-behind optimistic locks using the clean-whole-region path
time.Sleep(time.Duration(atomic.LoadUint64(&tikv.ManagedLockTTL)) * time.Millisecond)
optimisticLockInfo := s.getLockInfo(c, optimisticLockKey)
lock := tikv.NewLock(optimisticLockInfo)
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
err = resolver.ResolveLock(ctx, lock)
c.Assert(err, IsNil)
	// txn2 tries to lock the pessimisticLockKey; the lock should have been resolved by the clean-whole-region resolve
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
err = txn2.LockKeys(context.Background(), lockCtx, pessimisticLockKey)
c.Assert(err, IsNil)
err = txn1.Rollback()
c.Assert(err, IsNil)
err = txn2.Rollback()
c.Assert(err, IsNil)
}
// TestPrewriteSecondaryKeys tests that when async commit is enabled, each prewrite message includes an
// accurate list of secondary keys.
func (s *testCommitterSuite) TestPrewriteSecondaryKeys(c *C) {
// Prepare two regions first: (, 100) and [100, )
region, _ := s.cluster.GetRegionByKey([]byte{50})
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(region.Id, newRegionID, []byte{100}, []uint64{newPeerID}, newPeerID)
txn := s.beginAsyncCommit(c)
var val [1024]byte
for i := byte(50); i < 120; i++ {
err := txn.Set([]byte{i}, val[:])
c.Assert(err, IsNil)
}
// Some duplicates.
for i := byte(50); i < 120; i += 10 {
err := txn.Set([]byte{i}, val[512:700])
c.Assert(err, IsNil)
}
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
mock := mockClient{inner: s.store.GetTiKVClient()}
s.store.SetTiKVClient(&mock)
ctx := context.Background()
// TODO remove this when minCommitTS is returned from mockStore prewrite response.
committer.SetMinCommitTS(committer.GetStartTS() + 10)
committer.SetNoFallBack()
err = committer.Execute(ctx)
c.Assert(err, IsNil)
c.Assert(mock.seenPrimaryReq > 0, IsTrue)
c.Assert(mock.seenSecondaryReq > 0, IsTrue)
}
func (s *testCommitterSuite) TestAsyncCommit(c *C) {
ctx := context.Background()
pk := []byte("tpk")
pkVal := []byte("pkVal")
k1 := []byte("tk1")
k1Val := []byte("k1Val")
txn1 := s.beginAsyncCommit(c)
err := txn1.Set(pk, pkVal)
c.Assert(err, IsNil)
err = txn1.Set(k1, k1Val)
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
committer.SetSessionID(1)
committer.SetMinCommitTS(txn1.StartTS() + 10)
err = committer.Execute(ctx)
c.Assert(err, IsNil)
s.checkValues(c, map[string]string{
string(pk): string(pkVal),
string(k1): string(k1Val),
})
}
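// updateGlobalConfig copies the current global config, applies f to the copy, and stores it back.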
func updateGlobalConfig(f func(conf *config.Config)) {
g := config.GetGlobalConfig()
newConf := *g
f(&newConf)
config.StoreGlobalConfig(&newConf)
}
// restoreGlobalConfFunc returns a function that restores the config to its current value.
func restoreGlobalConfFunc() (restore func()) {
g := config.GetGlobalConfig()
return func() {
config.StoreGlobalConfig(g)
}
}
func (s *testCommitterSuite) TestAsyncCommitCheck(c *C) {
defer restoreGlobalConfFunc()()
updateGlobalConfig(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.KeysLimit = 16
conf.TiKVClient.AsyncCommit.TotalKeySizeLimit = 64
})
txn := s.beginAsyncCommit(c)
buf := []byte{0, 0, 0, 0}
// Set 16 keys, each key is 4 bytes long. So the total size of keys is 64 bytes.
for i := 0; i < 16; i++ {
buf[0] = byte(i)
err := txn.Set(buf, []byte("v"))
c.Assert(err, IsNil)
}
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
c.Assert(committer.CheckAsyncCommit(), IsTrue)
updateGlobalConfig(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.KeysLimit = 15
})
c.Assert(committer.CheckAsyncCommit(), IsFalse)
updateGlobalConfig(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.KeysLimit = 20
conf.TiKVClient.AsyncCommit.TotalKeySizeLimit = 63
})
c.Assert(committer.CheckAsyncCommit(), IsFalse)
}
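// mockClient wraps the real TiKV client and records whether well-formed async commit
// prewrite requests were observed for the primary key and for the secondary keys.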
type mockClient struct {
inner tikv.Client
seenPrimaryReq uint32
seenSecondaryReq uint32
}
func (m *mockClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
// If we find a prewrite request, check if it satisfies our constraints.
if pr, ok := req.Req.(*kvrpcpb.PrewriteRequest); ok {
if pr.UseAsyncCommit {
if isPrimary(pr) {
				// The Secondaries list should not include the primary key, should contain no duplicates, and should cover all the other keys.
if !includesPrimary(pr) && allKeysNoDups(pr) {
atomic.StoreUint32(&m.seenPrimaryReq, 1)
}
} else {
				// The Secondaries list should only be sent with the primary key's request.
if len(pr.Secondaries) == 0 {
atomic.StoreUint32(&m.seenSecondaryReq, 1)
}
}
}
}
return m.inner.SendRequest(ctx, addr, req, timeout)
}
func (m *mockClient) Close() error {
return m.inner.Close()
}
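// isPrimary reports whether the prewrite request contains a mutation on its own primary lock key.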
func isPrimary(req *kvrpcpb.PrewriteRequest) bool {
for _, m := range req.Mutations {
if bytes.Equal(req.PrimaryLock, m.Key) {
return true
}
}
return false
}
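// includesPrimary reports whether the primary lock key appears in the request's Secondaries list.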
func includesPrimary(req *kvrpcpb.PrewriteRequest) bool {
for _, k := range req.Secondaries {
if bytes.Equal(req.PrimaryLock, k) {
return true
}
}
return false
}
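// allKeysNoDups checks that the Secondaries list contains no duplicates and covers every key
// written by the test except the primary key itself.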
func allKeysNoDups(req *kvrpcpb.PrewriteRequest) bool {
check := make(map[string]bool)
// Create the check map and check for duplicates.
for _, k := range req.Secondaries {
s := string(k)
if check[s] {
return false
}
check[s] = true
}
// Check every key is present.
for i := byte(50); i < 120; i++ {
k := []byte{i}
if !bytes.Equal(req.PrimaryLock, k) && !check[string(k)] {
return false
}
}
return true
}
| store/tikv/tests/2pc_test.go | 1 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.06128443777561188,
0.0013171621831133962,
0.00016257711104117334,
0.00017027434660121799,
0.007157760672271252
] |
{
"id": 5,
"code_window": [
"\n",
"\t\"github.com/pingcap/errors\"\n",
"\t\"github.com/pingcap/failpoint\"\n",
"\tpb \"github.com/pingcap/kvproto/pkg/kvrpcpb\"\n",
"\ttidbkv \"github.com/pingcap/tidb/kv\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/kv\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/logutil\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/metrics\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/tikvrpc\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "store/tikv/pessimistic.go",
"type": "replace",
"edit_start_line_idx": 25
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"math/rand"
. "github.com/pingcap/check"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
)
var _ = Suite(&testSuiteJoiner{})
type testSuiteJoiner struct{}
func (s *testSuiteJoiner) SetUpSuite(c *C) {
}
func (s *testSuiteJoiner) TestRequiredRows(c *C) {
joinTypes := []core.JoinType{core.InnerJoin, core.LeftOuterJoin, core.RightOuterJoin}
lTypes := [][]byte{
{mysql.TypeLong},
{mysql.TypeFloat},
{mysql.TypeLong, mysql.TypeFloat},
}
rTypes := lTypes
convertTypes := func(mysqlTypes []byte) []*types.FieldType {
fieldTypes := make([]*types.FieldType, 0, len(mysqlTypes))
for _, t := range mysqlTypes {
fieldTypes = append(fieldTypes, types.NewFieldType(t))
}
return fieldTypes
}
for _, joinType := range joinTypes {
for _, ltype := range lTypes {
for _, rtype := range rTypes {
maxChunkSize := defaultCtx().GetSessionVars().MaxChunkSize
lfields := convertTypes(ltype)
rfields := convertTypes(rtype)
outerRow := genTestChunk(maxChunkSize, 1, lfields).GetRow(0)
innerChk := genTestChunk(maxChunkSize, maxChunkSize, rfields)
var defaultInner []types.Datum
for i, f := range rfields {
defaultInner = append(defaultInner, innerChk.GetRow(0).GetDatum(i, f))
}
joiner := newJoiner(defaultCtx(), joinType, false, defaultInner, nil, lfields, rfields, nil)
fields := make([]*types.FieldType, 0, len(lfields)+len(rfields))
fields = append(fields, rfields...)
fields = append(fields, lfields...)
result := chunk.New(fields, maxChunkSize, maxChunkSize)
for i := 0; i < 10; i++ {
required := rand.Int()%maxChunkSize + 1
result.SetRequiredRows(required, maxChunkSize)
result.Reset()
it := chunk.NewIterator4Chunk(innerChk)
it.Begin()
_, _, err := joiner.tryToMatchInners(outerRow, it, result)
c.Assert(err, IsNil)
c.Assert(result.NumRows(), Equals, required)
}
}
}
}
}
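// genTestChunk builds a chunk containing numRows zero-valued rows for the given field types.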
func genTestChunk(maxChunkSize int, numRows int, fields []*types.FieldType) *chunk.Chunk {
chk := chunk.New(fields, maxChunkSize, maxChunkSize)
for numRows > 0 {
numRows--
for col, field := range fields {
switch field.Tp {
case mysql.TypeLong:
chk.AppendInt64(col, 0)
case mysql.TypeFloat:
chk.AppendFloat32(col, 0)
default:
panic("not support")
}
}
}
return chk
}
| executor/joiner_test.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0013809588272124529,
0.00028042710619047284,
0.0001650382182560861,
0.00016892296844162047,
0.0003480591403786093
] |
{
"id": 5,
"code_window": [
"\n",
"\t\"github.com/pingcap/errors\"\n",
"\t\"github.com/pingcap/failpoint\"\n",
"\tpb \"github.com/pingcap/kvproto/pkg/kvrpcpb\"\n",
"\ttidbkv \"github.com/pingcap/tidb/kv\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/kv\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/logutil\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/metrics\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/tikvrpc\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "store/tikv/pessimistic.go",
"type": "replace",
"edit_start_line_idx": 25
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"strings"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/coprocessor"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/model"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/expression/aggregation"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/planner/property"
"github.com/pingcap/tidb/planner/util"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tipb/go-tipb"
)
// PBPlanBuilder is used to build physical plans from DAG protocol buffers.
type PBPlanBuilder struct {
sctx sessionctx.Context
tps []*types.FieldType
is infoschema.InfoSchema
ranges []*coprocessor.KeyRange
}
// NewPBPlanBuilder creates a new pb plan builder.
func NewPBPlanBuilder(sctx sessionctx.Context, is infoschema.InfoSchema, ranges []*coprocessor.KeyRange) *PBPlanBuilder {
return &PBPlanBuilder{sctx: sctx, is: is, ranges: ranges}
}
// Build builds a physical plan from DAG protocol buffers.
func (b *PBPlanBuilder) Build(executors []*tipb.Executor) (p PhysicalPlan, err error) {
var src PhysicalPlan
for i := 0; i < len(executors); i++ {
curr, err := b.pbToPhysicalPlan(executors[i])
if err != nil {
return nil, errors.Trace(err)
}
if src != nil {
curr.SetChildren(src)
}
src = curr
}
_, src = b.predicatePushDown(src, nil)
return src, nil
}
func (b *PBPlanBuilder) pbToPhysicalPlan(e *tipb.Executor) (p PhysicalPlan, err error) {
switch e.Tp {
case tipb.ExecType_TypeTableScan:
p, err = b.pbToTableScan(e)
case tipb.ExecType_TypeSelection:
p, err = b.pbToSelection(e)
case tipb.ExecType_TypeTopN:
p, err = b.pbToTopN(e)
case tipb.ExecType_TypeLimit:
p, err = b.pbToLimit(e)
case tipb.ExecType_TypeAggregation:
p, err = b.pbToAgg(e, false)
case tipb.ExecType_TypeStreamAgg:
p, err = b.pbToAgg(e, true)
case tipb.ExecType_TypeKill:
p, err = b.pbToKill(e)
default:
// TODO: Support other types.
err = errors.Errorf("this exec type %v doesn't support yet.", e.GetTp())
}
return p, err
}
func (b *PBPlanBuilder) pbToTableScan(e *tipb.Executor) (PhysicalPlan, error) {
tblScan := e.TblScan
tbl, ok := b.is.TableByID(tblScan.TableId)
if !ok {
return nil, infoschema.ErrTableNotExists.GenWithStack("Table which ID = %d does not exist.", tblScan.TableId)
}
dbInfo, ok := b.is.SchemaByTable(tbl.Meta())
if !ok {
return nil, infoschema.ErrDatabaseNotExists.GenWithStack("Database of table ID = %d does not exist.", tblScan.TableId)
}
	// Currently, only cluster tables are supported.
if !tbl.Type().IsClusterTable() {
return nil, errors.Errorf("table %s is not a cluster table", tbl.Meta().Name.L)
}
columns, err := b.convertColumnInfo(tbl.Meta(), tblScan.Columns)
if err != nil {
return nil, err
}
schema := b.buildTableScanSchema(tbl.Meta(), columns)
p := PhysicalMemTable{
DBName: dbInfo.Name,
Table: tbl.Meta(),
Columns: columns,
}.Init(b.sctx, &property.StatsInfo{}, 0)
p.SetSchema(schema)
if strings.ToUpper(p.Table.Name.O) == infoschema.ClusterTableSlowLog {
extractor := &SlowQueryExtractor{}
extractor.Desc = tblScan.Desc
if b.ranges != nil {
err := extractor.buildTimeRangeFromKeyRange(b.ranges)
if err != nil {
return nil, err
}
}
p.Extractor = extractor
}
return p, nil
}
func (b *PBPlanBuilder) buildTableScanSchema(tblInfo *model.TableInfo, columns []*model.ColumnInfo) *expression.Schema {
schema := expression.NewSchema(make([]*expression.Column, 0, len(columns))...)
for _, col := range tblInfo.Columns {
for _, colInfo := range columns {
if col.ID != colInfo.ID {
continue
}
newCol := &expression.Column{
UniqueID: b.sctx.GetSessionVars().AllocPlanColumnID(),
ID: col.ID,
RetType: &col.FieldType,
}
schema.Append(newCol)
}
}
return schema
}
func (b *PBPlanBuilder) pbToSelection(e *tipb.Executor) (PhysicalPlan, error) {
conds, err := expression.PBToExprs(e.Selection.Conditions, b.tps, b.sctx.GetSessionVars().StmtCtx)
if err != nil {
return nil, err
}
p := PhysicalSelection{
Conditions: conds,
}.Init(b.sctx, &property.StatsInfo{}, 0, &property.PhysicalProperty{})
return p, nil
}
func (b *PBPlanBuilder) pbToTopN(e *tipb.Executor) (PhysicalPlan, error) {
topN := e.TopN
sc := b.sctx.GetSessionVars().StmtCtx
byItems := make([]*util.ByItems, 0, len(topN.OrderBy))
for _, item := range topN.OrderBy {
expr, err := expression.PBToExpr(item.Expr, b.tps, sc)
if err != nil {
return nil, errors.Trace(err)
}
byItems = append(byItems, &util.ByItems{Expr: expr, Desc: item.Desc})
}
p := PhysicalTopN{
ByItems: byItems,
Count: topN.Limit,
}.Init(b.sctx, &property.StatsInfo{}, 0, &property.PhysicalProperty{})
return p, nil
}
func (b *PBPlanBuilder) pbToLimit(e *tipb.Executor) (PhysicalPlan, error) {
p := PhysicalLimit{
Count: e.Limit.Limit,
}.Init(b.sctx, &property.StatsInfo{}, 0, &property.PhysicalProperty{})
return p, nil
}
func (b *PBPlanBuilder) pbToAgg(e *tipb.Executor, isStreamAgg bool) (PhysicalPlan, error) {
aggFuncs, groupBys, err := b.getAggInfo(e)
if err != nil {
return nil, errors.Trace(err)
}
schema := b.buildAggSchema(aggFuncs, groupBys)
baseAgg := basePhysicalAgg{
AggFuncs: aggFuncs,
GroupByItems: groupBys,
}
baseAgg.schema = schema
var partialAgg PhysicalPlan
if isStreamAgg {
partialAgg = baseAgg.initForStream(b.sctx, &property.StatsInfo{}, 0, &property.PhysicalProperty{})
} else {
partialAgg = baseAgg.initForHash(b.sctx, &property.StatsInfo{}, 0, &property.PhysicalProperty{})
}
return partialAgg, nil
}
func (b *PBPlanBuilder) buildAggSchema(aggFuncs []*aggregation.AggFuncDesc, groupBys []expression.Expression) *expression.Schema {
schema := expression.NewSchema(make([]*expression.Column, 0, len(aggFuncs)+len(groupBys))...)
for _, agg := range aggFuncs {
newCol := &expression.Column{
UniqueID: b.sctx.GetSessionVars().AllocPlanColumnID(),
RetType: agg.RetTp,
}
schema.Append(newCol)
}
return schema
}
func (b *PBPlanBuilder) getAggInfo(executor *tipb.Executor) ([]*aggregation.AggFuncDesc, []expression.Expression, error) {
var err error
aggFuncs := make([]*aggregation.AggFuncDesc, 0, len(executor.Aggregation.AggFunc))
for _, expr := range executor.Aggregation.AggFunc {
aggFunc, err := aggregation.PBExprToAggFuncDesc(b.sctx, expr, b.tps)
if err != nil {
return nil, nil, errors.Trace(err)
}
aggFuncs = append(aggFuncs, aggFunc)
}
groupBys, err := expression.PBToExprs(executor.Aggregation.GetGroupBy(), b.tps, b.sctx.GetSessionVars().StmtCtx)
if err != nil {
return nil, nil, errors.Trace(err)
}
return aggFuncs, groupBys, nil
}
func (b *PBPlanBuilder) convertColumnInfo(tblInfo *model.TableInfo, pbColumns []*tipb.ColumnInfo) ([]*model.ColumnInfo, error) {
columns := make([]*model.ColumnInfo, 0, len(pbColumns))
tps := make([]*types.FieldType, 0, len(pbColumns))
for _, col := range pbColumns {
found := false
for _, colInfo := range tblInfo.Columns {
if col.ColumnId == colInfo.ID {
columns = append(columns, colInfo)
tps = append(tps, colInfo.FieldType.Clone())
found = true
break
}
}
if !found {
return nil, errors.Errorf("Column ID %v of table %v not found", col.ColumnId, tblInfo.Name.L)
}
}
b.tps = tps
return columns, nil
}
func (b *PBPlanBuilder) pbToKill(e *tipb.Executor) (PhysicalPlan, error) {
node := &ast.KillStmt{
ConnectionID: e.Kill.ConnID,
Query: e.Kill.Query,
}
simple := Simple{Statement: node, IsFromRemote: true}
return &PhysicalSimpleWrapper{Inner: simple}, nil
}
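// predicatePushDown pushes the given predicates down the plan tree built from PB executors:
// for a PhysicalMemTable the conditions are handed to its extractor, and a PhysicalSelection is
// kept only if some of its conditions remain after being pushed down.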
func (b *PBPlanBuilder) predicatePushDown(p PhysicalPlan, predicates []expression.Expression) ([]expression.Expression, PhysicalPlan) {
if p == nil {
return predicates, p
}
switch p.(type) {
case *PhysicalMemTable:
memTable := p.(*PhysicalMemTable)
if memTable.Extractor == nil {
return predicates, p
}
names := make([]*types.FieldName, 0, len(memTable.Columns))
for _, col := range memTable.Columns {
names = append(names, &types.FieldName{
TblName: memTable.Table.Name,
ColName: col.Name,
OrigTblName: memTable.Table.Name,
OrigColName: col.Name,
})
}
// Set the expression column unique ID.
		// Since the expression is built from PB, it does not have the expression column ID set yet.
schemaCols := memTable.schema.Columns
cols := expression.ExtractColumnsFromExpressions([]*expression.Column{}, predicates, nil)
for i := range cols {
cols[i].UniqueID = schemaCols[cols[i].Index].UniqueID
}
predicates = memTable.Extractor.Extract(b.sctx, memTable.schema, names, predicates)
return predicates, memTable
case *PhysicalSelection:
selection := p.(*PhysicalSelection)
conditions, child := b.predicatePushDown(p.Children()[0], selection.Conditions)
if len(conditions) > 0 {
selection.Conditions = conditions
selection.SetChildren(child)
return predicates, selection
}
return predicates, child
default:
if children := p.Children(); len(children) > 0 {
_, child := b.predicatePushDown(children[0], nil)
p.SetChildren(child)
}
return predicates, p
}
}
| planner/core/pb_to_plan.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.004351307637989521,
0.00036362395621836185,
0.00016515595780219883,
0.00017089216271415353,
0.0007460913620889187
] |
{
"id": 5,
"code_window": [
"\n",
"\t\"github.com/pingcap/errors\"\n",
"\t\"github.com/pingcap/failpoint\"\n",
"\tpb \"github.com/pingcap/kvproto/pkg/kvrpcpb\"\n",
"\ttidbkv \"github.com/pingcap/tidb/kv\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/kv\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/logutil\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/metrics\"\n",
"\t\"github.com/pingcap/tidb/store/tikv/tikvrpc\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "store/tikv/pessimistic.go",
"type": "replace",
"edit_start_line_idx": 25
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package privileges
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"strings"
"sync"
"github.com/pingcap/parser/auth"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/infoschema/perfschema"
"github.com/pingcap/tidb/privilege"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/sem"
"go.uber.org/zap"
)
// SkipWithGrant causes the server to start without using the privilege system at all.
var SkipWithGrant = false
var _ privilege.Manager = (*UserPrivileges)(nil)
var dynamicPrivs = []string{
"BACKUP_ADMIN",
"SYSTEM_VARIABLES_ADMIN",
"ROLE_ADMIN",
"CONNECTION_ADMIN",
"RESTRICTED_TABLES_ADMIN",
}
var dynamicPrivLock sync.Mutex
// UserPrivileges implements privilege.Manager interface.
// This is used to check privilege for the current user.
type UserPrivileges struct {
user string
host string
*Handle
}
// RequestDynamicVerification implements the Manager interface.
func (p *UserPrivileges) RequestDynamicVerification(activeRoles []*auth.RoleIdentity, privName string, grantable bool) bool {
if SkipWithGrant {
return true
}
if p.user == "" && p.host == "" {
return true
}
mysqlPriv := p.Handle.Get()
return mysqlPriv.RequestDynamicVerification(activeRoles, p.user, p.host, privName, grantable)
}
// RequestVerification implements the Manager interface.
func (p *UserPrivileges) RequestVerification(activeRoles []*auth.RoleIdentity, db, table, column string, priv mysql.PrivilegeType) bool {
if SkipWithGrant {
return true
}
if p.user == "" && p.host == "" {
return true
}
// Skip check for system databases.
// See https://dev.mysql.com/doc/refman/5.7/en/information-schema.html
dbLowerName := strings.ToLower(db)
tblLowerName := strings.ToLower(table)
	// If SEM is enabled and the user does not have the RESTRICTED_TABLES_ADMIN privilege,
	// some hard rules override system tables and schemas to be read-only at most.
if sem.IsEnabled() && !p.RequestDynamicVerification(activeRoles, "RESTRICTED_TABLES_ADMIN", false) {
if sem.IsInvisibleTable(dbLowerName, tblLowerName) {
return false
}
if util.IsMemOrSysDB(dbLowerName) {
switch priv {
case mysql.CreatePriv, mysql.AlterPriv, mysql.DropPriv, mysql.IndexPriv, mysql.CreateViewPriv,
mysql.InsertPriv, mysql.UpdatePriv, mysql.DeletePriv:
return false
}
}
}
switch dbLowerName {
case util.InformationSchemaName.L:
switch priv {
case mysql.CreatePriv, mysql.AlterPriv, mysql.DropPriv, mysql.IndexPriv, mysql.CreateViewPriv,
mysql.InsertPriv, mysql.UpdatePriv, mysql.DeletePriv:
return false
}
return true
	// We should be very careful about limiting privileges, so ignore `mysql` for now.
case util.PerformanceSchemaName.L, util.MetricSchemaName.L:
if (dbLowerName == util.PerformanceSchemaName.L && perfschema.IsPredefinedTable(table)) ||
(dbLowerName == util.MetricSchemaName.L && infoschema.IsMetricTable(table)) {
switch priv {
case mysql.CreatePriv, mysql.AlterPriv, mysql.DropPriv, mysql.IndexPriv, mysql.InsertPriv, mysql.UpdatePriv, mysql.DeletePriv:
return false
case mysql.SelectPriv:
return true
}
}
}
mysqlPriv := p.Handle.Get()
return mysqlPriv.RequestVerification(activeRoles, p.user, p.host, db, table, column, priv)
}
// RequestVerificationWithUser implements the Manager interface.
func (p *UserPrivileges) RequestVerificationWithUser(db, table, column string, priv mysql.PrivilegeType, user *auth.UserIdentity) bool {
if SkipWithGrant {
return true
}
if user == nil {
return false
}
// Skip check for INFORMATION_SCHEMA database.
// See https://dev.mysql.com/doc/refman/5.7/en/information-schema.html
if strings.EqualFold(db, "INFORMATION_SCHEMA") {
return true
}
mysqlPriv := p.Handle.Get()
return mysqlPriv.RequestVerification(nil, user.Username, user.Hostname, db, table, column, priv)
}
// GetEncodedPassword implements the Manager interface.
func (p *UserPrivileges) GetEncodedPassword(user, host string) string {
mysqlPriv := p.Handle.Get()
record := mysqlPriv.connectionVerification(user, host)
if record == nil {
logutil.BgLogger().Error("get user privilege record fail",
zap.String("user", user), zap.String("host", host))
return ""
}
pwd := record.AuthenticationString
if len(pwd) != 0 && len(pwd) != mysql.PWDHashLen+1 {
logutil.BgLogger().Error("user password from system DB not like sha1sum", zap.String("user", user))
return ""
}
return pwd
}
// GetAuthWithoutVerification implements the Manager interface.
func (p *UserPrivileges) GetAuthWithoutVerification(user, host string) (u string, h string, success bool) {
if SkipWithGrant {
p.user = user
p.host = host
success = true
return
}
mysqlPriv := p.Handle.Get()
record := mysqlPriv.connectionVerification(user, host)
if record == nil {
logutil.BgLogger().Error("get user privilege record fail",
zap.String("user", user), zap.String("host", host))
return
}
u = record.User
h = record.Host
p.user = user
p.host = h
success = true
return
}
// ConnectionVerification implements the Manager interface.
func (p *UserPrivileges) ConnectionVerification(user, host string, authentication, salt []byte, tlsState *tls.ConnectionState) (u string, h string, success bool) {
if SkipWithGrant {
p.user = user
p.host = host
success = true
return
}
mysqlPriv := p.Handle.Get()
record := mysqlPriv.connectionVerification(user, host)
if record == nil {
logutil.BgLogger().Error("get user privilege record fail",
zap.String("user", user), zap.String("host", host))
return
}
u = record.User
h = record.Host
globalPriv := mysqlPriv.matchGlobalPriv(user, host)
if globalPriv != nil {
if !p.checkSSL(globalPriv, tlsState) {
logutil.BgLogger().Error("global priv check ssl fail",
zap.String("user", user), zap.String("host", host))
success = false
return
}
}
	// Logging in to a locked account is not allowed.
locked := record.AccountLocked
if locked {
logutil.BgLogger().Error("try to login a locked account",
zap.String("user", user), zap.String("host", host))
success = false
return
}
pwd := record.AuthenticationString
if len(pwd) != 0 && len(pwd) != mysql.PWDHashLen+1 {
logutil.BgLogger().Error("user password from system DB not like sha1sum", zap.String("user", user))
return
}
// empty password
if len(pwd) == 0 && len(authentication) == 0 {
p.user = user
p.host = h
success = true
return
}
if len(pwd) == 0 || len(authentication) == 0 {
return
}
hpwd, err := auth.DecodePassword(pwd)
if err != nil {
logutil.BgLogger().Error("decode password string failed", zap.Error(err))
return
}
if !auth.CheckScrambledPassword(salt, hpwd, authentication) {
return
}
p.user = user
p.host = h
success = true
return
}
type checkResult int
const (
notCheck checkResult = iota
pass
fail
)
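// checkSSL verifies the TLS state of the connection against the user's ssl_type requirement:
// none, any, x509, or a specified issuer/subject/cipher/SAN combination.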
func (p *UserPrivileges) checkSSL(priv *globalPrivRecord, tlsState *tls.ConnectionState) bool {
if priv.Broken {
logutil.BgLogger().Info("ssl check failure, due to broken global_priv record",
zap.String("user", priv.User), zap.String("host", priv.Host))
return false
}
switch priv.Priv.SSLType {
case SslTypeNotSpecified, SslTypeNone:
return true
case SslTypeAny:
r := tlsState != nil
if !r {
logutil.BgLogger().Info("ssl check failure, require ssl but not use ssl",
zap.String("user", priv.User), zap.String("host", priv.Host))
}
return r
case SslTypeX509:
if tlsState == nil {
logutil.BgLogger().Info("ssl check failure, require x509 but not use ssl",
zap.String("user", priv.User), zap.String("host", priv.Host))
return false
}
hasCert := false
for _, chain := range tlsState.VerifiedChains {
if len(chain) > 0 {
hasCert = true
break
}
}
if !hasCert {
logutil.BgLogger().Info("ssl check failure, require x509 but no verified cert",
zap.String("user", priv.User), zap.String("host", priv.Host))
}
return hasCert
case SslTypeSpecified:
if tlsState == nil {
logutil.BgLogger().Info("ssl check failure, require subject/issuer/cipher but not use ssl",
zap.String("user", priv.User), zap.String("host", priv.Host))
return false
}
if len(priv.Priv.SSLCipher) > 0 && priv.Priv.SSLCipher != util.TLSCipher2String(tlsState.CipherSuite) {
logutil.BgLogger().Info("ssl check failure for cipher", zap.String("user", priv.User), zap.String("host", priv.Host),
zap.String("require", priv.Priv.SSLCipher), zap.String("given", util.TLSCipher2String(tlsState.CipherSuite)))
return false
}
var (
hasCert = false
matchIssuer checkResult
matchSubject checkResult
matchSAN checkResult
)
for _, chain := range tlsState.VerifiedChains {
if len(chain) == 0 {
continue
}
cert := chain[0]
if len(priv.Priv.X509Issuer) > 0 {
given := util.X509NameOnline(cert.Issuer)
if priv.Priv.X509Issuer == given {
matchIssuer = pass
} else if matchIssuer == notCheck {
matchIssuer = fail
logutil.BgLogger().Info("ssl check failure for issuer", zap.String("user", priv.User), zap.String("host", priv.Host),
zap.String("require", priv.Priv.X509Issuer), zap.String("given", given))
}
}
if len(priv.Priv.X509Subject) > 0 {
given := util.X509NameOnline(cert.Subject)
if priv.Priv.X509Subject == given {
matchSubject = pass
} else if matchSubject == notCheck {
matchSubject = fail
logutil.BgLogger().Info("ssl check failure for subject", zap.String("user", priv.User), zap.String("host", priv.Host),
zap.String("require", priv.Priv.X509Subject), zap.String("given", given))
}
}
if len(priv.Priv.SANs) > 0 {
matchOne := checkCertSAN(priv, cert, priv.Priv.SANs)
if matchOne {
matchSAN = pass
} else if matchSAN == notCheck {
matchSAN = fail
}
}
hasCert = true
}
checkResult := hasCert && matchIssuer != fail && matchSubject != fail && matchSAN != fail
if !checkResult && !hasCert {
logutil.BgLogger().Info("ssl check failure, require issuer/subject/SAN but no verified cert",
zap.String("user", priv.User), zap.String("host", priv.Host))
}
return checkResult
default:
panic(fmt.Sprintf("support ssl_type: %d", priv.Priv.SSLType))
}
}
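// checkCertSAN checks the client certificate against the required SANs; for each required SAN type,
// at least one required value must match a value in the certificate, and unsupported types are skipped.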
func checkCertSAN(priv *globalPrivRecord, cert *x509.Certificate, sans map[util.SANType][]string) (r bool) {
r = true
for typ, requireOr := range sans {
var (
unsupported bool
given []string
)
switch typ {
case util.URI:
for _, uri := range cert.URIs {
given = append(given, uri.String())
}
case util.DNS:
given = cert.DNSNames
case util.IP:
for _, ip := range cert.IPAddresses {
given = append(given, ip.String())
}
default:
unsupported = true
}
if unsupported {
logutil.BgLogger().Warn("skip unsupported SAN type", zap.String("type", string(typ)),
zap.String("user", priv.User), zap.String("host", priv.Host))
continue
}
var givenMatchOne bool
for _, req := range requireOr {
for _, give := range given {
if req == give {
givenMatchOne = true
break
}
}
}
if !givenMatchOne {
logutil.BgLogger().Info("ssl check failure for subject", zap.String("user", priv.User), zap.String("host", priv.Host),
zap.String("require", priv.Priv.SAN), zap.Strings("given", given), zap.String("type", string(typ)))
r = false
return
}
}
return
}
// DBIsVisible implements the Manager interface.
func (p *UserPrivileges) DBIsVisible(activeRoles []*auth.RoleIdentity, db string) bool {
if SkipWithGrant {
return true
}
	// If SEM is enabled, respect hard rules about certain schemas being invisible
	// before checking whether the user has permissions granted to them.
if sem.IsEnabled() && !p.RequestDynamicVerification(activeRoles, "RESTRICTED_TABLES_ADMIN", false) {
if sem.IsInvisibleSchema(db) {
return false
}
}
mysqlPriv := p.Handle.Get()
if mysqlPriv.DBIsVisible(p.user, p.host, db) {
return true
}
allRoles := mysqlPriv.FindAllRole(activeRoles)
for _, role := range allRoles {
if mysqlPriv.DBIsVisible(role.Username, role.Hostname, db) {
return true
}
}
return false
}
// UserPrivilegesTable implements the Manager interface.
func (p *UserPrivileges) UserPrivilegesTable() [][]types.Datum {
mysqlPriv := p.Handle.Get()
return mysqlPriv.UserPrivilegesTable()
}
// ShowGrants implements privilege.Manager ShowGrants interface.
func (p *UserPrivileges) ShowGrants(ctx sessionctx.Context, user *auth.UserIdentity, roles []*auth.RoleIdentity) (grants []string, err error) {
if SkipWithGrant {
return nil, ErrNonexistingGrant.GenWithStackByArgs("root", "%")
}
mysqlPrivilege := p.Handle.Get()
u := user.Username
h := user.Hostname
if len(user.AuthUsername) > 0 && len(user.AuthHostname) > 0 {
u = user.AuthUsername
h = user.AuthHostname
}
grants = mysqlPrivilege.showGrants(u, h, roles)
if len(grants) == 0 {
err = ErrNonexistingGrant.GenWithStackByArgs(u, h)
}
return
}
// ActiveRoles implements privilege.Manager ActiveRoles interface.
func (p *UserPrivileges) ActiveRoles(ctx sessionctx.Context, roleList []*auth.RoleIdentity) (bool, string) {
if SkipWithGrant {
return true, ""
}
mysqlPrivilege := p.Handle.Get()
u := p.user
h := p.host
for _, r := range roleList {
ok := mysqlPrivilege.FindRole(u, h, r)
if !ok {
logutil.BgLogger().Error("find role failed", zap.Stringer("role", r))
return false, r.String()
}
}
ctx.GetSessionVars().ActiveRoles = roleList
return true, ""
}
// FindEdge implements privilege.Manager FindRelationship interface.
func (p *UserPrivileges) FindEdge(ctx sessionctx.Context, role *auth.RoleIdentity, user *auth.UserIdentity) bool {
if SkipWithGrant {
return false
}
mysqlPrivilege := p.Handle.Get()
ok := mysqlPrivilege.FindRole(user.Username, user.Hostname, role)
if !ok {
logutil.BgLogger().Error("find role failed", zap.Stringer("role", role))
return false
}
return true
}
// GetDefaultRoles returns all default roles for a certain user.
func (p *UserPrivileges) GetDefaultRoles(user, host string) []*auth.RoleIdentity {
if SkipWithGrant {
return make([]*auth.RoleIdentity, 0, 10)
}
mysqlPrivilege := p.Handle.Get()
ret := mysqlPrivilege.getDefaultRoles(user, host)
return ret
}
// GetAllRoles returns all roles of the user.
func (p *UserPrivileges) GetAllRoles(user, host string) []*auth.RoleIdentity {
if SkipWithGrant {
return make([]*auth.RoleIdentity, 0, 10)
}
mysqlPrivilege := p.Handle.Get()
return mysqlPrivilege.getAllRoles(user, host)
}
// IsDynamicPrivilege returns true if the DYNAMIC privilege is built-in or has been registered by a plugin
func (p *UserPrivileges) IsDynamicPrivilege(privNameInUpper string) bool {
for _, priv := range dynamicPrivs {
if privNameInUpper == priv {
return true
}
}
return false
}
// RegisterDynamicPrivilege is used by plugins to add new privileges to TiDB
func RegisterDynamicPrivilege(privNameInUpper string) error {
dynamicPrivLock.Lock()
defer dynamicPrivLock.Unlock()
for _, priv := range dynamicPrivs {
if privNameInUpper == priv {
return errors.New("privilege is already registered")
}
}
dynamicPrivs = append(dynamicPrivs, privNameInUpper)
return nil
}
| privilege/privileges/privileges.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0020751829724758863,
0.00022376514971256256,
0.00016295378736685961,
0.00016743180458433926,
0.00026593980146571994
] |
{
"id": 6,
"code_window": [
"\t\"go.uber.org/zap\"\n",
")\n",
"\n",
"type actionPessimisticLock struct {\n",
"\t*kv.LockCtx\n",
"}\n",
"type actionPessimisticRollback struct{}\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// Used for pessimistic lock wait time\n",
"// these two constants are special for lock protocol with tikv\n",
"// 0 means always wait, -1 means nowait, others meaning lock wait in milliseconds\n",
"var (\n",
"\tLockAlwaysWait = int64(0)\n",
"\tLockNoWait = int64(-1)\n",
")\n",
"\n"
],
"file_path": "store/tikv/pessimistic.go",
"type": "add",
"edit_start_line_idx": 34
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"context"
"fmt"
"math"
"runtime"
"runtime/trace"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/cznic/mathutil"
"github.com/opentracing/opentracing-go"
"github.com/pingcap/errors"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/auth"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/domain/infosync"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/planner"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/privilege"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
tikvstore "github.com/pingcap/tidb/store/tikv/kv"
tikvutil "github.com/pingcap/tidb/store/tikv/util"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/admin"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/disk"
"github.com/pingcap/tidb/util/execdetails"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/memory"
"go.uber.org/zap"
)
var (
_ Executor = &baseExecutor{}
_ Executor = &CheckTableExec{}
_ Executor = &HashAggExec{}
_ Executor = &HashJoinExec{}
_ Executor = &IndexLookUpExecutor{}
_ Executor = &IndexReaderExecutor{}
_ Executor = &LimitExec{}
_ Executor = &MaxOneRowExec{}
_ Executor = &MergeJoinExec{}
_ Executor = &ProjectionExec{}
_ Executor = &SelectionExec{}
_ Executor = &SelectLockExec{}
_ Executor = &ShowNextRowIDExec{}
_ Executor = &ShowDDLExec{}
_ Executor = &ShowDDLJobsExec{}
_ Executor = &ShowDDLJobQueriesExec{}
_ Executor = &SortExec{}
_ Executor = &StreamAggExec{}
_ Executor = &TableDualExec{}
_ Executor = &TableReaderExecutor{}
_ Executor = &TableScanExec{}
_ Executor = &TopNExec{}
_ Executor = &UnionExec{}
	// GlobalMemoryUsageTracker is the ancestor of all the Executors' memory trackers and of the GlobalMemory Tracker
GlobalMemoryUsageTracker *memory.Tracker
// GlobalDiskUsageTracker is the ancestor of all the Executors' disk tracker
GlobalDiskUsageTracker *disk.Tracker
)
type baseExecutor struct {
ctx sessionctx.Context
id int
schema *expression.Schema // output schema
initCap int
maxChunkSize int
children []Executor
retFieldTypes []*types.FieldType
runtimeStats *execdetails.BasicRuntimeStats
}
const (
// globalPanicStorageExceed represents the panic message when out of storage quota.
globalPanicStorageExceed string = "Out Of Global Storage Quota!"
// globalPanicMemoryExceed represents the panic message when out of memory limit.
globalPanicMemoryExceed string = "Out Of Global Memory Limit!"
)
// globalPanicOnExceed panics when GlobalDiskUsageTracker storage usage exceeds the storage quota.
type globalPanicOnExceed struct {
memory.BaseOOMAction
mutex sync.Mutex // For synchronization.
}
func init() {
action := &globalPanicOnExceed{}
GlobalMemoryUsageTracker = memory.NewGlobalTracker(memory.LabelForGlobalMemory, -1)
GlobalMemoryUsageTracker.SetActionOnExceed(action)
GlobalDiskUsageTracker = disk.NewGlobalTrcaker(memory.LabelForGlobalStorage, -1)
GlobalDiskUsageTracker.SetActionOnExceed(action)
}
// SetLogHook sets a hook for PanicOnExceed.
func (a *globalPanicOnExceed) SetLogHook(hook func(uint64)) {}
// Action panics when storage usage exceeds storage quota.
func (a *globalPanicOnExceed) Action(t *memory.Tracker) {
a.mutex.Lock()
defer a.mutex.Unlock()
msg := ""
switch t.Label() {
case memory.LabelForGlobalStorage:
msg = globalPanicStorageExceed
case memory.LabelForGlobalMemory:
msg = globalPanicMemoryExceed
default:
msg = "Out of Unknown Resource Quota!"
}
panic(msg)
}
// GetPriority get the priority of the Action
func (a *globalPanicOnExceed) GetPriority() int64 {
return memory.DefPanicPriority
}
// base returns the baseExecutor of an executor, don't override this method!
func (e *baseExecutor) base() *baseExecutor {
return e
}
// Open initializes children recursively and "childrenResults" according to children's schemas.
func (e *baseExecutor) Open(ctx context.Context) error {
for _, child := range e.children {
err := child.Open(ctx)
if err != nil {
return err
}
}
return nil
}
// Close closes all executors and release all resources.
func (e *baseExecutor) Close() error {
var firstErr error
for _, src := range e.children {
if err := src.Close(); err != nil && firstErr == nil {
firstErr = err
}
}
return firstErr
}
// Schema returns the current baseExecutor's schema. If it is nil, then create and return a new one.
func (e *baseExecutor) Schema() *expression.Schema {
if e.schema == nil {
return expression.NewSchema()
}
return e.schema
}
// newFirstChunk creates a new chunk to buffer current executor's result.
func newFirstChunk(e Executor) *chunk.Chunk {
base := e.base()
return chunk.New(base.retFieldTypes, base.initCap, base.maxChunkSize)
}
// newList creates a new List to buffer current executor's result.
func newList(e Executor) *chunk.List {
base := e.base()
return chunk.NewList(base.retFieldTypes, base.initCap, base.maxChunkSize)
}
// retTypes returns all output column types.
func retTypes(e Executor) []*types.FieldType {
base := e.base()
return base.retFieldTypes
}
// Next fills multiple rows into a chunk.
func (e *baseExecutor) Next(ctx context.Context, req *chunk.Chunk) error {
return nil
}
func (e *baseExecutor) updateDeltaForTableID(id int64) {
txnCtx := e.ctx.GetSessionVars().TxnCtx
txnCtx.UpdateDeltaForTable(id, 0, 0, map[int64]int64{})
}
func newBaseExecutor(ctx sessionctx.Context, schema *expression.Schema, id int, children ...Executor) baseExecutor {
e := baseExecutor{
children: children,
ctx: ctx,
id: id,
schema: schema,
initCap: ctx.GetSessionVars().InitChunkSize,
maxChunkSize: ctx.GetSessionVars().MaxChunkSize,
}
if ctx.GetSessionVars().StmtCtx.RuntimeStatsColl != nil {
if e.id > 0 {
e.runtimeStats = &execdetails.BasicRuntimeStats{}
e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(id, e.runtimeStats)
}
}
if schema != nil {
cols := schema.Columns
e.retFieldTypes = make([]*types.FieldType, len(cols))
for i := range cols {
e.retFieldTypes[i] = cols[i].RetType
}
}
return e
}
// Executor is the physical implementation of an algebra operator.
//
// In TiDB, all algebra operators are implemented as iterators, i.e., they
// support a simple Open-Next-Close protocol. See this paper for more details:
//
// "Volcano-An Extensible and Parallel Query Evaluation System"
//
// Different from Volcano's execution model, a "Next" function call in TiDB will
// return a batch of rows, rather than a single row as in Volcano.
// NOTE: Executors must call "chk.Reset()" before appending their results to it.
type Executor interface {
base() *baseExecutor
Open(context.Context) error
Next(ctx context.Context, req *chunk.Chunk) error
Close() error
Schema() *expression.Schema
}
// Next is a wrapper function on e.Next(); it handles some common code.
func Next(ctx context.Context, e Executor, req *chunk.Chunk) error {
base := e.base()
if base.runtimeStats != nil {
start := time.Now()
defer func() { base.runtimeStats.Record(time.Since(start), req.NumRows()) }()
}
sessVars := base.ctx.GetSessionVars()
if atomic.LoadUint32(&sessVars.Killed) == 1 {
return ErrQueryInterrupted
}
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span1 := span.Tracer().StartSpan(fmt.Sprintf("%T.Next", e), opentracing.ChildOf(span.Context()))
defer span1.Finish()
ctx = opentracing.ContextWithSpan(ctx, span1)
}
if trace.IsEnabled() {
defer trace.StartRegion(ctx, fmt.Sprintf("%T.Next", e)).End()
}
err := e.Next(ctx, req)
if err != nil {
return err
}
// recheck whether the session/query is killed during the Next()
if atomic.LoadUint32(&sessVars.Killed) == 1 {
err = ErrQueryInterrupted
}
return err
}
// CancelDDLJobsExec represents a cancel DDL jobs executor.
type CancelDDLJobsExec struct {
baseExecutor
cursor int
jobIDs []int64
errs []error
}
// Next implements the Executor Next interface.
func (e *CancelDDLJobsExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.GrowAndReset(e.maxChunkSize)
if e.cursor >= len(e.jobIDs) {
return nil
}
numCurBatch := mathutil.Min(req.Capacity(), len(e.jobIDs)-e.cursor)
for i := e.cursor; i < e.cursor+numCurBatch; i++ {
req.AppendString(0, fmt.Sprintf("%d", e.jobIDs[i]))
if e.errs[i] != nil {
req.AppendString(1, fmt.Sprintf("error: %v", e.errs[i]))
} else {
req.AppendString(1, "successful")
}
}
e.cursor += numCurBatch
return nil
}
// ShowNextRowIDExec represents an executor that shows the next row ID.
type ShowNextRowIDExec struct {
baseExecutor
tblName *ast.TableName
done bool
}
// Next implements the Executor Next interface.
func (e *ShowNextRowIDExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.Reset()
if e.done {
return nil
}
is := domain.GetDomain(e.ctx).InfoSchema()
tbl, err := is.TableByName(e.tblName.Schema, e.tblName.Name)
if err != nil {
return err
}
tblMeta := tbl.Meta()
allocators := tbl.Allocators(e.ctx)
for _, alloc := range allocators {
nextGlobalID, err := alloc.NextGlobalAutoID(tblMeta.ID)
if err != nil {
return err
}
var colName, idType string
switch alloc.GetType() {
case autoid.RowIDAllocType, autoid.AutoIncrementType:
idType = "AUTO_INCREMENT"
if col := tblMeta.GetAutoIncrementColInfo(); col != nil {
colName = col.Name.O
} else {
colName = model.ExtraHandleName.O
}
case autoid.AutoRandomType:
idType = "AUTO_RANDOM"
colName = tblMeta.GetPkName().O
case autoid.SequenceType:
idType = "SEQUENCE"
colName = ""
default:
return autoid.ErrInvalidAllocatorType.GenWithStackByArgs()
}
req.AppendString(0, e.tblName.Schema.O)
req.AppendString(1, e.tblName.Name.O)
req.AppendString(2, colName)
req.AppendInt64(3, nextGlobalID)
req.AppendString(4, idType)
}
e.done = true
return nil
}
// ShowDDLExec represents a show DDL executor.
type ShowDDLExec struct {
baseExecutor
ddlOwnerID string
selfID string
ddlInfo *admin.DDLInfo
done bool
}
// Next implements the Executor Next interface.
func (e *ShowDDLExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.Reset()
if e.done {
return nil
}
ddlJobs := ""
query := ""
l := len(e.ddlInfo.Jobs)
for i, job := range e.ddlInfo.Jobs {
ddlJobs += job.String()
query += job.Query
if i != l-1 {
ddlJobs += "\n"
query += "\n"
}
}
serverInfo, err := infosync.GetServerInfoByID(ctx, e.ddlOwnerID)
if err != nil {
return err
}
serverAddress := serverInfo.IP + ":" +
strconv.FormatUint(uint64(serverInfo.Port), 10)
req.AppendInt64(0, e.ddlInfo.SchemaVer)
req.AppendString(1, e.ddlOwnerID)
req.AppendString(2, serverAddress)
req.AppendString(3, ddlJobs)
req.AppendString(4, e.selfID)
req.AppendString(5, query)
e.done = true
return nil
}
// ShowDDLJobsExec represents a show DDL jobs executor.
type ShowDDLJobsExec struct {
baseExecutor
DDLJobRetriever
jobNumber int
is infoschema.InfoSchema
}
// DDLJobRetriever retrieves the DDL jobs.
// nolint:structcheck
type DDLJobRetriever struct {
runningJobs []*model.Job
historyJobIter *meta.LastJobIterator
cursor int
is infoschema.InfoSchema
activeRoles []*auth.RoleIdentity
cacheJobs []*model.Job
}
func (e *DDLJobRetriever) initial(txn kv.Transaction) error {
jobs, err := admin.GetDDLJobs(txn)
if err != nil {
return err
}
m := meta.NewMeta(txn)
e.historyJobIter, err = m.GetLastHistoryDDLJobsIterator()
if err != nil {
return err
}
e.runningJobs = jobs
e.cursor = 0
return nil
}
func (e *DDLJobRetriever) appendJobToChunk(req *chunk.Chunk, job *model.Job, checker privilege.Manager) {
schemaName := job.SchemaName
tableName := ""
finishTS := uint64(0)
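	// Prefer the schema/table names recorded in the job's binlog info; fall back to an info schema lookup below.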
if job.BinlogInfo != nil {
finishTS = job.BinlogInfo.FinishedTS
if job.BinlogInfo.TableInfo != nil {
tableName = job.BinlogInfo.TableInfo.Name.L
}
if len(schemaName) == 0 && job.BinlogInfo.DBInfo != nil {
schemaName = job.BinlogInfo.DBInfo.Name.L
}
}
	// For compatibility, the old version of a DDL job didn't store the schema name and table name.
if len(schemaName) == 0 {
schemaName = getSchemaName(e.is, job.SchemaID)
}
if len(tableName) == 0 {
tableName = getTableName(e.is, job.TableID)
}
startTime := ts2Time(job.StartTS)
finishTime := ts2Time(finishTS)
// Check the privilege.
if checker != nil && !checker.RequestVerification(e.activeRoles, strings.ToLower(schemaName), strings.ToLower(tableName), "", mysql.AllPrivMask) {
return
}
req.AppendInt64(0, job.ID)
req.AppendString(1, schemaName)
req.AppendString(2, tableName)
req.AppendString(3, job.Type.String())
req.AppendString(4, job.SchemaState.String())
req.AppendInt64(5, job.SchemaID)
req.AppendInt64(6, job.TableID)
req.AppendInt64(7, job.RowCount)
req.AppendTime(8, startTime)
if finishTS > 0 {
req.AppendTime(9, finishTime)
} else {
req.AppendNull(9)
}
req.AppendString(10, job.State.String())
}
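// ts2Time converts a TSO timestamp into a types.Time value truncated to the default fractional-second precision.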
func ts2Time(timestamp uint64) types.Time {
duration := time.Duration(math.Pow10(9-int(types.DefaultFsp))) * time.Nanosecond
t := model.TSConvert2Time(timestamp)
t.Truncate(duration)
return types.NewTime(types.FromGoTime(t), mysql.TypeDatetime, types.DefaultFsp)
}
// ShowDDLJobQueriesExec represents a show DDL job queries executor.
// The job IDs given by the 'admin show ddl job queries' statement
// are only searched in the latest 10 history jobs.
type ShowDDLJobQueriesExec struct {
baseExecutor
cursor int
jobs []*model.Job
jobIDs []int64
}
// Open implements the Executor Open interface.
func (e *ShowDDLJobQueriesExec) Open(ctx context.Context) error {
if err := e.baseExecutor.Open(ctx); err != nil {
return err
}
txn, err := e.ctx.Txn(true)
if err != nil {
return err
}
jobs, err := admin.GetDDLJobs(txn)
if err != nil {
return err
}
historyJobs, err := admin.GetHistoryDDLJobs(txn, admin.DefNumHistoryJobs)
if err != nil {
return err
}
e.jobs = append(e.jobs, jobs...)
e.jobs = append(e.jobs, historyJobs...)
return nil
}
// Next implements the Executor Next interface.
func (e *ShowDDLJobQueriesExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.GrowAndReset(e.maxChunkSize)
if e.cursor >= len(e.jobs) {
return nil
}
if len(e.jobIDs) >= len(e.jobs) {
return nil
}
numCurBatch := mathutil.Min(req.Capacity(), len(e.jobs)-e.cursor)
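	// Only the queries whose job IDs were requested are appended; other jobs in this batch are skipped.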
for _, id := range e.jobIDs {
for i := e.cursor; i < e.cursor+numCurBatch; i++ {
if id == e.jobs[i].ID {
req.AppendString(0, e.jobs[i].Query)
}
}
}
e.cursor += numCurBatch
return nil
}
// Open implements the Executor Open interface.
func (e *ShowDDLJobsExec) Open(ctx context.Context) error {
if err := e.baseExecutor.Open(ctx); err != nil {
return err
}
txn, err := e.ctx.Txn(true)
if err != nil {
return err
}
e.DDLJobRetriever.is = e.is
if e.jobNumber == 0 {
e.jobNumber = admin.DefNumHistoryJobs
}
err = e.DDLJobRetriever.initial(txn)
if err != nil {
return err
}
return nil
}
// Next implements the Executor Next interface.
func (e *ShowDDLJobsExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.GrowAndReset(e.maxChunkSize)
if (e.cursor - len(e.runningJobs)) >= e.jobNumber {
return nil
}
count := 0
// Append running ddl jobs.
if e.cursor < len(e.runningJobs) {
numCurBatch := mathutil.Min(req.Capacity(), len(e.runningJobs)-e.cursor)
for i := e.cursor; i < e.cursor+numCurBatch; i++ {
e.appendJobToChunk(req, e.runningJobs[i], nil)
}
e.cursor += numCurBatch
count += numCurBatch
}
// Append history ddl jobs.
var err error
if count < req.Capacity() {
num := req.Capacity() - count
remainNum := e.jobNumber - (e.cursor - len(e.runningJobs))
num = mathutil.Min(num, remainNum)
e.cacheJobs, err = e.historyJobIter.GetLastJobs(num, e.cacheJobs)
if err != nil {
return err
}
for _, job := range e.cacheJobs {
e.appendJobToChunk(req, job, nil)
}
e.cursor += len(e.cacheJobs)
}
return nil
}
func getSchemaName(is infoschema.InfoSchema, id int64) string {
var schemaName string
DBInfo, ok := is.SchemaByID(id)
if ok {
schemaName = DBInfo.Name.O
return schemaName
}
return schemaName
}
func getTableName(is infoschema.InfoSchema, id int64) string {
var tableName string
table, ok := is.TableByID(id)
if ok {
tableName = table.Meta().Name.O
return tableName
}
return tableName
}
// CheckTableExec represents a check table executor.
// It is built from the "admin check table" statement, and it checks if the
// index matches the records in the table.
type CheckTableExec struct {
baseExecutor
dbName string
table table.Table
indexInfos []*model.IndexInfo
srcs []*IndexLookUpExecutor
done bool
is infoschema.InfoSchema
exitCh chan struct{}
retCh chan error
checkIndex bool
}
// Open implements the Executor Open interface.
func (e *CheckTableExec) Open(ctx context.Context) error {
if err := e.baseExecutor.Open(ctx); err != nil {
return err
}
for _, src := range e.srcs {
if err := src.Open(ctx); err != nil {
return errors.Trace(err)
}
}
e.done = false
return nil
}
// Close implements the Executor Close interface.
func (e *CheckTableExec) Close() error {
var firstErr error
for _, src := range e.srcs {
if err := src.Close(); err != nil && firstErr == nil {
firstErr = err
}
}
return firstErr
}
func (e *CheckTableExec) checkTableIndexHandle(ctx context.Context, idxInfo *model.IndexInfo) error {
	// For a partitioned table, there are multiple indexLookUpReaders for the same index, one per partition.
for _, src := range e.srcs {
if src.index.Name.L == idxInfo.Name.L {
err := e.checkIndexHandle(ctx, src)
if err != nil {
return err
}
}
}
return nil
}
func (e *CheckTableExec) checkIndexHandle(ctx context.Context, src *IndexLookUpExecutor) error {
cols := src.schema.Columns
retFieldTypes := make([]*types.FieldType, len(cols))
for i := range cols {
retFieldTypes[i] = cols[i].RetType
}
chk := chunk.New(retFieldTypes, e.initCap, e.maxChunkSize)
var err error
for {
err = Next(ctx, src, chk)
if err != nil {
break
}
if chk.NumRows() == 0 {
break
}
select {
case <-e.exitCh:
return nil
default:
}
}
e.retCh <- errors.Trace(err)
return errors.Trace(err)
}
func (e *CheckTableExec) handlePanic(r interface{}) {
if r != nil {
e.retCh <- errors.Errorf("%v", r)
}
}
// Next implements the Executor Next interface.
func (e *CheckTableExec) Next(ctx context.Context, req *chunk.Chunk) error {
if e.done || len(e.srcs) == 0 {
return nil
}
defer func() { e.done = true }()
idxNames := make([]string, 0, len(e.indexInfos))
for _, idx := range e.indexInfos {
idxNames = append(idxNames, idx.Name.O)
}
greater, idxOffset, err := admin.CheckIndicesCount(e.ctx, e.dbName, e.table.Meta().Name.O, idxNames)
if err != nil {
		// For the admin check index statement, skip the checks below for speed and compatibility.
if e.checkIndex {
return errors.Trace(err)
}
if greater == admin.IdxCntGreater {
err = e.checkTableIndexHandle(ctx, e.indexInfos[idxOffset])
} else if greater == admin.TblCntGreater {
err = e.checkTableRecord(idxOffset)
}
if err != nil && admin.ErrDataInConsistent.Equal(err) {
return ErrAdminCheckTable.GenWithStack("%v err:%v", e.table.Meta().Name, err)
}
return errors.Trace(err)
}
// The number of table rows is equal to the number of index rows.
// TODO: Make the value of concurrency adjustable. And we can consider the number of records.
concurrency := 3
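	// Check the index readers concurrently in batches of `concurrency` goroutines; panics are converted to errors by handlePanic and collected from retCh below.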
wg := sync.WaitGroup{}
for i := range e.srcs {
wg.Add(1)
go func(num int) {
defer wg.Done()
util.WithRecovery(func() {
err1 := e.checkIndexHandle(ctx, e.srcs[num])
if err1 != nil {
logutil.Logger(ctx).Info("check index handle failed", zap.Error(err1))
}
}, e.handlePanic)
}(i)
if (i+1)%concurrency == 0 {
wg.Wait()
}
}
for i := 0; i < len(e.srcs); i++ {
err = <-e.retCh
if err != nil {
return errors.Trace(err)
}
}
return nil
}
func (e *CheckTableExec) checkTableRecord(idxOffset int) error {
idxInfo := e.indexInfos[idxOffset]
txn, err := e.ctx.Txn(true)
if err != nil {
return err
}
if e.table.Meta().GetPartitionInfo() == nil {
idx := tables.NewIndex(e.table.Meta().ID, e.table.Meta(), idxInfo)
return admin.CheckRecordAndIndex(e.ctx, txn, e.table, idx)
}
info := e.table.Meta().GetPartitionInfo()
for _, def := range info.Definitions {
pid := def.ID
partition := e.table.(table.PartitionedTable).GetPartition(pid)
idx := tables.NewIndex(def.ID, e.table.Meta(), idxInfo)
if err := admin.CheckRecordAndIndex(e.ctx, txn, partition, idx); err != nil {
return errors.Trace(err)
}
}
return nil
}
// ShowSlowExec represents the executor of showing the slow queries.
// It is built from the "admin show slow" statement:
// admin show slow top [internal | all] N
// admin show slow recent N
type ShowSlowExec struct {
baseExecutor
ShowSlow *ast.ShowSlow
result []*domain.SlowQueryInfo
cursor int
}
// Open implements the Executor Open interface.
func (e *ShowSlowExec) Open(ctx context.Context) error {
if err := e.baseExecutor.Open(ctx); err != nil {
return err
}
dom := domain.GetDomain(e.ctx)
e.result = dom.ShowSlowQuery(e.ShowSlow)
return nil
}
// Next implements the Executor Next interface.
func (e *ShowSlowExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.Reset()
if e.cursor >= len(e.result) {
return nil
}
for e.cursor < len(e.result) && req.NumRows() < e.maxChunkSize {
slow := e.result[e.cursor]
req.AppendString(0, slow.SQL)
req.AppendTime(1, types.NewTime(types.FromGoTime(slow.Start), mysql.TypeTimestamp, types.MaxFsp))
req.AppendDuration(2, types.Duration{Duration: slow.Duration, Fsp: types.MaxFsp})
req.AppendString(3, slow.Detail.String())
if slow.Succ {
req.AppendInt64(4, 1)
} else {
req.AppendInt64(4, 0)
}
req.AppendUint64(5, slow.ConnID)
req.AppendUint64(6, slow.TxnTS)
req.AppendString(7, slow.User)
req.AppendString(8, slow.DB)
req.AppendString(9, slow.TableIDs)
req.AppendString(10, slow.IndexNames)
if slow.Internal {
req.AppendInt64(11, 1)
} else {
req.AppendInt64(11, 0)
}
req.AppendString(12, slow.Digest)
e.cursor++
}
return nil
}
// SelectLockExec represents a select lock executor.
// It is built from the "SELECT .. FOR UPDATE" or the "SELECT .. LOCK IN SHARE MODE" statement.
// For "SELECT .. FOR UPDATE" statement, it locks every row key from source Executor.
// After the execution, the keys are buffered in transaction, and will be sent to KV
// when doing commit. If there is any key already locked by another transaction,
// the transaction will rollback and retry.
type SelectLockExec struct {
baseExecutor
Lock *ast.SelectLockInfo
keys []kv.Key
tblID2Handle map[int64][]plannercore.HandleCols
partitionedTable []table.PartitionedTable
// tblID2Table is cached to reduce cost.
tblID2Table map[int64]table.PartitionedTable
}
// Open implements the Executor Open interface.
func (e *SelectLockExec) Open(ctx context.Context) error {
if err := e.baseExecutor.Open(ctx); err != nil {
return err
}
if len(e.tblID2Handle) > 0 && len(e.partitionedTable) > 0 {
e.tblID2Table = make(map[int64]table.PartitionedTable, len(e.partitionedTable))
for id := range e.tblID2Handle {
for _, p := range e.partitionedTable {
if id == p.Meta().ID {
e.tblID2Table[id] = p
}
}
}
}
return nil
}
// Next implements the Executor Next interface.
func (e *SelectLockExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.GrowAndReset(e.maxChunkSize)
err := Next(ctx, e.children[0], req)
if err != nil {
return err
}
// If there's no handle or it's not a `SELECT FOR UPDATE` statement.
if len(e.tblID2Handle) == 0 || (!plannercore.IsSelectForUpdateLockType(e.Lock.LockType)) {
return nil
}
if req.NumRows() > 0 {
iter := chunk.NewIterator4Chunk(req)
for row := iter.Begin(); row != iter.End(); row = iter.Next() {
for id, cols := range e.tblID2Handle {
physicalID := id
if pt, ok := e.tblID2Table[id]; ok {
// On a partitioned table, we have to use physical ID to encode the lock key!
p, err := pt.GetPartitionByRow(e.ctx, row.GetDatumRow(e.base().retFieldTypes))
if err != nil {
return err
}
physicalID = p.GetPhysicalID()
}
for _, col := range cols {
handle, err := col.BuildHandle(row)
if err != nil {
return err
}
e.keys = append(e.keys, tablecodec.EncodeRowKeyWithHandle(physicalID, handle))
}
}
}
return nil
}
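	// All rows have been fetched and their keys buffered; determine the lock-wait behaviour and lock the keys in one batch.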
lockWaitTime := e.ctx.GetSessionVars().LockWaitTimeout
if e.Lock.LockType == ast.SelectLockForUpdateNoWait {
lockWaitTime = kv.LockNoWait
} else if e.Lock.LockType == ast.SelectLockForUpdateWaitN {
lockWaitTime = int64(e.Lock.WaitSec) * 1000
}
if len(e.tblID2Handle) > 0 {
for id := range e.tblID2Handle {
e.updateDeltaForTableID(id)
}
}
if len(e.partitionedTable) > 0 {
for _, p := range e.partitionedTable {
pid := p.Meta().ID
e.updateDeltaForTableID(pid)
}
}
return doLockKeys(ctx, e.ctx, newLockCtx(e.ctx.GetSessionVars(), lockWaitTime), e.keys...)
}
func newLockCtx(seVars *variable.SessionVars, lockWaitTime int64) *tikvstore.LockCtx {
return &tikvstore.LockCtx{
Killed: &seVars.Killed,
ForUpdateTS: seVars.TxnCtx.GetForUpdateTS(),
LockWaitTime: lockWaitTime,
WaitStartTime: seVars.StmtCtx.GetLockWaitStartTime(),
PessimisticLockWaited: &seVars.StmtCtx.PessimisticLockWaited,
LockKeysDuration: &seVars.StmtCtx.LockKeysDuration,
LockKeysCount: &seVars.StmtCtx.LockKeysCount,
LockExpired: &seVars.TxnCtx.LockExpire,
}
}
// doLockKeys is the main entry for pessimistically locking keys.
// waitTime is the time in milliseconds that the lock operation will wait if a target key is already
// locked by another transaction; it is used for the (select for update nowait) situation.
// As special values, 0 means always wait and -1 means nowait.
func doLockKeys(ctx context.Context, se sessionctx.Context, lockCtx *tikvstore.LockCtx, keys ...kv.Key) error {
sctx := se.GetSessionVars().StmtCtx
if !sctx.InUpdateStmt && !sctx.InDeleteStmt {
atomic.StoreUint32(&se.GetSessionVars().TxnCtx.ForUpdate, 1)
}
// Lock keys only once when finished fetching all results.
txn, err := se.Txn(true)
if err != nil {
return err
}
var lockKeyStats *tikvutil.LockKeysDetails
ctx = context.WithValue(ctx, tikvutil.LockKeysDetailCtxKey, &lockKeyStats)
err = txn.LockKeys(tikvutil.SetSessionID(ctx, se.GetSessionVars().ConnectionID), lockCtx, keys...)
if lockKeyStats != nil {
sctx.MergeLockKeysExecDetails(lockKeyStats)
}
return err
}
// LimitExec represents the limit executor.
// It ignores 'Offset' rows from src, then returns 'Count' rows at maximum.
type LimitExec struct {
baseExecutor
begin uint64
end uint64
cursor uint64
// meetFirstBatch represents whether we have met the first valid Chunk from child.
meetFirstBatch bool
childResult *chunk.Chunk
	// columnIdxsUsedByChild keeps the column indexes of the child executor that are used for inline projection
columnIdxsUsedByChild []int
}
// Next implements the Executor Next interface.
func (e *LimitExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.Reset()
if e.cursor >= e.end {
return nil
}
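	// Skip child rows until the offset (`begin`) is reached; the chunk that crosses the offset may be returned partially.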
for !e.meetFirstBatch {
// transfer req's requiredRows to childResult and then adjust it in childResult
e.childResult = e.childResult.SetRequiredRows(req.RequiredRows(), e.maxChunkSize)
err := Next(ctx, e.children[0], e.adjustRequiredRows(e.childResult))
if err != nil {
return err
}
batchSize := uint64(e.childResult.NumRows())
// no more data.
if batchSize == 0 {
return nil
}
if newCursor := e.cursor + batchSize; newCursor >= e.begin {
e.meetFirstBatch = true
begin, end := e.begin-e.cursor, batchSize
if newCursor > e.end {
end = e.end - e.cursor
}
e.cursor += end
if begin == end {
break
}
if e.columnIdxsUsedByChild != nil {
req.Append(e.childResult.Prune(e.columnIdxsUsedByChild), int(begin), int(end))
} else {
req.Append(e.childResult, int(begin), int(end))
}
return nil
}
e.cursor += batchSize
}
e.childResult.Reset()
e.childResult = e.childResult.SetRequiredRows(req.RequiredRows(), e.maxChunkSize)
e.adjustRequiredRows(e.childResult)
err := Next(ctx, e.children[0], e.childResult)
if err != nil {
return err
}
batchSize := uint64(e.childResult.NumRows())
// no more data.
if batchSize == 0 {
return nil
}
if e.cursor+batchSize > e.end {
e.childResult.TruncateTo(int(e.end - e.cursor))
batchSize = e.end - e.cursor
}
e.cursor += batchSize
if e.columnIdxsUsedByChild != nil {
for i, childIdx := range e.columnIdxsUsedByChild {
if err = req.SwapColumn(i, e.childResult, childIdx); err != nil {
return err
}
}
} else {
req.SwapColumns(e.childResult)
}
return nil
}
// Open implements the Executor Open interface.
func (e *LimitExec) Open(ctx context.Context) error {
if err := e.baseExecutor.Open(ctx); err != nil {
return err
}
e.childResult = newFirstChunk(e.children[0])
e.cursor = 0
e.meetFirstBatch = e.begin == 0
return nil
}
// Close implements the Executor Close interface.
func (e *LimitExec) Close() error {
e.childResult = nil
return e.baseExecutor.Close()
}
func (e *LimitExec) adjustRequiredRows(chk *chunk.Chunk) *chunk.Chunk {
	// limitTotal is the maximum number of rows the LimitExec still needs to read.
limitTotal := int(e.end - e.cursor)
var limitRequired int
if e.cursor < e.begin {
		// if cursor is less than begin, it has to read (begin-cursor) rows to ignore
// and then read chk.RequiredRows() rows to return,
// so the limit is (begin-cursor)+chk.RequiredRows().
limitRequired = int(e.begin) - int(e.cursor) + chk.RequiredRows()
} else {
		// if cursor is equal to or larger than begin, just read chk.RequiredRows() rows to return.
limitRequired = chk.RequiredRows()
}
return chk.SetRequiredRows(mathutil.Min(limitTotal, limitRequired), e.maxChunkSize)
}
func init() {
// While doing optimization in the plan package, we need to execute uncorrelated subquery,
// but the plan package cannot import the executor package because of the dependency cycle.
// So we assign a function implemented in the executor package to the plan package to avoid the dependency cycle.
plannercore.EvalSubqueryFirstRow = func(ctx context.Context, p plannercore.PhysicalPlan, is infoschema.InfoSchema, sctx sessionctx.Context) ([]types.Datum, error) {
defer func(begin time.Time) {
s := sctx.GetSessionVars()
s.RewritePhaseInfo.PreprocessSubQueries++
s.RewritePhaseInfo.DurationPreprocessSubQuery += time.Since(begin)
}(time.Now())
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span1 := span.Tracer().StartSpan("executor.EvalSubQuery", opentracing.ChildOf(span.Context()))
defer span1.Finish()
ctx = opentracing.ContextWithSpan(ctx, span1)
}
e := &executorBuilder{is: is, ctx: sctx}
exec := e.build(p)
if e.err != nil {
return nil, e.err
}
err := exec.Open(ctx)
defer terror.Call(exec.Close)
if err != nil {
return nil, err
}
chk := newFirstChunk(exec)
err = Next(ctx, exec, chk)
if err != nil {
return nil, err
}
if chk.NumRows() == 0 {
return nil, nil
}
row := chk.GetRow(0).GetDatumRow(retTypes(exec))
return row, err
}
}
// TableDualExec represents a dual table executor.
type TableDualExec struct {
baseExecutor
// numDualRows can only be 0 or 1.
numDualRows int
numReturned int
}
// Open implements the Executor Open interface.
func (e *TableDualExec) Open(ctx context.Context) error {
e.numReturned = 0
return nil
}
// Next implements the Executor Next interface.
func (e *TableDualExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.Reset()
if e.numReturned >= e.numDualRows {
return nil
}
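	// With an empty schema the dual table still yields one virtual row; otherwise a single all-NULL row is emitted.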
if e.Schema().Len() == 0 {
req.SetNumVirtualRows(1)
} else {
for i := range e.Schema().Columns {
req.AppendNull(i)
}
}
e.numReturned = e.numDualRows
return nil
}
// SelectionExec represents a filter executor.
type SelectionExec struct {
baseExecutor
batched bool
filters []expression.Expression
selected []bool
inputIter *chunk.Iterator4Chunk
inputRow chunk.Row
childResult *chunk.Chunk
memTracker *memory.Tracker
}
// Open implements the Executor Open interface.
func (e *SelectionExec) Open(ctx context.Context) error {
if err := e.baseExecutor.Open(ctx); err != nil {
return err
}
return e.open(ctx)
}
func (e *SelectionExec) open(ctx context.Context) error {
e.memTracker = memory.NewTracker(e.id, -1)
e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker)
e.childResult = newFirstChunk(e.children[0])
e.memTracker.Consume(e.childResult.MemoryUsage())
e.batched = expression.Vectorizable(e.filters)
if e.batched {
e.selected = make([]bool, 0, chunk.InitialCapacity)
}
e.inputIter = chunk.NewIterator4Chunk(e.childResult)
e.inputRow = e.inputIter.End()
return nil
}
// Close implements plannercore.Plan Close interface.
func (e *SelectionExec) Close() error {
e.memTracker.Consume(-e.childResult.MemoryUsage())
e.childResult = nil
e.selected = nil
return e.baseExecutor.Close()
}
// Next implements the Executor Next interface.
func (e *SelectionExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.GrowAndReset(e.maxChunkSize)
if !e.batched {
return e.unBatchedNext(ctx, req)
}
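	// Drain the rows already filtered from the previous child chunk first, then fetch and vector-filter the next chunk.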
for {
for ; e.inputRow != e.inputIter.End(); e.inputRow = e.inputIter.Next() {
if !e.selected[e.inputRow.Idx()] {
continue
}
if req.IsFull() {
return nil
}
req.AppendRow(e.inputRow)
}
mSize := e.childResult.MemoryUsage()
err := Next(ctx, e.children[0], e.childResult)
e.memTracker.Consume(e.childResult.MemoryUsage() - mSize)
if err != nil {
return err
}
// no more data.
if e.childResult.NumRows() == 0 {
return nil
}
e.selected, err = expression.VectorizedFilter(e.ctx, e.filters, e.inputIter, e.selected)
if err != nil {
return err
}
e.inputRow = e.inputIter.Begin()
}
}
// unBatchedNext filters input rows one by one and returns once an input row is selected.
// For SQL with "SETVAR" in the filter and "GETVAR" in the projection, for example: "SELECT @a FROM t WHERE (@a := 2) > 0",
// we have to set batch size to 1 to do the evaluation of filter and projection.
func (e *SelectionExec) unBatchedNext(ctx context.Context, chk *chunk.Chunk) error {
for {
for ; e.inputRow != e.inputIter.End(); e.inputRow = e.inputIter.Next() {
selected, _, err := expression.EvalBool(e.ctx, e.filters, e.inputRow)
if err != nil {
return err
}
if selected {
chk.AppendRow(e.inputRow)
e.inputRow = e.inputIter.Next()
return nil
}
}
mSize := e.childResult.MemoryUsage()
err := Next(ctx, e.children[0], e.childResult)
e.memTracker.Consume(e.childResult.MemoryUsage() - mSize)
if err != nil {
return err
}
e.inputRow = e.inputIter.Begin()
// no more data.
if e.childResult.NumRows() == 0 {
return nil
}
}
}
// TableScanExec is a table scan executor without result fields.
type TableScanExec struct {
baseExecutor
t table.Table
columns []*model.ColumnInfo
virtualTableChunkList *chunk.List
virtualTableChunkIdx int
}
// Next implements the Executor Next interface.
func (e *TableScanExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.GrowAndReset(e.maxChunkSize)
return e.nextChunk4InfoSchema(ctx, req)
}
func (e *TableScanExec) nextChunk4InfoSchema(ctx context.Context, chk *chunk.Chunk) error {
chk.GrowAndReset(e.maxChunkSize)
if e.virtualTableChunkList == nil {
e.virtualTableChunkList = chunk.NewList(retTypes(e), e.initCap, e.maxChunkSize)
columns := make([]*table.Column, e.schema.Len())
for i, colInfo := range e.columns {
columns[i] = table.ToColumn(colInfo)
}
mutableRow := chunk.MutRowFromTypes(retTypes(e))
type tableIter interface {
IterRecords(sessionctx.Context, []*table.Column, table.RecordIterFunc) error
}
err := (e.t.(tableIter)).IterRecords(e.ctx, columns, func(_ kv.Handle, rec []types.Datum, cols []*table.Column) (bool, error) {
mutableRow.SetDatums(rec...)
e.virtualTableChunkList.AppendRow(mutableRow.ToRow())
return true, nil
})
if err != nil {
return err
}
}
// no more data.
if e.virtualTableChunkIdx >= e.virtualTableChunkList.NumChunks() {
return nil
}
virtualTableChunk := e.virtualTableChunkList.GetChunk(e.virtualTableChunkIdx)
e.virtualTableChunkIdx++
chk.SwapColumns(virtualTableChunk)
return nil
}
// Open implements the Executor Open interface.
func (e *TableScanExec) Open(ctx context.Context) error {
e.virtualTableChunkList = nil
return nil
}
// MaxOneRowExec checks if the number of rows that a query returns is at maximum one.
// It's built from a subquery expression.
type MaxOneRowExec struct {
baseExecutor
evaluated bool
}
// Open implements the Executor Open interface.
func (e *MaxOneRowExec) Open(ctx context.Context) error {
if err := e.baseExecutor.Open(ctx); err != nil {
return err
}
e.evaluated = false
return nil
}
// Next implements the Executor Next interface.
func (e *MaxOneRowExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.Reset()
if e.evaluated {
return nil
}
e.evaluated = true
err := Next(ctx, e.children[0], req)
if err != nil {
return err
}
if num := req.NumRows(); num == 0 {
for i := range e.schema.Columns {
req.AppendNull(i)
}
return nil
} else if num != 1 {
return ErrSubqueryMoreThan1Row
}
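	// Pull one more chunk from the child to make sure no second row exists.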
childChunk := newFirstChunk(e.children[0])
err = Next(ctx, e.children[0], childChunk)
if err != nil {
return err
}
if childChunk.NumRows() != 0 {
return ErrSubqueryMoreThan1Row
}
return nil
}
// UnionExec pulls all its children's results and returns them to its parent directly.
// A "resultPuller" is started for every child to pull result from that child and push it to the "resultPool", the used
// "Chunk" is obtained from the corresponding "resourcePool". All resultPullers are running concurrently.
// +----------------+
// +---> resourcePool 1 ---> | resultPuller 1 |-----+
// | +----------------+ |
// | |
// | +----------------+ v
// +---> resourcePool 2 ---> | resultPuller 2 |-----> resultPool ---+
// | +----------------+ ^ |
// | ...... | |
// | +----------------+ | |
// +---> resourcePool n ---> | resultPuller n |-----+ |
// | +----------------+ |
// | |
// | +-------------+ |
// |--------------------------| main thread | <---------------------+
// +-------------+
type UnionExec struct {
baseExecutor
concurrency int
childIDChan chan int
stopFetchData atomic.Value
finished chan struct{}
resourcePools []chan *chunk.Chunk
resultPool chan *unionWorkerResult
results []*chunk.Chunk
wg sync.WaitGroup
initialized bool
}
// unionWorkerResult stores the result for a union worker.
// A "resultPuller" is started for every child to pull results from that child; unionWorkerResult stores a pulled result.
// "src" is used for Chunk reuse: after pulling a result from "resultPool", the main thread must push a valid unused Chunk to "src" to
// let the corresponding "resultPuller" continue working.
type unionWorkerResult struct {
chk *chunk.Chunk
err error
src chan<- *chunk.Chunk
}
func (e *UnionExec) waitAllFinished() {
e.wg.Wait()
close(e.resultPool)
}
// Open implements the Executor Open interface.
func (e *UnionExec) Open(ctx context.Context) error {
if err := e.baseExecutor.Open(ctx); err != nil {
return err
}
e.stopFetchData.Store(false)
e.initialized = false
e.finished = make(chan struct{})
return nil
}
func (e *UnionExec) initialize(ctx context.Context) {
if e.concurrency > len(e.children) {
e.concurrency = len(e.children)
}
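	// Every worker owns one reusable chunk that cycles between its resource pool and the shared result pool.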
for i := 0; i < e.concurrency; i++ {
e.results = append(e.results, newFirstChunk(e.children[0]))
}
e.resultPool = make(chan *unionWorkerResult, e.concurrency)
e.resourcePools = make([]chan *chunk.Chunk, e.concurrency)
e.childIDChan = make(chan int, len(e.children))
for i := 0; i < e.concurrency; i++ {
e.resourcePools[i] = make(chan *chunk.Chunk, 1)
e.resourcePools[i] <- e.results[i]
e.wg.Add(1)
go e.resultPuller(ctx, i)
}
for i := 0; i < len(e.children); i++ {
e.childIDChan <- i
}
close(e.childIDChan)
go e.waitAllFinished()
}
func (e *UnionExec) resultPuller(ctx context.Context, workerID int) {
result := &unionWorkerResult{
err: nil,
chk: nil,
src: e.resourcePools[workerID],
}
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
stackSize := runtime.Stack(buf, false)
buf = buf[:stackSize]
logutil.Logger(ctx).Error("resultPuller panicked", zap.String("stack", string(buf)))
result.err = errors.Errorf("%v", r)
e.resultPool <- result
e.stopFetchData.Store(true)
}
e.wg.Done()
}()
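	// Each worker keeps pulling child IDs from childIDChan, so at most `concurrency` children are executed at the same time.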
for childID := range e.childIDChan {
for {
if e.stopFetchData.Load().(bool) {
return
}
select {
case <-e.finished:
return
case result.chk = <-e.resourcePools[workerID]:
}
result.err = Next(ctx, e.children[childID], result.chk)
if result.err == nil && result.chk.NumRows() == 0 {
e.resourcePools[workerID] <- result.chk
break
}
e.resultPool <- result
if result.err != nil {
e.stopFetchData.Store(true)
return
}
}
}
}
// Next implements the Executor Next interface.
func (e *UnionExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.GrowAndReset(e.maxChunkSize)
if !e.initialized {
e.initialize(ctx)
e.initialized = true
}
result, ok := <-e.resultPool
if !ok {
return nil
}
if result.err != nil {
return errors.Trace(result.err)
}
if result.chk.NumCols() != req.NumCols() {
return errors.Errorf("Internal error: UnionExec chunk column count mismatch, req: %d, result: %d",
req.NumCols(), result.chk.NumCols())
}
req.SwapColumns(result.chk)
result.src <- result.chk
return nil
}
// Close implements the Executor Close interface.
func (e *UnionExec) Close() error {
if e.finished != nil {
close(e.finished)
}
e.results = nil
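	// Drain resultPool and childIDChan so that any blocked resultPuller can exit before the children are closed.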
if e.resultPool != nil {
for range e.resultPool {
}
}
e.resourcePools = nil
if e.childIDChan != nil {
for range e.childIDChan {
}
}
return e.baseExecutor.Close()
}
// ResetContextOfStmt resets the StmtContext and session variables.
// Before every execution, we must clear statement context.
func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) {
vars := ctx.GetSessionVars()
sc := &stmtctx.StatementContext{
TimeZone: vars.Location(),
MemTracker: memory.NewTracker(memory.LabelForSQLText, vars.MemQuotaQuery),
DiskTracker: disk.NewTracker(memory.LabelForSQLText, -1),
TaskID: stmtctx.AllocateTaskID(),
}
sc.MemTracker.AttachToGlobalTracker(GlobalMemoryUsageTracker)
globalConfig := config.GetGlobalConfig()
if globalConfig.OOMUseTmpStorage && GlobalDiskUsageTracker != nil {
sc.DiskTracker.AttachToGlobalTracker(GlobalDiskUsageTracker)
}
switch globalConfig.OOMAction {
case config.OOMActionCancel:
action := &memory.PanicOnExceed{ConnID: ctx.GetSessionVars().ConnectionID}
action.SetLogHook(domain.GetDomain(ctx).ExpensiveQueryHandle().LogOnQueryExceedMemQuota)
sc.MemTracker.SetActionOnExceed(action)
case config.OOMActionLog:
fallthrough
default:
action := &memory.LogOnExceed{ConnID: ctx.GetSessionVars().ConnectionID}
action.SetLogHook(domain.GetDomain(ctx).ExpensiveQueryHandle().LogOnQueryExceedMemQuota)
sc.MemTracker.SetActionOnExceed(action)
}
if execStmt, ok := s.(*ast.ExecuteStmt); ok {
s, err = planner.GetPreparedStmt(execStmt, vars)
if err != nil {
return
}
}
	// An EXECUTE statement whose prepared stmtID cannot be resolved uses an empty SQL text.
sc.OriginalSQL = s.Text()
if explainStmt, ok := s.(*ast.ExplainStmt); ok {
sc.InExplainStmt = true
sc.IgnoreExplainIDSuffix = (strings.ToLower(explainStmt.Format) == ast.ExplainFormatBrief)
s = explainStmt.Stmt
}
if _, ok := s.(*ast.ExplainForStmt); ok {
sc.InExplainStmt = true
}
// TODO: Many same bool variables here.
// We should set only two variables (
// IgnoreErr and StrictSQLMode) to avoid setting the same bool variables and
// pushing them down to TiKV as flags.
switch stmt := s.(type) {
case *ast.UpdateStmt:
ResetUpdateStmtCtx(sc, stmt, vars)
case *ast.DeleteStmt:
sc.InDeleteStmt = true
sc.DupKeyAsWarning = stmt.IgnoreErr
sc.BadNullAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
sc.TruncateAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
sc.DividedByZeroAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
sc.IgnoreZeroInDate = !vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode || stmt.IgnoreErr || sc.AllowInvalidDate
sc.Priority = stmt.Priority
case *ast.InsertStmt:
sc.InInsertStmt = true
		// For an insert statement (not an update statement), disabling StrictSQLMode
		// should enable TruncateAsWarning and DividedByZeroAsWarning,
		// but should not enable DupKeyAsWarning.
sc.DupKeyAsWarning = stmt.IgnoreErr
sc.BadNullAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
sc.IgnoreNoPartition = stmt.IgnoreErr
sc.TruncateAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
sc.DividedByZeroAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
sc.IgnoreZeroInDate = !vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode || stmt.IgnoreErr || sc.AllowInvalidDate
sc.Priority = stmt.Priority
case *ast.CreateTableStmt, *ast.AlterTableStmt:
sc.InCreateOrAlterStmt = true
sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
sc.IgnoreZeroInDate = !vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode || sc.AllowInvalidDate
case *ast.LoadDataStmt:
sc.DupKeyAsWarning = true
sc.BadNullAsWarning = true
sc.TruncateAsWarning = !vars.StrictSQLMode
sc.InLoadDataStmt = true
		// Return a warning instead of an error when LOAD DATA finds no matching partition for a value.
sc.IgnoreNoPartition = true
case *ast.SelectStmt:
sc.InSelectStmt = true
// see https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#sql-mode-strict
// said "For statements such as SELECT that do not change data, invalid values
// generate a warning in strict mode, not an error."
// and https://dev.mysql.com/doc/refman/5.7/en/out-of-range-and-overflow.html
sc.OverflowAsWarning = true
// Return warning for truncate error in selection.
sc.TruncateAsWarning = true
sc.IgnoreZeroInDate = true
sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
if opts := stmt.SelectStmtOpts; opts != nil {
sc.Priority = opts.Priority
sc.NotFillCache = !opts.SQLCache
}
case *ast.SetOprStmt:
sc.InSelectStmt = true
sc.OverflowAsWarning = true
sc.TruncateAsWarning = true
sc.IgnoreZeroInDate = true
sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
case *ast.ShowStmt:
sc.IgnoreTruncate = true
sc.IgnoreZeroInDate = true
sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
if stmt.Tp == ast.ShowWarnings || stmt.Tp == ast.ShowErrors {
sc.InShowWarning = true
sc.SetWarnings(vars.StmtCtx.GetWarnings())
}
case *ast.SplitRegionStmt:
sc.IgnoreTruncate = false
sc.IgnoreZeroInDate = true
sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
default:
sc.IgnoreTruncate = true
sc.IgnoreZeroInDate = true
sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
}
vars.PreparedParams = vars.PreparedParams[:0]
if priority := mysql.PriorityEnum(atomic.LoadInt32(&variable.ForcePriority)); priority != mysql.NoPriority {
sc.Priority = priority
}
if vars.StmtCtx.LastInsertID > 0 {
sc.PrevLastInsertID = vars.StmtCtx.LastInsertID
} else {
sc.PrevLastInsertID = vars.StmtCtx.PrevLastInsertID
}
sc.PrevAffectedRows = 0
if vars.StmtCtx.InUpdateStmt || vars.StmtCtx.InDeleteStmt || vars.StmtCtx.InInsertStmt {
sc.PrevAffectedRows = int64(vars.StmtCtx.AffectedRows())
} else if vars.StmtCtx.InSelectStmt {
sc.PrevAffectedRows = -1
}
if globalConfig.EnableCollectExecutionInfo {
sc.RuntimeStatsColl = execdetails.NewRuntimeStatsColl()
}
sc.TblInfo2UnionScan = make(map[*model.TableInfo]bool)
errCount, warnCount := vars.StmtCtx.NumErrorWarnings()
vars.SysErrorCount = errCount
vars.SysWarningCount = warnCount
vars.StmtCtx = sc
vars.PrevFoundInPlanCache = vars.FoundInPlanCache
vars.FoundInPlanCache = false
vars.ClearStmtVars()
vars.PrevFoundInBinding = vars.FoundInBinding
vars.FoundInBinding = false
return
}
// ResetUpdateStmtCtx resets statement context for UpdateStmt.
func ResetUpdateStmtCtx(sc *stmtctx.StatementContext, stmt *ast.UpdateStmt, vars *variable.SessionVars) {
sc.InUpdateStmt = true
sc.DupKeyAsWarning = stmt.IgnoreErr
sc.BadNullAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
sc.TruncateAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
sc.DividedByZeroAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr
sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode()
sc.IgnoreZeroInDate = !vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode || stmt.IgnoreErr || sc.AllowInvalidDate
sc.Priority = stmt.Priority
sc.IgnoreNoPartition = stmt.IgnoreErr
}
// FillVirtualColumnValue calculates the virtual column values by evaluating the generated
// expressions using rows from a chunk, and then fills the values into the chunk.
func FillVirtualColumnValue(virtualRetTypes []*types.FieldType, virtualColumnIndex []int,
schema *expression.Schema, columns []*model.ColumnInfo, sctx sessionctx.Context, req *chunk.Chunk) error {
virCols := chunk.NewChunkWithCapacity(virtualRetTypes, req.Capacity())
iter := chunk.NewIterator4Chunk(req)
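	// For every virtual column, evaluate the generated expression on each row, cast it to the column type, and write the resulting column back into the chunk.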
for i, idx := range virtualColumnIndex {
for row := iter.Begin(); row != iter.End(); row = iter.Next() {
datum, err := schema.Columns[idx].EvalVirtualColumn(row)
if err != nil {
return err
}
			// Because the expression might return a different type from
			// the generated column, we should wrap a CAST on the result.
castDatum, err := table.CastValue(sctx, datum, columns[idx], false, true)
if err != nil {
return err
}
// Handle the bad null error.
if (mysql.HasNotNullFlag(columns[idx].Flag) || mysql.HasPreventNullInsertFlag(columns[idx].Flag)) && castDatum.IsNull() {
castDatum = table.GetZeroValue(columns[idx])
}
virCols.AppendDatum(i, &castDatum)
}
req.SetCol(idx, virCols.Column(i))
}
return nil
}
| executor/executor.go | 1 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.1788731813430786,
0.0012655509635806084,
0.00015925502520985901,
0.00016960017092060298,
0.013378902338445187
] |
{
"id": 6,
"code_window": [
"\t\"go.uber.org/zap\"\n",
")\n",
"\n",
"type actionPessimisticLock struct {\n",
"\t*kv.LockCtx\n",
"}\n",
"type actionPessimisticRollback struct{}\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// Used for pessimistic lock wait time\n",
"// these two constants are special for lock protocol with tikv\n",
"// 0 means always wait, -1 means nowait, others meaning lock wait in milliseconds\n",
"var (\n",
"\tLockAlwaysWait = int64(0)\n",
"\tLockNoWait = int64(-1)\n",
")\n",
"\n"
],
"file_path": "store/tikv/pessimistic.go",
"type": "add",
"edit_start_line_idx": 34
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"math"
"time"
"github.com/cznic/mathutil"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/store/helper"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/logutil"
"go.uber.org/zap"
)
// SplitIndexRegionExec represents a split index regions executor.
type SplitIndexRegionExec struct {
baseExecutor
tableInfo *model.TableInfo
partitionNames []model.CIStr
indexInfo *model.IndexInfo
lower []types.Datum
upper []types.Datum
num int
valueLists [][]types.Datum
splitIdxKeys [][]byte
done bool
splitRegionResult
}
// nolint:structcheck
type splitRegionResult struct {
splitRegions int
finishScatterNum int
}
// Open implements the Executor Open interface.
func (e *SplitIndexRegionExec) Open(ctx context.Context) (err error) {
e.splitIdxKeys, err = e.getSplitIdxKeys()
return err
}
// Next implements the Executor Next interface.
func (e *SplitIndexRegionExec) Next(ctx context.Context, chk *chunk.Chunk) error {
chk.Reset()
if e.done {
return nil
}
e.done = true
if err := e.splitIndexRegion(ctx); err != nil {
return err
}
appendSplitRegionResultToChunk(chk, e.splitRegions, e.finishScatterNum)
return nil
}
// checkScatterRegionFinishBackOff is the backoff time used to check if a region has finished scattering before the split region timeout.
const checkScatterRegionFinishBackOff = 50
// splitIndexRegion is used to split index regions.
func (e *SplitIndexRegionExec) splitIndexRegion(ctx context.Context) error {
store := e.ctx.GetStore()
s, ok := store.(kv.SplittableStore)
if !ok {
return nil
}
start := time.Now()
ctxWithTimeout, cancel := context.WithTimeout(ctx, e.ctx.GetSessionVars().GetSplitRegionTimeout())
defer cancel()
regionIDs, err := s.SplitRegions(ctxWithTimeout, e.splitIdxKeys, true, &e.tableInfo.ID)
if err != nil {
logutil.BgLogger().Warn("split table index region failed",
zap.String("table", e.tableInfo.Name.L),
zap.String("index", e.indexInfo.Name.L),
zap.Error(err))
}
e.splitRegions = len(regionIDs)
if e.splitRegions == 0 {
return nil
}
if !e.ctx.GetSessionVars().WaitSplitRegionFinish {
return nil
}
e.finishScatterNum = waitScatterRegionFinish(ctxWithTimeout, e.ctx, start, s, regionIDs, e.tableInfo.Name.L, e.indexInfo.Name.L)
return nil
}
func (e *SplitIndexRegionExec) getSplitIdxKeys() ([][]byte, error) {
// Split index regions by user specified value lists.
if len(e.valueLists) > 0 {
return e.getSplitIdxKeysFromValueList()
}
return e.getSplitIdxKeysFromBound()
}
func (e *SplitIndexRegionExec) getSplitIdxKeysFromValueList() (keys [][]byte, err error) {
pi := e.tableInfo.GetPartitionInfo()
if pi == nil {
keys = make([][]byte, 0, len(e.valueLists)+1)
return e.getSplitIdxPhysicalKeysFromValueList(e.tableInfo.ID, keys)
}
// Split for all table partitions.
if len(e.partitionNames) == 0 {
keys = make([][]byte, 0, (len(e.valueLists)+1)*len(pi.Definitions))
for _, p := range pi.Definitions {
keys, err = e.getSplitIdxPhysicalKeysFromValueList(p.ID, keys)
if err != nil {
return nil, err
}
}
return keys, nil
}
// Split for specified table partitions.
keys = make([][]byte, 0, (len(e.valueLists)+1)*len(e.partitionNames))
for _, name := range e.partitionNames {
pid, err := tables.FindPartitionByName(e.tableInfo, name.L)
if err != nil {
return nil, err
}
keys, err = e.getSplitIdxPhysicalKeysFromValueList(pid, keys)
if err != nil {
return nil, err
}
}
return keys, nil
}
func (e *SplitIndexRegionExec) getSplitIdxPhysicalKeysFromValueList(physicalID int64, keys [][]byte) ([][]byte, error) {
keys = e.getSplitIdxPhysicalStartAndOtherIdxKeys(physicalID, keys)
index := tables.NewIndex(physicalID, e.tableInfo, e.indexInfo)
for _, v := range e.valueLists {
idxKey, _, err := index.GenIndexKey(e.ctx.GetSessionVars().StmtCtx, v, kv.IntHandle(math.MinInt64), nil)
if err != nil {
return nil, err
}
keys = append(keys, idxKey)
}
return keys, nil
}
func (e *SplitIndexRegionExec) getSplitIdxPhysicalStartAndOtherIdxKeys(physicalID int64, keys [][]byte) [][]byte {
// 1. Split in the start key for the index if the index is not the first index.
// For the first index, splitting the start key can produce the region [tid, tid_i_1), which is useless.
if len(e.tableInfo.Indices) > 0 && e.tableInfo.Indices[0].ID != e.indexInfo.ID {
startKey := tablecodec.EncodeTableIndexPrefix(physicalID, e.indexInfo.ID)
keys = append(keys, startKey)
}
// 2. Split in the end key.
endKey := tablecodec.EncodeTableIndexPrefix(physicalID, e.indexInfo.ID+1)
keys = append(keys, endKey)
return keys
}
func (e *SplitIndexRegionExec) getSplitIdxKeysFromBound() (keys [][]byte, err error) {
pi := e.tableInfo.GetPartitionInfo()
if pi == nil {
keys = make([][]byte, 0, e.num)
return e.getSplitIdxPhysicalKeysFromBound(e.tableInfo.ID, keys)
}
// Split for all table partitions.
if len(e.partitionNames) == 0 {
keys = make([][]byte, 0, e.num*len(pi.Definitions))
for _, p := range pi.Definitions {
keys, err = e.getSplitIdxPhysicalKeysFromBound(p.ID, keys)
if err != nil {
return nil, err
}
}
return keys, nil
}
// Split for specified table partitions.
keys = make([][]byte, 0, e.num*len(e.partitionNames))
for _, name := range e.partitionNames {
pid, err := tables.FindPartitionByName(e.tableInfo, name.L)
if err != nil {
return nil, err
}
keys, err = e.getSplitIdxPhysicalKeysFromBound(pid, keys)
if err != nil {
return nil, err
}
}
return keys, nil
}
func (e *SplitIndexRegionExec) getSplitIdxPhysicalKeysFromBound(physicalID int64, keys [][]byte) ([][]byte, error) {
keys = e.getSplitIdxPhysicalStartAndOtherIdxKeys(physicalID, keys)
index := tables.NewIndex(physicalID, e.tableInfo, e.indexInfo)
// Split index regions by lower, upper value and calculate the step by (upper - lower)/num.
lowerIdxKey, _, err := index.GenIndexKey(e.ctx.GetSessionVars().StmtCtx, e.lower, kv.IntHandle(math.MinInt64), nil)
if err != nil {
return nil, err
}
	// Use math.MinInt64 as the handle_id for the upper index key to avoid affecting the split point calculation.
	// If math.MaxInt64 were used here, the `TestSplitIndex` test would report an error.
upperIdxKey, _, err := index.GenIndexKey(e.ctx.GetSessionVars().StmtCtx, e.upper, kv.IntHandle(math.MinInt64), nil)
if err != nil {
return nil, err
}
if bytes.Compare(lowerIdxKey, upperIdxKey) >= 0 {
lowerStr := datumSliceToString(e.lower)
upperStr := datumSliceToString(e.upper)
errMsg := fmt.Sprintf("Split index `%v` region lower value %v should less than the upper value %v",
e.indexInfo.Name, lowerStr, upperStr)
return nil, ErrInvalidSplitRegionRanges.GenWithStackByArgs(errMsg)
}
return getValuesList(lowerIdxKey, upperIdxKey, e.num, keys), nil
}
// getValuesList is used to get `num` values between the lower and upper value.
// To simplify the explanation, suppose the lower and upper value type is int64 with lower=0, upper=100, num=10;
// then the step=(upper-lower)/num=10, and the function should return 0+10, 10+10, 20+10..., altogether 9 (num-1) values.
// So the function returns [10,20,30,40,50,60,70,80,90].
// The difference is that the upper and lower values here are []byte, so getUint64FromBytes is used to convert []byte to uint64.
func getValuesList(lower, upper []byte, num int, valuesList [][]byte) [][]byte {
commonPrefixIdx := longestCommonPrefixLen(lower, upper)
step := getStepValue(lower[commonPrefixIdx:], upper[commonPrefixIdx:], num)
startV := getUint64FromBytes(lower[commonPrefixIdx:], 0)
// To get `num` regions, only need to split `num-1` idx keys.
buf := make([]byte, 8)
for i := 0; i < num-1; i++ {
value := make([]byte, 0, commonPrefixIdx+8)
value = append(value, lower[:commonPrefixIdx]...)
startV += step
binary.BigEndian.PutUint64(buf, startV)
value = append(value, buf...)
valuesList = append(valuesList, value)
}
return valuesList
}
// longestCommonPrefixLen gets the longest common prefix byte length.
func longestCommonPrefixLen(s1, s2 []byte) int {
l := mathutil.Min(len(s1), len(s2))
i := 0
for ; i < l; i++ {
if s1[i] != s2[i] {
break
}
}
return i
}
// getStepValue gets the step between the lower and upper value. step = (upper-lower)/num.
// Convert byte slice to uint64 first.
func getStepValue(lower, upper []byte, num int) uint64 {
lowerUint := getUint64FromBytes(lower, 0)
upperUint := getUint64FromBytes(upper, 0xff)
return (upperUint - lowerUint) / uint64(num)
}
// getUint64FromBytes gets a uint64 from the `bs` byte slice.
// If len(bs) < 8, it is padded with `pad`.
func getUint64FromBytes(bs []byte, pad byte) uint64 {
buf := bs
if len(buf) < 8 {
buf = make([]byte, 0, 8)
buf = append(buf, bs...)
for i := len(buf); i < 8; i++ {
buf = append(buf, pad)
}
}
return binary.BigEndian.Uint64(buf)
}
func datumSliceToString(ds []types.Datum) string {
str := "("
for i, d := range ds {
s, err := d.ToString()
if err != nil {
return fmt.Sprintf("%v", ds)
}
if i > 0 {
str += ","
}
str += s
}
str += ")"
return str
}
// SplitTableRegionExec represents a split table regions executor.
type SplitTableRegionExec struct {
baseExecutor
tableInfo *model.TableInfo
partitionNames []model.CIStr
lower []types.Datum
upper []types.Datum
num int
handleCols core.HandleCols
valueLists [][]types.Datum
splitKeys [][]byte
done bool
splitRegionResult
}
// Open implements the Executor Open interface.
func (e *SplitTableRegionExec) Open(ctx context.Context) (err error) {
e.splitKeys, err = e.getSplitTableKeys()
return err
}
// Next implements the Executor Next interface.
func (e *SplitTableRegionExec) Next(ctx context.Context, chk *chunk.Chunk) error {
chk.Reset()
if e.done {
return nil
}
e.done = true
if err := e.splitTableRegion(ctx); err != nil {
return err
}
appendSplitRegionResultToChunk(chk, e.splitRegions, e.finishScatterNum)
return nil
}
func (e *SplitTableRegionExec) splitTableRegion(ctx context.Context) error {
store := e.ctx.GetStore()
s, ok := store.(kv.SplittableStore)
if !ok {
return nil
}
start := time.Now()
ctxWithTimeout, cancel := context.WithTimeout(ctx, e.ctx.GetSessionVars().GetSplitRegionTimeout())
defer cancel()
regionIDs, err := s.SplitRegions(ctxWithTimeout, e.splitKeys, true, &e.tableInfo.ID)
if err != nil {
logutil.BgLogger().Warn("split table region failed",
zap.String("table", e.tableInfo.Name.L),
zap.Error(err))
}
e.splitRegions = len(regionIDs)
if e.splitRegions == 0 {
return nil
}
if !e.ctx.GetSessionVars().WaitSplitRegionFinish {
return nil
}
e.finishScatterNum = waitScatterRegionFinish(ctxWithTimeout, e.ctx, start, s, regionIDs, e.tableInfo.Name.L, "")
return nil
}
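// waitScatterRegionFinish waits for the newly split regions to finish scattering and returns how many of them finished.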
func waitScatterRegionFinish(ctxWithTimeout context.Context, sctx sessionctx.Context, startTime time.Time, store kv.SplittableStore, regionIDs []uint64, tableName, indexName string) int {
remainMillisecond := 0
finishScatterNum := 0
for _, regionID := range regionIDs {
if isCtxDone(ctxWithTimeout) {
// Do not break here for checking remain regions scatter finished with a very short backoff time.
// Consider this situation - Regions 1, 2, and 3 are to be split.
// Region 1 times out before scattering finishes, while Region 2 and Region 3 have finished scattering.
// In this case, we should return 2 Regions, instead of 0, have finished scattering.
remainMillisecond = checkScatterRegionFinishBackOff
} else {
remainMillisecond = int((sctx.GetSessionVars().GetSplitRegionTimeout().Seconds() - time.Since(startTime).Seconds()) * 1000)
}
err := store.WaitScatterRegionFinish(ctxWithTimeout, regionID, remainMillisecond)
if err == nil {
finishScatterNum++
} else {
if len(indexName) == 0 {
logutil.BgLogger().Warn("wait scatter region failed",
zap.Uint64("regionID", regionID),
zap.String("table", tableName),
zap.Error(err))
} else {
logutil.BgLogger().Warn("wait scatter region failed",
zap.Uint64("regionID", regionID),
zap.String("table", tableName),
zap.String("index", indexName),
zap.Error(err))
}
}
}
return finishScatterNum
}
func appendSplitRegionResultToChunk(chk *chunk.Chunk, totalRegions, finishScatterNum int) {
chk.AppendInt64(0, int64(totalRegions))
if finishScatterNum > 0 && totalRegions > 0 {
chk.AppendFloat64(1, float64(finishScatterNum)/float64(totalRegions))
} else {
chk.AppendFloat64(1, float64(0))
}
}
func isCtxDone(ctx context.Context) bool {
select {
case <-ctx.Done():
return true
default:
return false
}
}
var minRegionStepValue = int64(1000)
func (e *SplitTableRegionExec) getSplitTableKeys() ([][]byte, error) {
if len(e.valueLists) > 0 {
return e.getSplitTableKeysFromValueList()
}
return e.getSplitTableKeysFromBound()
}
func (e *SplitTableRegionExec) getSplitTableKeysFromValueList() ([][]byte, error) {
var keys [][]byte
pi := e.tableInfo.GetPartitionInfo()
if pi == nil {
keys = make([][]byte, 0, len(e.valueLists))
return e.getSplitTablePhysicalKeysFromValueList(e.tableInfo.ID, keys)
}
// Split for all table partitions.
if len(e.partitionNames) == 0 {
keys = make([][]byte, 0, len(e.valueLists)*len(pi.Definitions))
for _, p := range pi.Definitions {
var err error
keys, err = e.getSplitTablePhysicalKeysFromValueList(p.ID, keys)
if err != nil {
return nil, err
}
}
return keys, nil
}
// Split for specified table partitions.
keys = make([][]byte, 0, len(e.valueLists)*len(e.partitionNames))
for _, name := range e.partitionNames {
pid, err := tables.FindPartitionByName(e.tableInfo, name.L)
if err != nil {
return nil, err
}
keys, err = e.getSplitTablePhysicalKeysFromValueList(pid, keys)
if err != nil {
return nil, err
}
}
return keys, nil
}
func (e *SplitTableRegionExec) getSplitTablePhysicalKeysFromValueList(physicalID int64, keys [][]byte) ([][]byte, error) {
recordPrefix := tablecodec.GenTableRecordPrefix(physicalID)
for _, v := range e.valueLists {
handle, err := e.handleCols.BuildHandleByDatums(v)
if err != nil {
return nil, err
}
key := tablecodec.EncodeRecordKey(recordPrefix, handle)
keys = append(keys, key)
}
return keys, nil
}
func (e *SplitTableRegionExec) getSplitTableKeysFromBound() ([][]byte, error) {
var keys [][]byte
pi := e.tableInfo.GetPartitionInfo()
if pi == nil {
keys = make([][]byte, 0, e.num)
return e.getSplitTablePhysicalKeysFromBound(e.tableInfo.ID, keys)
}
// Split for all table partitions.
if len(e.partitionNames) == 0 {
keys = make([][]byte, 0, e.num*len(pi.Definitions))
for _, p := range pi.Definitions {
var err error
keys, err = e.getSplitTablePhysicalKeysFromBound(p.ID, keys)
if err != nil {
return nil, err
}
}
return keys, nil
}
// Split for specified table partitions.
keys = make([][]byte, 0, e.num*len(e.partitionNames))
for _, name := range e.partitionNames {
pid, err := tables.FindPartitionByName(e.tableInfo, name.L)
if err != nil {
return nil, err
}
keys, err = e.getSplitTablePhysicalKeysFromBound(pid, keys)
if err != nil {
return nil, err
}
}
return keys, nil
}
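// calculateIntBoundValue returns the lower handle value and the per-region step for an integer-handle table, taking signed and unsigned handles into account.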
func (e *SplitTableRegionExec) calculateIntBoundValue() (lowerValue int64, step int64, err error) {
isUnsigned := false
if e.tableInfo.PKIsHandle {
if pkCol := e.tableInfo.GetPkColInfo(); pkCol != nil {
isUnsigned = mysql.HasUnsignedFlag(pkCol.Flag)
}
}
if isUnsigned {
lowerRecordID := e.lower[0].GetUint64()
upperRecordID := e.upper[0].GetUint64()
if upperRecordID <= lowerRecordID {
errMsg := fmt.Sprintf("lower value %v should less than the upper value %v", lowerRecordID, upperRecordID)
return 0, 0, ErrInvalidSplitRegionRanges.GenWithStackByArgs(errMsg)
}
step = int64((upperRecordID - lowerRecordID) / uint64(e.num))
lowerValue = int64(lowerRecordID)
} else {
lowerRecordID := e.lower[0].GetInt64()
upperRecordID := e.upper[0].GetInt64()
if upperRecordID <= lowerRecordID {
errMsg := fmt.Sprintf("lower value %v should less than the upper value %v", lowerRecordID, upperRecordID)
return 0, 0, ErrInvalidSplitRegionRanges.GenWithStackByArgs(errMsg)
}
step = int64(uint64(upperRecordID-lowerRecordID) / uint64(e.num))
lowerValue = lowerRecordID
}
if step < minRegionStepValue {
		errMsg := fmt.Sprintf("the region size is too small, expected at least %d, but got %d", minRegionStepValue, step)
return 0, 0, ErrInvalidSplitRegionRanges.GenWithStackByArgs(errMsg)
}
return lowerValue, step, nil
}
func (e *SplitTableRegionExec) getSplitTablePhysicalKeysFromBound(physicalID int64, keys [][]byte) ([][]byte, error) {
recordPrefix := tablecodec.GenTableRecordPrefix(physicalID)
// Split a separate region for index.
containsIndex := len(e.tableInfo.Indices) > 0 && !(e.tableInfo.IsCommonHandle && len(e.tableInfo.Indices) == 1)
if containsIndex {
keys = append(keys, recordPrefix)
}
if e.handleCols.IsInt() {
low, step, err := e.calculateIntBoundValue()
if err != nil {
return nil, err
}
recordID := low
for i := 1; i < e.num; i++ {
recordID += step
key := tablecodec.EncodeRecordKey(recordPrefix, kv.IntHandle(recordID))
keys = append(keys, key)
}
return keys, nil
}
lowerHandle, err := e.handleCols.BuildHandleByDatums(e.lower)
if err != nil {
return nil, err
}
upperHandle, err := e.handleCols.BuildHandleByDatums(e.upper)
if err != nil {
return nil, err
}
if lowerHandle.Compare(upperHandle) >= 0 {
lowerStr := datumSliceToString(e.lower)
upperStr := datumSliceToString(e.upper)
errMsg := fmt.Sprintf("Split table `%v` region lower value %v should less than the upper value %v",
e.tableInfo.Name.O, lowerStr, upperStr)
return nil, ErrInvalidSplitRegionRanges.GenWithStackByArgs(errMsg)
}
low := tablecodec.EncodeRecordKey(recordPrefix, lowerHandle)
up := tablecodec.EncodeRecordKey(recordPrefix, upperHandle)
return getValuesList(low, up, e.num, keys), nil
}
// regionMeta contains a region's peer details.
type regionMeta struct {
region *metapb.Region
leaderID uint64
storeID uint64 // storeID is the store ID of the leader region.
start string
end string
scattering bool
writtenBytes int64
readBytes int64
approximateSize int64
approximateKeys int64
}
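// getPhysicalTableRegions collects the region metadata of the record and index key ranges of one
// physical table, skipping regions that are already present in uniqueRegionMap.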
func getPhysicalTableRegions(physicalTableID int64, tableInfo *model.TableInfo, tikvStore helper.Storage, s kv.SplittableStore, uniqueRegionMap map[uint64]struct{}) ([]regionMeta, error) {
if uniqueRegionMap == nil {
uniqueRegionMap = make(map[uint64]struct{})
}
// This is used to decode the int handle properly.
var hasUnsignedIntHandle bool
if pkInfo := tableInfo.GetPkColInfo(); pkInfo != nil {
hasUnsignedIntHandle = mysql.HasUnsignedFlag(pkInfo.Flag)
}
// for record
startKey, endKey := tablecodec.GetTableHandleKeyRange(physicalTableID)
regionCache := tikvStore.GetRegionCache()
recordRegionMetas, err := regionCache.LoadRegionsInKeyRange(tikv.NewBackofferWithVars(context.Background(), 20000, nil), startKey, endKey)
if err != nil {
return nil, err
}
recordPrefix := tablecodec.GenTableRecordPrefix(physicalTableID)
tablePrefix := tablecodec.GenTablePrefix(physicalTableID)
recordRegions, err := getRegionMeta(tikvStore, recordRegionMetas, uniqueRegionMap, tablePrefix, recordPrefix, nil, physicalTableID, 0, hasUnsignedIntHandle)
if err != nil {
return nil, err
}
regions := recordRegions
// for indices
for _, index := range tableInfo.Indices {
if index.State != model.StatePublic {
continue
}
startKey, endKey := tablecodec.GetTableIndexKeyRange(physicalTableID, index.ID)
regionMetas, err := regionCache.LoadRegionsInKeyRange(tikv.NewBackofferWithVars(context.Background(), 20000, nil), startKey, endKey)
if err != nil {
return nil, err
}
indexPrefix := tablecodec.EncodeTableIndexPrefix(physicalTableID, index.ID)
indexRegions, err := getRegionMeta(tikvStore, regionMetas, uniqueRegionMap, tablePrefix, recordPrefix, indexPrefix, physicalTableID, index.ID, hasUnsignedIntHandle)
if err != nil {
return nil, err
}
regions = append(regions, indexRegions...)
}
err = checkRegionsStatus(s, regions)
if err != nil {
return nil, err
}
return regions, nil
}
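// getPhysicalIndexRegions collects the region metadata of one physical index's key range,
// skipping regions that are already present in uniqueRegionMap.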
func getPhysicalIndexRegions(physicalTableID int64, indexInfo *model.IndexInfo, tikvStore helper.Storage, s kv.SplittableStore, uniqueRegionMap map[uint64]struct{}) ([]regionMeta, error) {
if uniqueRegionMap == nil {
uniqueRegionMap = make(map[uint64]struct{})
}
startKey, endKey := tablecodec.GetTableIndexKeyRange(physicalTableID, indexInfo.ID)
regionCache := tikvStore.GetRegionCache()
regions, err := regionCache.LoadRegionsInKeyRange(tikv.NewBackofferWithVars(context.Background(), 20000, nil), startKey, endKey)
if err != nil {
return nil, err
}
recordPrefix := tablecodec.GenTableRecordPrefix(physicalTableID)
tablePrefix := tablecodec.GenTablePrefix(physicalTableID)
indexPrefix := tablecodec.EncodeTableIndexPrefix(physicalTableID, indexInfo.ID)
indexRegions, err := getRegionMeta(tikvStore, regions, uniqueRegionMap, tablePrefix, recordPrefix, indexPrefix, physicalTableID, indexInfo.ID, false)
if err != nil {
return nil, err
}
err = checkRegionsStatus(s, indexRegions)
if err != nil {
return nil, err
}
return indexRegions, nil
}
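// checkRegionsStatus queries the store for each region's scattering state and records it in the
// regionMeta slice.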
func checkRegionsStatus(store kv.SplittableStore, regions []regionMeta) error {
for i := range regions {
scattering, err := store.CheckRegionInScattering(regions[i].region.Id)
if err != nil {
return err
}
regions[i].scattering = scattering
}
return nil
}
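// decodeRegionsKey rewrites each region's start/end key into a human-readable form such as
// t_<tableID>_r_<handle> for record keys or t_<tableID>_i_<indexID>_<hex> for index keys.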
func decodeRegionsKey(regions []regionMeta, tablePrefix, recordPrefix, indexPrefix []byte,
physicalTableID, indexID int64, hasUnsignedIntHandle bool) {
	d := &regionKeyDecoder{
physicalTableID: physicalTableID,
tablePrefix: tablePrefix,
recordPrefix: recordPrefix,
indexPrefix: indexPrefix,
indexID: indexID,
hasUnsignedIntHandle: hasUnsignedIntHandle,
}
for i := range regions {
regions[i].start = d.decodeRegionKey(regions[i].region.StartKey)
regions[i].end = d.decodeRegionKey(regions[i].region.EndKey)
}
}
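// regionKeyDecoder holds the prefixes needed to pretty-print the region boundary keys of a
// physical table.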
type regionKeyDecoder struct {
physicalTableID int64
tablePrefix []byte
recordPrefix []byte
indexPrefix []byte
indexID int64
hasUnsignedIntHandle bool
}
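// decodeRegionKey formats a raw region boundary key. Typical outputs are
// t_<tableID>_r_<handle> for record keys, t_<tableID>_i_<indexID>_<hex> for index keys,
// and a plain hex string when the key matches no known prefix.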
func (d *regionKeyDecoder) decodeRegionKey(key []byte) string {
if len(d.indexPrefix) > 0 && bytes.HasPrefix(key, d.indexPrefix) {
return fmt.Sprintf("t_%d_i_%d_%x", d.physicalTableID, d.indexID, key[len(d.indexPrefix):])
} else if len(d.recordPrefix) > 0 && bytes.HasPrefix(key, d.recordPrefix) {
if len(d.recordPrefix) == len(key) {
return fmt.Sprintf("t_%d_r", d.physicalTableID)
}
isIntHandle := len(key)-len(d.recordPrefix) == 8
if isIntHandle {
_, handle, err := codec.DecodeInt(key[len(d.recordPrefix):])
if err == nil {
if d.hasUnsignedIntHandle {
return fmt.Sprintf("t_%d_r_%d", d.physicalTableID, uint64(handle))
}
return fmt.Sprintf("t_%d_r_%d", d.physicalTableID, handle)
}
}
return fmt.Sprintf("t_%d_r_%x", d.physicalTableID, key[len(d.recordPrefix):])
}
if len(d.tablePrefix) > 0 && bytes.HasPrefix(key, d.tablePrefix) {
key = key[len(d.tablePrefix):]
		// If the remaining key does not start with the index prefix "_i", print it in a generic format.
if !bytes.HasPrefix(key, []byte("_i")) {
return fmt.Sprintf("t_%d_%x", d.physicalTableID, key)
}
key = key[2:]
// try to decode index ID.
if _, indexID, err := codec.DecodeInt(key); err == nil {
return fmt.Sprintf("t_%d_i_%d_%x", d.physicalTableID, indexID, key[8:])
}
return fmt.Sprintf("t_%d_i__%x", d.physicalTableID, key)
}
// Has table prefix.
if bytes.HasPrefix(key, []byte("t")) {
key = key[1:]
// try to decode table ID.
if _, tableID, err := codec.DecodeInt(key); err == nil {
return fmt.Sprintf("t_%d_%x", tableID, key[8:])
}
return fmt.Sprintf("t_%x", key)
}
return fmt.Sprintf("%x", key)
}
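// getRegionMeta deduplicates the loaded regions via uniqueRegionMap, enriches them with PD
// statistics, and decodes their boundary keys into readable strings.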
func getRegionMeta(tikvStore helper.Storage, regionMetas []*tikv.Region, uniqueRegionMap map[uint64]struct{},
tablePrefix, recordPrefix, indexPrefix []byte, physicalTableID, indexID int64,
hasUnsignedIntHandle bool) ([]regionMeta, error) {
regions := make([]regionMeta, 0, len(regionMetas))
for _, r := range regionMetas {
if _, ok := uniqueRegionMap[r.GetID()]; ok {
continue
}
uniqueRegionMap[r.GetID()] = struct{}{}
regions = append(regions, regionMeta{
region: r.GetMeta(),
leaderID: r.GetLeaderPeerID(),
storeID: r.GetLeaderStoreID(),
})
}
regions, err := getRegionInfo(tikvStore, regions)
if err != nil {
return regions, err
}
decodeRegionsKey(regions, tablePrefix, recordPrefix, indexPrefix, physicalTableID, indexID, hasUnsignedIntHandle)
return regions, nil
}
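// getRegionInfo fills in the written/read bytes and approximate size/keys of each region from PD;
// it is a no-op when the store has no etcd (PD) backend or no PD address is configured.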
func getRegionInfo(store helper.Storage, regions []regionMeta) ([]regionMeta, error) {
// check pd server exists.
etcd, ok := store.(kv.EtcdBackend)
if !ok {
return regions, nil
}
pdHosts, err := etcd.EtcdAddrs()
if err != nil {
return regions, err
}
if len(pdHosts) == 0 {
return regions, nil
}
tikvHelper := &helper.Helper{
Store: store,
RegionCache: store.GetRegionCache(),
}
for i := range regions {
regionInfo, err := tikvHelper.GetRegionInfoByID(regions[i].region.Id)
if err != nil {
return nil, err
}
regions[i].writtenBytes = regionInfo.WrittenBytes
regions[i].readBytes = regionInfo.ReadBytes
regions[i].approximateSize = regionInfo.ApproximateSize
regions[i].approximateKeys = regionInfo.ApproximateKeys
}
return regions, nil
}
| executor/split.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0021614455617964268,
0.00021339151135180146,
0.00016225701256189495,
0.00017019876395352185,
0.00023217161651700735
] |
{
"id": 6,
"code_window": [
"\t\"go.uber.org/zap\"\n",
")\n",
"\n",
"type actionPessimisticLock struct {\n",
"\t*kv.LockCtx\n",
"}\n",
"type actionPessimisticRollback struct{}\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// Used for pessimistic lock wait time\n",
"// these two constants are special for lock protocol with tikv\n",
"// 0 means always wait, -1 means nowait, others meaning lock wait in milliseconds\n",
"var (\n",
"\tLockAlwaysWait = int64(0)\n",
"\tLockNoWait = int64(-1)\n",
")\n",
"\n"
],
"file_path": "store/tikv/pessimistic.go",
"type": "add",
"edit_start_line_idx": 34
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tracing
import (
"context"
"github.com/opentracing/basictracer-go"
"github.com/opentracing/opentracing-go"
)
// TiDBTrace is set as Baggage on traces which are used for tidb tracing.
const TiDBTrace = "tr"
// A CallbackRecorder immediately invokes itself on received trace spans.
type CallbackRecorder func(sp basictracer.RawSpan)
// RecordSpan implements basictracer.SpanRecorder.
func (cr CallbackRecorder) RecordSpan(sp basictracer.RawSpan) {
cr(sp)
}
// NewRecordedTrace returns a Span which records directly via the specified
// callback.
func NewRecordedTrace(opName string, callback func(sp basictracer.RawSpan)) opentracing.Span {
tr := basictracer.New(CallbackRecorder(callback))
opentracing.SetGlobalTracer(tr)
sp := tr.StartSpan(opName)
sp.SetBaggageItem(TiDBTrace, "1")
return sp
}
// noopSpan returns a Span which discards all operations.
func noopSpan() opentracing.Span {
return (opentracing.NoopTracer{}).StartSpan("DefaultSpan")
}
// SpanFromContext returns the span obtained from the context or, if none is found, a new one started through tracer.
func SpanFromContext(ctx context.Context) (sp opentracing.Span) {
if sp = opentracing.SpanFromContext(ctx); sp == nil {
return noopSpan()
}
return sp
}
// ChildSpanFromContxt returns a non-nil span. If a span can be obtained from ctx, the returned span is
// a child of that span. Otherwise, a noop span is returned.
func ChildSpanFromContxt(ctx context.Context, opName string) (opentracing.Span, context.Context) {
if sp := opentracing.SpanFromContext(ctx); sp != nil {
if _, ok := sp.Tracer().(opentracing.NoopTracer); !ok {
child := opentracing.StartSpan(opName, opentracing.ChildOf(sp.Context()))
return child, opentracing.ContextWithSpan(ctx, child)
}
}
return noopSpan(), ctx
}
| util/tracing/util.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.00038030269206501544,
0.00020187089103274047,
0.0001675059029366821,
0.0001724036264931783,
0.00007289839413715526
] |
{
"id": 6,
"code_window": [
"\t\"go.uber.org/zap\"\n",
")\n",
"\n",
"type actionPessimisticLock struct {\n",
"\t*kv.LockCtx\n",
"}\n",
"type actionPessimisticRollback struct{}\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"// Used for pessimistic lock wait time\n",
"// these two constants are special for lock protocol with tikv\n",
"// 0 means always wait, -1 means nowait, others meaning lock wait in milliseconds\n",
"var (\n",
"\tLockAlwaysWait = int64(0)\n",
"\tLockNoWait = int64(-1)\n",
")\n",
"\n"
],
"file_path": "store/tikv/pessimistic.go",
"type": "add",
"edit_start_line_idx": 34
} | use test;
drop table if exists t;
create table t(a bigint, b bigint);
explain format = 'brief' insert into t values(1, 1);
id estRows task access object operator info
Insert N/A root N/A
explain format = 'brief' insert into t select * from t;
id estRows task access object operator info
Insert N/A root N/A
└─TableReader 10000.00 root data:TableFullScan
└─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain format = 'brief' delete from t where a > 100;
id estRows task access object operator info
Delete N/A root N/A
└─TableReader 3333.33 root data:Selection
└─Selection 3333.33 cop[tikv] gt(test.t.a, 100)
└─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain format = 'brief' update t set b = 100 where a = 200;
id estRows task access object operator info
Update N/A root N/A
└─TableReader 10.00 root data:Selection
└─Selection 10.00 cop[tikv] eq(test.t.a, 200)
└─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
explain format = 'brief' replace into t select a, 100 from t;
id estRows task access object operator info
Insert N/A root N/A
└─Projection 10000.00 root test.t.a, 100->Column#6
└─TableReader 10000.00 root data:TableFullScan
└─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
| cmd/explaintest/r/explain-non-select-stmt.result | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0001725765032460913,
0.0001711892691673711,
0.00016963786038104445,
0.00017135344387497753,
0.0000012052995543854195
] |
{
"id": 7,
"code_window": [
"\t\tif action.LockWaitTime > 0 {\n",
"\t\t\ttimeLeft := action.LockWaitTime - (time.Since(lockWaitStartTime)).Milliseconds()\n",
"\t\t\tif timeLeft <= 0 {\n",
"\t\t\t\treq.PessimisticLock().WaitTimeout = tidbkv.LockNoWait\n",
"\t\t\t} else {\n",
"\t\t\t\treq.PessimisticLock().WaitTimeout = timeLeft\n",
"\t\t\t}\n",
"\t\t}\n",
"\t\tfailpoint.Inject(\"PessimisticLockErrWriteConflict\", func() error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\treq.PessimisticLock().WaitTimeout = LockNoWait\n"
],
"file_path": "store/tikv/pessimistic.go",
"type": "replace",
"edit_start_line_idx": 101
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv_test
import (
"bytes"
"context"
"fmt"
"math"
"math/rand"
"strings"
"sync"
"sync/atomic"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
tidbkv "github.com/pingcap/tidb/kv"
drivertxn "github.com/pingcap/tidb/store/driver/txn"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/config"
"github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
"github.com/pingcap/tidb/store/tikv/mockstore/mocktikv"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/tablecodec"
)
var (
txnCommitBatchSize = tikv.ConfigProbe{}.GetTxnCommitBatchSize()
bigTxnThreshold = tikv.ConfigProbe{}.GetBigTxnThreshold()
)
type testCommitterSuite struct {
OneByOneSuite
cluster cluster.Cluster
store tikv.StoreProbe
}
var _ = SerialSuites(&testCommitterSuite{})
func (s *testCommitterSuite) SetUpSuite(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // 3s
s.OneByOneSuite.SetUpSuite(c)
atomic.StoreUint64(&tikv.CommitMaxBackoff, 1000)
}
func (s *testCommitterSuite) SetUpTest(c *C) {
mvccStore, err := mocktikv.NewMVCCLevelDB("")
c.Assert(err, IsNil)
cluster := mocktikv.NewCluster(mvccStore)
mocktikv.BootstrapWithMultiRegions(cluster, []byte("a"), []byte("b"), []byte("c"))
s.cluster = cluster
client := mocktikv.NewRPCClient(cluster, mvccStore, nil)
pdCli := &tikv.CodecPDClient{Client: mocktikv.NewPDClient(cluster)}
spkv := tikv.NewMockSafePointKV()
store, err := tikv.NewKVStore("mocktikv-store", pdCli, spkv, client)
store.EnableTxnLocalLatches(1024000)
c.Assert(err, IsNil)
// TODO: make it possible
// store, err := mockstore.NewMockStore(
// mockstore.WithStoreType(mockstore.MockTiKV),
// mockstore.WithClusterInspector(func(c cluster.Cluster) {
// mockstore.BootstrapWithMultiRegions(c, []byte("a"), []byte("b"), []byte("c"))
// s.cluster = c
// }),
// mockstore.WithPDClientHijacker(func(c pd.Client) pd.Client {
// return &codecPDClient{c}
// }),
// mockstore.WithTxnLocalLatches(1024000),
// )
// c.Assert(err, IsNil)
s.store = tikv.StoreProbe{KVStore: store}
}
func (s *testCommitterSuite) TearDownSuite(c *C) {
atomic.StoreUint64(&tikv.CommitMaxBackoff, 20000)
s.store.Close()
s.OneByOneSuite.TearDownSuite(c)
}
func (s *testCommitterSuite) begin(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
return txn
}
func (s *testCommitterSuite) beginAsyncCommit(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.SetOption(kv.EnableAsyncCommit, true)
return txn
}
func (s *testCommitterSuite) checkValues(c *C, m map[string]string) {
txn := s.begin(c)
for k, v := range m {
val, err := txn.Get(context.TODO(), []byte(k))
c.Assert(err, IsNil)
c.Assert(string(val), Equals, v)
}
}
func (s *testCommitterSuite) mustCommit(c *C, m map[string]string) {
txn := s.begin(c)
for k, v := range m {
err := txn.Set([]byte(k), []byte(v))
c.Assert(err, IsNil)
}
err := txn.Commit(context.Background())
c.Assert(err, IsNil)
s.checkValues(c, m)
}
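// randKV returns a random key and value of the given lengths, drawn from a small alphabet.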
func randKV(keyLen, valLen int) (string, string) {
const letters = "abc"
k, v := make([]byte, keyLen), make([]byte, valLen)
for i := range k {
k[i] = letters[rand.Intn(len(letters))]
}
for i := range v {
v[i] = letters[rand.Intn(len(letters))]
}
return string(k), string(v)
}
func (s *testCommitterSuite) TestDeleteYourWritesTTL(c *C) {
conf := *config.GetGlobalConfig()
oldConf := conf
defer config.StoreGlobalConfig(&oldConf)
conf.TiKVClient.TTLRefreshedTxnSize = 0
config.StoreGlobalConfig(&conf)
{
txn := s.begin(c)
err := txn.GetMemBuffer().SetWithFlags([]byte("bb"), []byte{0}, kv.SetPresumeKeyNotExists)
c.Assert(err, IsNil)
err = txn.Set([]byte("ba"), []byte{1})
c.Assert(err, IsNil)
err = txn.Delete([]byte("bb"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
c.Check(committer.IsTTLRunning(), IsTrue)
}
{
txn := s.begin(c)
err := txn.GetMemBuffer().SetWithFlags([]byte("dd"), []byte{0}, kv.SetPresumeKeyNotExists)
c.Assert(err, IsNil)
err = txn.Set([]byte("de"), []byte{1})
c.Assert(err, IsNil)
err = txn.Delete([]byte("dd"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
c.Check(committer.IsTTLRunning(), IsTrue)
}
}
func (s *testCommitterSuite) TestCommitRollback(c *C) {
s.mustCommit(c, map[string]string{
"a": "a",
"b": "b",
"c": "c",
})
txn := s.begin(c)
txn.Set([]byte("a"), []byte("a1"))
txn.Set([]byte("b"), []byte("b1"))
txn.Set([]byte("c"), []byte("c1"))
s.mustCommit(c, map[string]string{
"c": "c2",
})
err := txn.Commit(context.Background())
c.Assert(err, NotNil)
s.checkValues(c, map[string]string{
"a": "a",
"b": "b",
"c": "c2",
})
}
func (s *testCommitterSuite) TestPrewriteRollback(c *C) {
s.mustCommit(c, map[string]string{
"a": "a0",
"b": "b0",
})
ctx := context.Background()
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
txn2 := s.begin(c)
v, err := txn2.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a0"))
err = committer.PrewriteAllMutations(ctx)
if err != nil {
// Retry.
txn1 = s.begin(c)
err = txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err = txn1.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
}
commitTS, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
committer.SetCommitTS(commitTS)
err = committer.CommitMutations(ctx)
c.Assert(err, IsNil)
txn3 := s.begin(c)
v, err = txn3.Get(context.TODO(), []byte("b"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("b1"))
}
func (s *testCommitterSuite) TestContextCancel(c *C) {
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
ctx, cancel := context.WithCancel(context.Background())
cancel() // cancel the context
err = committer.PrewriteAllMutations(ctx)
c.Assert(errors.Cause(err), Equals, context.Canceled)
}
func (s *testCommitterSuite) TestContextCancel2(c *C) {
txn := s.begin(c)
err := txn.Set([]byte("a"), []byte("a"))
c.Assert(err, IsNil)
err = txn.Set([]byte("b"), []byte("b"))
c.Assert(err, IsNil)
ctx, cancel := context.WithCancel(context.Background())
err = txn.Commit(ctx)
c.Assert(err, IsNil)
cancel()
// Secondary keys should not be canceled.
time.Sleep(time.Millisecond * 20)
c.Assert(s.isKeyLocked(c, []byte("b")), IsFalse)
}
func (s *testCommitterSuite) TestContextCancelRetryable(c *C) {
txn1, txn2, txn3 := s.begin(c), s.begin(c), s.begin(c)
// txn1 locks "b"
err := txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
// txn3 writes "c"
err = txn3.Set([]byte("c"), []byte("c3"))
c.Assert(err, IsNil)
err = txn3.Commit(context.Background())
c.Assert(err, IsNil)
// txn2 writes "a"(PK), "b", "c" on different regions.
// "c" will return a retryable error.
// "b" will get a Locked error first, then the context must be canceled after backoff for lock.
err = txn2.Set([]byte("a"), []byte("a2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("c"), []byte("c2"))
c.Assert(err, IsNil)
err = txn2.Commit(context.Background())
c.Assert(err, NotNil)
c.Assert(tidbkv.ErrWriteConflictInTiDB.Equal(err), IsTrue, Commentf("err: %s", err))
}
func (s *testCommitterSuite) TestContextCancelCausingUndetermined(c *C) {
// For a normal transaction, if RPC returns context.Canceled error while sending commit
// requests, the transaction should go to the undetermined state.
txn := s.begin(c)
err := txn.Set([]byte("a"), []byte("va"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/rpcContextCancelErr", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/rpcContextCancelErr"), IsNil)
}()
err = committer.CommitMutations(context.Background())
c.Assert(committer.GetUndeterminedErr(), NotNil)
c.Assert(errors.Cause(err), Equals, context.Canceled)
}
func (s *testCommitterSuite) mustGetRegionID(c *C, key []byte) uint64 {
loc, err := s.store.GetRegionCache().LocateKey(tikv.NewBackofferWithVars(context.Background(), 500, nil), key)
c.Assert(err, IsNil)
return loc.Region.GetID()
}
func (s *testCommitterSuite) isKeyLocked(c *C, key []byte) bool {
ver, err := s.store.CurrentTimestamp(oracle.GlobalTxnScope)
c.Assert(err, IsNil)
bo := tikv.NewBackofferWithVars(context.Background(), 500, nil)
req := tikvrpc.NewRequest(tikvrpc.CmdGet, &kvrpcpb.GetRequest{
Key: key,
Version: ver,
})
loc, err := s.store.GetRegionCache().LocateKey(bo, key)
c.Assert(err, IsNil)
resp, err := s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
keyErr := (resp.Resp.(*kvrpcpb.GetResponse)).GetError()
return keyErr.GetLocked() != nil
}
func (s *testCommitterSuite) TestPrewriteCancel(c *C) {
// Setup region delays for key "b" and "c".
delays := map[uint64]time.Duration{
s.mustGetRegionID(c, []byte("b")): time.Millisecond * 10,
s.mustGetRegionID(c, []byte("c")): time.Millisecond * 20,
}
s.store.SetTiKVClient(&slowClient{
Client: s.store.GetTiKVClient(),
regionDelays: delays,
})
txn1, txn2 := s.begin(c), s.begin(c)
// txn2 writes "b"
err := txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
err = txn2.Commit(context.Background())
c.Assert(err, IsNil)
// txn1 writes "a"(PK), "b", "c" on different regions.
// "b" will return an error and cancel commit.
err = txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("c"), []byte("c1"))
c.Assert(err, IsNil)
err = txn1.Commit(context.Background())
c.Assert(err, NotNil)
// "c" should be cleaned up in reasonable time.
for i := 0; i < 50; i++ {
if !s.isKeyLocked(c, []byte("c")) {
return
}
time.Sleep(time.Millisecond * 10)
}
c.Fail()
}
// slowClient wraps rpcClient and makes some regions respond with delay.
type slowClient struct {
tikv.Client
regionDelays map[uint64]time.Duration
}
func (c *slowClient) SendReq(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
for id, delay := range c.regionDelays {
reqCtx := &req.Context
if reqCtx.GetRegionId() == id {
time.Sleep(delay)
}
}
return c.Client.SendRequest(ctx, addr, req, timeout)
}
func (s *testCommitterSuite) TestIllegalTso(c *C) {
txn := s.begin(c)
data := map[string]string{
"name": "aa",
"age": "12",
}
for k, v := range data {
err := txn.Set([]byte(k), []byte(v))
c.Assert(err, IsNil)
}
// make start ts bigger.
txn.SetStartTS(math.MaxUint64)
err := txn.Commit(context.Background())
c.Assert(err, NotNil)
errMsgMustContain(c, err, "invalid txnStartTS")
}
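// errMsgMustContain asserts that err's message contains msg.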
func errMsgMustContain(c *C, err error, msg string) {
c.Assert(strings.Contains(err.Error(), msg), IsTrue)
}
func (s *testCommitterSuite) TestCommitBeforePrewrite(c *C) {
txn := s.begin(c)
err := txn.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
ctx := context.Background()
committer.Cleanup(ctx)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, NotNil)
errMsgMustContain(c, err, "already rolled back")
}
func (s *testCommitterSuite) TestPrewritePrimaryKeyFailed(c *C) {
// commit (a,a1)
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Commit(context.Background())
c.Assert(err, IsNil)
// check a
txn := s.begin(c)
v, err := txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
// set txn2's startTs before txn1's
txn2 := s.begin(c)
txn2.SetStartTS(txn1.StartTS() - 1)
err = txn2.Set([]byte("a"), []byte("a2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
	// prewrite: primary a failed, b succeeded
err = txn2.Commit(context.Background())
c.Assert(err, NotNil)
// txn2 failed with a rollback for record a.
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
_, err = txn.Get(context.TODO(), []byte("b"))
errMsgMustContain(c, err, "key not exist")
	// clean again, shouldn't fail when a rollback already exists.
ctx := context.Background()
committer, err := txn2.NewCommitter(0)
c.Assert(err, IsNil)
committer.Cleanup(ctx)
// check the data after rollback twice.
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
// update data in a new txn, should be success.
err = txn.Set([]byte("a"), []byte("a3"))
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
// check value
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a3"))
}
func (s *testCommitterSuite) TestWrittenKeysOnConflict(c *C) {
	// This test checks that when there is a write conflict, written keys are collected,
// so we can use it to clean up keys.
region, _ := s.cluster.GetRegionByKey([]byte("x"))
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(region.Id, newRegionID, []byte("y"), []uint64{newPeerID}, newPeerID)
var totalTime time.Duration
for i := 0; i < 10; i++ {
txn1 := s.begin(c)
txn2 := s.begin(c)
txn2.Set([]byte("x1"), []byte("1"))
committer2, err := txn2.NewCommitter(2)
c.Assert(err, IsNil)
err = committer2.Execute(context.Background())
c.Assert(err, IsNil)
txn1.Set([]byte("x1"), []byte("1"))
txn1.Set([]byte("y1"), []byte("2"))
committer1, err := txn1.NewCommitter(2)
c.Assert(err, IsNil)
err = committer1.Execute(context.Background())
c.Assert(err, NotNil)
committer1.WaitCleanup()
txn3 := s.begin(c)
start := time.Now()
txn3.Get(context.TODO(), []byte("y1"))
totalTime += time.Since(start)
txn3.Commit(context.Background())
}
c.Assert(totalTime, Less, time.Millisecond*200)
}
func (s *testCommitterSuite) TestPrewriteTxnSize(c *C) {
// Prepare two regions first: (, 100) and [100, )
region, _ := s.cluster.GetRegionByKey([]byte{50})
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(region.Id, newRegionID, []byte{100}, []uint64{newPeerID}, newPeerID)
txn := s.begin(c)
var val [1024]byte
for i := byte(50); i < 120; i++ {
err := txn.Set([]byte{i}, val[:])
c.Assert(err, IsNil)
}
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
ctx := context.Background()
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
// Check the written locks in the first region (50 keys)
for i := byte(50); i < 100; i++ {
lock := s.getLockInfo(c, []byte{i})
c.Assert(int(lock.TxnSize), Equals, 50)
}
// Check the written locks in the second region (20 keys)
for i := byte(100); i < 120; i++ {
lock := s.getLockInfo(c, []byte{i})
c.Assert(int(lock.TxnSize), Equals, 20)
}
}
func (s *testCommitterSuite) TestRejectCommitTS(c *C) {
txn := s.begin(c)
c.Assert(txn.Set([]byte("x"), []byte("v")), IsNil)
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, []byte("x"))
c.Assert(err, IsNil)
mutations := []*kvrpcpb.Mutation{
{
Op: committer.GetMutations().GetOp(0),
Key: committer.GetMutations().GetKey(0),
Value: committer.GetMutations().GetValue(0),
},
}
prewrite := &kvrpcpb.PrewriteRequest{
Mutations: mutations,
PrimaryLock: committer.GetPrimaryKey(),
StartVersion: committer.GetStartTS(),
LockTtl: committer.GetLockTTL(),
MinCommitTs: committer.GetStartTS() + 100, // Set minCommitTS
}
req := tikvrpc.NewRequest(tikvrpc.CmdPrewrite, prewrite)
_, err = s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
// Make commitTS less than minCommitTS.
committer.SetCommitTS(committer.GetStartTS() + 1)
	// Ensure that the new commit ts is greater than minCommitTS when retrying
time.Sleep(3 * time.Millisecond)
err = committer.CommitMutations(context.Background())
c.Assert(err, IsNil)
// Use startTS+2 to read the data and get nothing.
	// Use max.Uint64 to read the data and succeed.
// That means the final commitTS > startTS+2, it's not the one we provide.
	// So we cover the retry commitTS logic.
txn1, err := s.store.BeginWithStartTS(oracle.GlobalTxnScope, committer.GetStartTS()+2)
c.Assert(err, IsNil)
_, err = txn1.Get(bo.GetCtx(), []byte("x"))
c.Assert(tidbkv.IsErrNotFound(err), IsTrue)
txn2, err := s.store.BeginWithStartTS(oracle.GlobalTxnScope, math.MaxUint64)
c.Assert(err, IsNil)
val, err := txn2.Get(bo.GetCtx(), []byte("x"))
c.Assert(err, IsNil)
c.Assert(bytes.Equal(val, []byte("v")), IsTrue)
}
func (s *testCommitterSuite) TestPessimisticPrewriteRequest(c *C) {
	// This test checks that the isPessimisticLock field is set in the request even when no keys are pessimistically locked.
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
err := txn.Set([]byte("t1"), []byte("v1"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
committer.SetForUpdateTS(100)
req := committer.BuildPrewriteRequest(1, 1, 1, committer.GetMutations().Slice(0, 1), 1)
c.Assert(len(req.Prewrite().IsPessimisticLock), Greater, 0)
c.Assert(req.Prewrite().ForUpdateTs, Equals, uint64(100))
}
func (s *testCommitterSuite) TestUnsetPrimaryKey(c *C) {
	// This test checks that when acquiring a pessimistic lock fails, the primary key is unset so the transaction can still commit its remaining mutations.
key := []byte("key")
txn := s.begin(c)
c.Assert(txn.Set(key, key), IsNil)
c.Assert(txn.Commit(context.Background()), IsNil)
txn = s.begin(c)
txn.SetOption(kv.Pessimistic, true)
_, _ = txn.GetUnionStore().Get(context.TODO(), key)
c.Assert(txn.GetMemBuffer().SetWithFlags(key, key, kv.SetPresumeKeyNotExists), IsNil)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, NotNil)
c.Assert(txn.Delete(key), IsNil)
key2 := []byte("key2")
c.Assert(txn.Set(key2, key2), IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
}
func (s *testCommitterSuite) TestPessimisticLockedKeysDedup(c *C) {
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
lockCtx := &kv.LockCtx{ForUpdateTS: 100, WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, []byte("abc"), []byte("def"))
c.Assert(err, IsNil)
lockCtx = &kv.LockCtx{ForUpdateTS: 100, WaitStartTime: time.Now()}
err = txn.LockKeys(context.Background(), lockCtx, []byte("abc"), []byte("def"))
c.Assert(err, IsNil)
c.Assert(txn.CollectLockedKeys(), HasLen, 2)
}
func (s *testCommitterSuite) TestPessimisticTTL(c *C) {
key := []byte("key")
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
time.Sleep(time.Millisecond * 100)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, IsNil)
time.Sleep(time.Millisecond * 100)
key2 := []byte("key2")
lockCtx = &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
err = txn.LockKeys(context.Background(), lockCtx, key2)
c.Assert(err, IsNil)
lockInfo := s.getLockInfo(c, key)
msBeforeLockExpired := s.store.GetOracle().UntilExpired(txn.StartTS(), lockInfo.LockTtl, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(msBeforeLockExpired, GreaterEqual, int64(100))
lr := s.store.NewLockResolver()
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
status, err := lr.GetTxnStatus(bo, txn.StartTS(), key2, 0, txn.StartTS(), true, false, nil)
c.Assert(err, IsNil)
c.Assert(status.TTL(), GreaterEqual, lockInfo.LockTtl)
// Check primary lock TTL is auto increasing while the pessimistic txn is ongoing.
for i := 0; i < 50; i++ {
lockInfoNew := s.getLockInfo(c, key)
if lockInfoNew.LockTtl > lockInfo.LockTtl {
currentTS, err := s.store.GetOracle().GetTimestamp(bo.GetCtx(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
			// Check that the TTL is updated to a reasonable range.
expire := oracle.ExtractPhysical(txn.StartTS()) + int64(lockInfoNew.LockTtl)
now := oracle.ExtractPhysical(currentTS)
c.Assert(expire > now, IsTrue)
c.Assert(uint64(expire-now) <= atomic.LoadUint64(&tikv.ManagedLockTTL), IsTrue)
return
}
time.Sleep(100 * time.Millisecond)
}
c.Assert(false, IsTrue, Commentf("update pessimistic ttl fail"))
}
func (s *testCommitterSuite) TestPessimisticLockReturnValues(c *C) {
key := []byte("key")
key2 := []byte("key2")
txn := s.begin(c)
c.Assert(txn.Set(key, key), IsNil)
c.Assert(txn.Set(key2, key2), IsNil)
c.Assert(txn.Commit(context.Background()), IsNil)
txn = s.begin(c)
txn.SetOption(kv.Pessimistic, true)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
lockCtx.ReturnValues = true
lockCtx.Values = map[string]kv.ReturnedValue{}
c.Assert(txn.LockKeys(context.Background(), lockCtx, key, key2), IsNil)
c.Assert(lockCtx.Values, HasLen, 2)
c.Assert(lockCtx.Values[string(key)].Value, BytesEquals, key)
c.Assert(lockCtx.Values[string(key2)].Value, BytesEquals, key2)
}
// TestElapsedTTL tests that elapsed time is correct even if ts physical time is greater than local time.
func (s *testCommitterSuite) TestElapsedTTL(c *C) {
key := []byte("key")
txn := s.begin(c)
txn.SetStartTS(oracle.ComposeTS(oracle.GetPhysical(time.Now().Add(time.Second*10)), 1))
txn.SetOption(kv.Pessimistic, true)
time.Sleep(time.Millisecond * 100)
lockCtx := &kv.LockCtx{
ForUpdateTS: oracle.ComposeTS(oracle.ExtractPhysical(txn.StartTS())+100, 1),
WaitStartTime: time.Now(),
}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, IsNil)
lockInfo := s.getLockInfo(c, key)
c.Assert(lockInfo.LockTtl-atomic.LoadUint64(&tikv.ManagedLockTTL), GreaterEqual, uint64(100))
c.Assert(lockInfo.LockTtl-atomic.LoadUint64(&tikv.ManagedLockTTL), Less, uint64(150))
}
func (s *testCommitterSuite) TestDeleteYourWriteCauseGhostPrimary(c *C) {
s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
k1 := []byte("a") // insert but deleted key at first pos in txn1
k2 := []byte("b") // insert key at second pos in txn1
k3 := []byte("c") // insert key in txn1 and will be conflict read by txn2
// insert k1, k2, k3 and delete k1
txn1 := s.begin(c)
txn1.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
txn1.Get(context.Background(), k1)
txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
txn1.Set(k2, []byte{1})
txn1.Set(k3, []byte{2})
txn1.Delete(k1)
committer1, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
// setup test knob in txn's committer
ac, bk := make(chan struct{}), make(chan struct{})
committer1.SetPrimaryKeyBlocker(ac, bk)
txn1.SetCommitter(committer1)
var txn1Done sync.WaitGroup
txn1Done.Add(1)
go func() {
err1 := txn1.Commit(context.Background())
c.Assert(err1, IsNil)
txn1Done.Done()
}()
	// resume after the primary key has been committed
<-ac
	// start txn2 to read k3 (prewrite succeeded and the primary key should be committed)
txn2 := s.begin(c)
txn2.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
v, err := txn2.Get(context.Background(), k3)
	c.Assert(err, IsNil) // should resolve the lock and read txn1's k3 result instead of rolling it back.
c.Assert(v[0], Equals, byte(2))
bk <- struct{}{}
txn1Done.Wait()
}
func (s *testCommitterSuite) TestDeleteAllYourWrites(c *C) {
s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
k1 := []byte("a")
k2 := []byte("b")
k3 := []byte("c")
// insert k1, k2, k3 and delete k1, k2, k3
txn1 := s.begin(c)
txn1.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
txn1.Delete(k1)
txn1.GetMemBuffer().SetWithFlags(k2, []byte{1}, kv.SetPresumeKeyNotExists)
txn1.Delete(k2)
txn1.GetMemBuffer().SetWithFlags(k3, []byte{2}, kv.SetPresumeKeyNotExists)
txn1.Delete(k3)
err1 := txn1.Commit(context.Background())
c.Assert(err1, IsNil)
}
func (s *testCommitterSuite) TestDeleteAllYourWritesWithSFU(c *C) {
s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
k1 := []byte("a")
k2 := []byte("b")
k3 := []byte("c")
	// insert and delete k1, then lock k2 and k3 with select-for-update
txn1 := s.begin(c)
txn1.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
txn1.Delete(k1)
err := txn1.LockKeys(context.Background(), &kv.LockCtx{}, k2, k3) // select * from t where x in (k2, k3) for update
c.Assert(err, IsNil)
committer1, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
// setup test knob in txn's committer
ac, bk := make(chan struct{}), make(chan struct{})
committer1.SetPrimaryKeyBlocker(ac, bk)
txn1.SetCommitter(committer1)
var txn1Done sync.WaitGroup
txn1Done.Add(1)
go func() {
err1 := txn1.Commit(context.Background())
c.Assert(err1, IsNil)
txn1Done.Done()
}()
	// resume after the primary key has been committed
<-ac
// start txn2 to read k3
txn2 := s.begin(c)
txn2.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
err = txn2.Set(k3, []byte{33})
c.Assert(err, IsNil)
var meetLocks []*tikv.Lock
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
resolver.SetMeetLockCallback(func(locks []*tikv.Lock) {
meetLocks = append(meetLocks, locks...)
})
err = txn2.Commit(context.Background())
c.Assert(err, IsNil)
bk <- struct{}{}
txn1Done.Wait()
c.Assert(meetLocks[0].Primary[0], Equals, k2[0])
}
// TestAcquireFalseTimeoutLock tests acquiring a key which is a secondary key of another transaction.
// The lock's own TTL is expired but the primary key is still alive due to heartbeats.
func (s *testCommitterSuite) TestAcquireFalseTimeoutLock(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 1000) // 1s
defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default test value
// k1 is the primary lock of txn1
k1 := []byte("k1")
// k2 is a secondary lock of txn1 and a key txn2 wants to lock
k2 := []byte("k2")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// lock the primary key
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1)
c.Assert(err, IsNil)
// lock the secondary key
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err = txn1.LockKeys(context.Background(), lockCtx, k2)
c.Assert(err, IsNil)
	// Heartbeats will increase the TTL of the primary key.
	// Wait until the secondary key's own TTL has expired.
time.Sleep(time.Duration(atomic.LoadUint64(&tikv.ManagedLockTTL)) * time.Millisecond)
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
// test no wait
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: tidbkv.LockNoWait, WaitStartTime: time.Now()}
err = txn2.LockKeys(context.Background(), lockCtx, k2)
// cannot acquire lock immediately thus error
c.Assert(err.Error(), Equals, kv.ErrLockAcquireFailAndNoWaitSet.Error())
// test for wait limited time (200ms)
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: 200, WaitStartTime: time.Now()}
err = txn2.LockKeys(context.Background(), lockCtx, k2)
// cannot acquire lock in time thus error
c.Assert(err.Error(), Equals, kv.ErrLockWaitTimeout.Error())
}
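// getLockInfo sends a prewrite request for key from a throw-away transaction and returns the lock
// information that the request conflicts with.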
func (s *testCommitterSuite) getLockInfo(c *C, key []byte) *kvrpcpb.LockInfo {
txn := s.begin(c)
err := txn.Set(key, key)
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, key)
c.Assert(err, IsNil)
req := committer.BuildPrewriteRequest(loc.Region.GetID(), loc.Region.GetConfVer(), loc.Region.GetVer(), committer.GetMutations().Slice(0, 1), 1)
resp, err := s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
keyErrs := (resp.Resp.(*kvrpcpb.PrewriteResponse)).Errors
c.Assert(keyErrs, HasLen, 1)
locked := keyErrs[0].Locked
c.Assert(locked, NotNil)
return locked
}
func (s *testCommitterSuite) TestPkNotFound(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 100) // 100ms
defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default value
ctx := context.Background()
// k1 is the primary lock of txn1.
k1 := []byte("k1")
// k2 is a secondary lock of txn1 and a key txn2 wants to lock.
k2 := []byte("k2")
k3 := []byte("k3")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// lock the primary key.
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(ctx, lockCtx, k1)
c.Assert(err, IsNil)
// lock the secondary key.
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err = txn1.LockKeys(ctx, lockCtx, k2, k3)
c.Assert(err, IsNil)
	// Stop the txn TTL manager and remove the primary key, as if the tidb server crashed and the primary key lock does not actually exist,
	// while the secondary lock operation succeeded.
txn1.GetCommitter().CloseTTLManager()
var status tikv.TxnStatus
bo := tikv.NewBackofferWithVars(ctx, 5000, nil)
lockKey2 := &tikv.Lock{
Key: k2,
Primary: k1,
TxnID: txn1.StartTS(),
TTL: 0, // let the primary lock k1 expire doing check.
TxnSize: txnCommitBatchSize,
LockType: kvrpcpb.Op_PessimisticLock,
LockForUpdateTS: txn1.StartTS(),
}
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
status, err = resolver.GetTxnStatusFromLock(bo, lockKey2, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
c.Assert(err, IsNil)
c.Assert(status.Action(), Equals, kvrpcpb.Action_TTLExpirePessimisticRollback)
	// Txn2 tries to lock the secondary key k2; there should be no infinite loop.
	// Since the resolving key k2 is a pessimistic lock, no rollback record should be written, and the later lock on k2
	// and the other secondary key k3 should succeed if there is no fail point enabled.
status, err = resolver.GetTxnStatusFromLock(bo, lockKey2, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
c.Assert(err, IsNil)
c.Assert(status.Action(), Equals, kvrpcpb.Action_LockNotExistDoNothing)
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now()}
err = txn2.LockKeys(ctx, lockCtx, k2)
c.Assert(err, IsNil)
	// Pessimistic rollback using a smaller forUpdateTS does not take effect.
lockKey3 := &tikv.Lock{
Key: k3,
Primary: k1,
TxnID: txn1.StartTS(),
TTL: tikv.ManagedLockTTL,
TxnSize: txnCommitBatchSize,
LockType: kvrpcpb.Op_PessimisticLock,
LockForUpdateTS: txn1.StartTS() - 1,
}
err = resolver.ResolvePessimisticLock(ctx, lockKey3)
c.Assert(err, IsNil)
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err = txn1.LockKeys(ctx, lockCtx, k3)
c.Assert(err, IsNil)
	// After disabling the fail point, the rollbackIfNotExist flag will be set, and the resolve should succeed. In this
	// case, the returned action of TxnStatus should be LockNotExistDoNothing, and the lock on k3 could be resolved.
txn3 := s.begin(c)
txn3.SetOption(kv.Pessimistic, true)
lockCtx = &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
err = txn3.LockKeys(ctx, lockCtx, k3)
c.Assert(err, IsNil)
status, err = resolver.GetTxnStatusFromLock(bo, lockKey3, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
c.Assert(err, IsNil)
c.Assert(status.Action(), Equals, kvrpcpb.Action_LockNotExistDoNothing)
}
func (s *testCommitterSuite) TestPessimisticLockPrimary(c *C) {
// a is the primary lock of txn1
k1 := []byte("a")
// b is a secondary lock of txn1 and a key txn2 wants to lock, b is on another region
k2 := []byte("b")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// txn1 lock k1
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1)
c.Assert(err, IsNil)
	// txn2 wants to lock k1 and k2; k1(pk) is blocked by txn1. pessimisticLockKeys has been changed to
	// lock the primary key first and then the secondary keys concurrently, so k2 should not be locked by txn2
doneCh := make(chan error)
go func() {
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
lockCtx2 := &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: 200}
waitErr := txn2.LockKeys(context.Background(), lockCtx2, k1, k2)
doneCh <- waitErr
}()
time.Sleep(50 * time.Millisecond)
	// txn3 should lock k2 successfully using no wait
txn3 := s.begin(c)
txn3.SetOption(kv.Pessimistic, true)
lockCtx3 := &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL", "return"), IsNil)
err = txn3.LockKeys(context.Background(), lockCtx3, k2)
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL"), IsNil)
c.Assert(err, IsNil)
waitErr := <-doneCh
c.Assert(kv.ErrLockWaitTimeout.Equal(waitErr), IsTrue)
}
func (s *testCommitterSuite) TestResolvePessimisticLock(c *C) {
untouchedIndexKey := []byte("t00000001_i000000001")
untouchedIndexValue := []byte{0, 0, 0, 0, 0, 0, 0, 1, 49}
noValueIndexKey := []byte("t00000001_i000000002")
c.Assert(tablecodec.IsUntouchedIndexKValue(untouchedIndexKey, untouchedIndexValue), IsTrue)
txn := s.begin(c)
txn.SetOption(kv.KVFilter, drivertxn.TiDBKVFilter{})
err := txn.Set(untouchedIndexKey, untouchedIndexValue)
c.Assert(err, IsNil)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
err = txn.LockKeys(context.Background(), lockCtx, untouchedIndexKey, noValueIndexKey)
c.Assert(err, IsNil)
commit, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
mutation := commit.MutationsOfKeys([][]byte{untouchedIndexKey, noValueIndexKey})
c.Assert(mutation.Len(), Equals, 2)
c.Assert(mutation.GetOp(0), Equals, pb.Op_Lock)
c.Assert(mutation.GetKey(0), BytesEquals, untouchedIndexKey)
c.Assert(mutation.GetValue(0), BytesEquals, untouchedIndexValue)
c.Assert(mutation.GetOp(1), Equals, pb.Op_Lock)
c.Assert(mutation.GetKey(1), BytesEquals, noValueIndexKey)
c.Assert(mutation.GetValue(1), BytesEquals, []byte{})
}
func (s *testCommitterSuite) TestCommitDeadLock(c *C) {
// Split into two region and let k1 k2 in different regions.
s.cluster.SplitKeys([]byte("z"), []byte("a"), 2)
k1 := []byte("a_deadlock_k1")
k2 := []byte("y_deadlock_k2")
region1, _ := s.cluster.GetRegionByKey(k1)
region2, _ := s.cluster.GetRegionByKey(k2)
c.Assert(region1.Id != region2.Id, IsTrue)
txn1 := s.begin(c)
txn1.Set(k1, []byte("t1"))
txn1.Set(k2, []byte("t1"))
commit1, err := txn1.NewCommitter(1)
c.Assert(err, IsNil)
commit1.SetPrimaryKey(k1)
commit1.SetTxnSize(1000 * 1024 * 1024)
txn2 := s.begin(c)
txn2.Set(k1, []byte("t2"))
txn2.Set(k2, []byte("t2"))
commit2, err := txn2.NewCommitter(2)
c.Assert(err, IsNil)
commit2.SetPrimaryKey(k2)
commit2.SetTxnSize(1000 * 1024 * 1024)
s.cluster.ScheduleDelay(txn2.StartTS(), region1.Id, 5*time.Millisecond)
s.cluster.ScheduleDelay(txn1.StartTS(), region2.Id, 5*time.Millisecond)
	// Txn1 prewrites k1, k2 and txn2 prewrites k2, k1. The large txn
	// protocol runs ttlManager and updates their TTLs, which can cause a deadlock.
ch := make(chan error, 2)
var wg sync.WaitGroup
wg.Add(1)
go func() {
ch <- commit2.Execute(context.Background())
wg.Done()
}()
ch <- commit1.Execute(context.Background())
wg.Wait()
close(ch)
res := 0
for e := range ch {
if e != nil {
res++
}
}
c.Assert(res, Equals, 1)
}
// TestPushPessimisticLock tests pushing forward the minCommitTS of pessimistic locks.
func (s *testCommitterSuite) TestPushPessimisticLock(c *C) {
// k1 is the primary key.
k1, k2 := []byte("a"), []byte("b")
ctx := context.Background()
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1, k2)
c.Assert(err, IsNil)
txn1.Set(k2, []byte("v2"))
committer := txn1.GetCommitter()
err = committer.InitKeysAndMutations()
c.Assert(err, IsNil)
// Strip the prewrite of the primary key.
committer.SetMutations(committer.GetMutations().Slice(1, 2))
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
	// The primary lock is a pessimistic lock and the secondary lock is an optimistic lock.
lock1 := s.getLockInfo(c, k1)
c.Assert(lock1.LockType, Equals, kvrpcpb.Op_PessimisticLock)
c.Assert(lock1.PrimaryLock, BytesEquals, k1)
lock2 := s.getLockInfo(c, k2)
c.Assert(lock2.LockType, Equals, kvrpcpb.Op_Put)
c.Assert(lock2.PrimaryLock, BytesEquals, k1)
txn2 := s.begin(c)
start := time.Now()
_, err = txn2.Get(ctx, k2)
elapsed := time.Since(start)
// The optimistic lock shouldn't block reads.
c.Assert(elapsed, Less, 500*time.Millisecond)
c.Assert(tidbkv.IsErrNotFound(err), IsTrue)
txn1.Rollback()
txn2.Rollback()
}
// TestResolveMixed tests mixed resolving of left-behind optimistic locks and pessimistic locks,
// using the clean-whole-region resolve path
func (s *testCommitterSuite) TestResolveMixed(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 100) // 100ms
defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default value
ctx := context.Background()
// pk is the primary lock of txn1
pk := []byte("pk")
secondaryLockkeys := make([][]byte, 0, bigTxnThreshold)
for i := 0; i < bigTxnThreshold; i++ {
optimisticLock := []byte(fmt.Sprintf("optimisticLockKey%d", i))
secondaryLockkeys = append(secondaryLockkeys, optimisticLock)
}
pessimisticLockKey := []byte("pessimisticLockKey")
	// leave the optimistic and pessimistic locks behind with their primary lock not found
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// lock the primary key
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, pk)
c.Assert(err, IsNil)
// lock the optimistic keys
for i := 0; i < bigTxnThreshold; i++ {
txn1.Set(secondaryLockkeys[i], []byte(fmt.Sprintf("v%d", i)))
}
committer := txn1.GetCommitter()
err = committer.InitKeysAndMutations()
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
// lock the pessimistic keys
err = txn1.LockKeys(context.Background(), lockCtx, pessimisticLockKey)
c.Assert(err, IsNil)
lock1 := s.getLockInfo(c, pessimisticLockKey)
c.Assert(lock1.LockType, Equals, kvrpcpb.Op_PessimisticLock)
c.Assert(lock1.PrimaryLock, BytesEquals, pk)
optimisticLockKey := secondaryLockkeys[0]
lock2 := s.getLockInfo(c, optimisticLockKey)
c.Assert(lock2.LockType, Equals, kvrpcpb.Op_Put)
c.Assert(lock2.PrimaryLock, BytesEquals, pk)
	// stop the txn TTL manager and remove the primary key, leaving the other keys behind
committer.CloseTTLManager()
muts := tikv.NewPlainMutations(1)
muts.Push(kvrpcpb.Op_Lock, pk, nil, true)
err = committer.PessimisticRollbackMutations(context.Background(), &muts)
c.Assert(err, IsNil)
	// try to resolve the left-behind optimistic locks, using the clean-whole-region path
time.Sleep(time.Duration(atomic.LoadUint64(&tikv.ManagedLockTTL)) * time.Millisecond)
optimisticLockInfo := s.getLockInfo(c, optimisticLockKey)
lock := tikv.NewLock(optimisticLockInfo)
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
err = resolver.ResolveLock(ctx, lock)
c.Assert(err, IsNil)
	// txn2 tries to lock the pessimisticLockKey; the lock should have been resolved by the clean-whole-region resolve
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
err = txn2.LockKeys(context.Background(), lockCtx, pessimisticLockKey)
c.Assert(err, IsNil)
err = txn1.Rollback()
c.Assert(err, IsNil)
err = txn2.Rollback()
c.Assert(err, IsNil)
}
// TestPrewriteSecondaryKeys tests that when async commit is enabled, each prewrite message includes an
// accurate list of secondary keys.
func (s *testCommitterSuite) TestPrewriteSecondaryKeys(c *C) {
// Prepare two regions first: (, 100) and [100, )
region, _ := s.cluster.GetRegionByKey([]byte{50})
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(region.Id, newRegionID, []byte{100}, []uint64{newPeerID}, newPeerID)
txn := s.beginAsyncCommit(c)
var val [1024]byte
for i := byte(50); i < 120; i++ {
err := txn.Set([]byte{i}, val[:])
c.Assert(err, IsNil)
}
// Some duplicates.
for i := byte(50); i < 120; i += 10 {
err := txn.Set([]byte{i}, val[512:700])
c.Assert(err, IsNil)
}
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
mock := mockClient{inner: s.store.GetTiKVClient()}
s.store.SetTiKVClient(&mock)
ctx := context.Background()
// TODO remove this when minCommitTS is returned from mockStore prewrite response.
committer.SetMinCommitTS(committer.GetStartTS() + 10)
committer.SetNoFallBack()
err = committer.Execute(ctx)
c.Assert(err, IsNil)
c.Assert(mock.seenPrimaryReq > 0, IsTrue)
c.Assert(mock.seenSecondaryReq > 0, IsTrue)
}
func (s *testCommitterSuite) TestAsyncCommit(c *C) {
ctx := context.Background()
pk := []byte("tpk")
pkVal := []byte("pkVal")
k1 := []byte("tk1")
k1Val := []byte("k1Val")
txn1 := s.beginAsyncCommit(c)
err := txn1.Set(pk, pkVal)
c.Assert(err, IsNil)
err = txn1.Set(k1, k1Val)
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
committer.SetSessionID(1)
committer.SetMinCommitTS(txn1.StartTS() + 10)
err = committer.Execute(ctx)
c.Assert(err, IsNil)
s.checkValues(c, map[string]string{
string(pk): string(pkVal),
string(k1): string(k1Val),
})
}
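// updateGlobalConfig applies f to a copy of the global config and stores the result.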
func updateGlobalConfig(f func(conf *config.Config)) {
g := config.GetGlobalConfig()
newConf := *g
f(&newConf)
config.StoreGlobalConfig(&newConf)
}
// restoreGlobalConfFunc returns a function that restores the config to the current value.
func restoreGlobalConfFunc() (restore func()) {
g := config.GetGlobalConfig()
return func() {
config.StoreGlobalConfig(g)
}
}
func (s *testCommitterSuite) TestAsyncCommitCheck(c *C) {
defer restoreGlobalConfFunc()()
updateGlobalConfig(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.KeysLimit = 16
conf.TiKVClient.AsyncCommit.TotalKeySizeLimit = 64
})
txn := s.beginAsyncCommit(c)
buf := []byte{0, 0, 0, 0}
// Set 16 keys, each key is 4 bytes long. So the total size of keys is 64 bytes.
for i := 0; i < 16; i++ {
buf[0] = byte(i)
err := txn.Set(buf, []byte("v"))
c.Assert(err, IsNil)
}
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
c.Assert(committer.CheckAsyncCommit(), IsTrue)
updateGlobalConfig(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.KeysLimit = 15
})
c.Assert(committer.CheckAsyncCommit(), IsFalse)
updateGlobalConfig(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.KeysLimit = 20
conf.TiKVClient.AsyncCommit.TotalKeySizeLimit = 63
})
c.Assert(committer.CheckAsyncCommit(), IsFalse)
}
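// mockClient wraps a tikv.Client and records whether async-commit prewrite requests for the
// primary and secondary keys were observed.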
type mockClient struct {
inner tikv.Client
seenPrimaryReq uint32
seenSecondaryReq uint32
}
func (m *mockClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
// If we find a prewrite request, check if it satisfies our constraints.
if pr, ok := req.Req.(*kvrpcpb.PrewriteRequest); ok {
if pr.UseAsyncCommit {
if isPrimary(pr) {
// The primary key should not be included, nor should there be any duplicates. All keys should be present.
if !includesPrimary(pr) && allKeysNoDups(pr) {
atomic.StoreUint32(&m.seenPrimaryReq, 1)
}
} else {
// Secondaries should only be sent with the primary key
if len(pr.Secondaries) == 0 {
atomic.StoreUint32(&m.seenSecondaryReq, 1)
}
}
}
}
return m.inner.SendRequest(ctx, addr, req, timeout)
}
func (m *mockClient) Close() error {
return m.inner.Close()
}
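// isPrimary reports whether the prewrite request contains a mutation for its primary lock key.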
func isPrimary(req *kvrpcpb.PrewriteRequest) bool {
for _, m := range req.Mutations {
if bytes.Equal(req.PrimaryLock, m.Key) {
return true
}
}
return false
}
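// includesPrimary reports whether the primary lock key appears in the request's list of secondaries.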
func includesPrimary(req *kvrpcpb.PrewriteRequest) bool {
for _, k := range req.Secondaries {
if bytes.Equal(req.PrimaryLock, k) {
return true
}
}
return false
}
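// allKeysNoDups checks that the secondaries list has no duplicates and contains every written key except the primary.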
func allKeysNoDups(req *kvrpcpb.PrewriteRequest) bool {
check := make(map[string]bool)
// Create the check map and check for duplicates.
for _, k := range req.Secondaries {
s := string(k)
if check[s] {
return false
}
check[s] = true
}
// Check every key is present.
for i := byte(50); i < 120; i++ {
k := []byte{i}
if !bytes.Equal(req.PrimaryLock, k) && !check[string(k)] {
return false
}
}
return true
}
| store/tikv/tests/2pc_test.go | 1 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.029534826055169106,
0.0011887848377227783,
0.00015463503950741142,
0.00017549589392729104,
0.003152347169816494
] |
{
"id": 7,
"code_window": [
"\t\tif action.LockWaitTime > 0 {\n",
"\t\t\ttimeLeft := action.LockWaitTime - (time.Since(lockWaitStartTime)).Milliseconds()\n",
"\t\t\tif timeLeft <= 0 {\n",
"\t\t\t\treq.PessimisticLock().WaitTimeout = tidbkv.LockNoWait\n",
"\t\t\t} else {\n",
"\t\t\t\treq.PessimisticLock().WaitTimeout = timeLeft\n",
"\t\t\t}\n",
"\t\t}\n",
"\t\tfailpoint.Inject(\"PessimisticLockErrWriteConflict\", func() error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\treq.PessimisticLock().WaitTimeout = LockNoWait\n"
],
"file_path": "store/tikv/pessimistic.go",
"type": "replace",
"edit_start_line_idx": 101
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"context"
"github.com/pingcap/parser/ast"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/types"
)
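// gcSubstituter substitutes expressions in a query with indexed virtual generated columns so that
// the indexes on those columns can be used.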
type gcSubstituter struct {
}
// ExprColumnMap is used to store all expressions of indexed generated columns in a table,
// and map them to the generated columns,
// so that we can substitute an expression in a query with an indexed generated column.
type ExprColumnMap map[expression.Expression]*expression.Column
// optimize tries to replace expressions with indexed virtual generated columns in the where, group by, order by, and field clauses
// so that we can use the index on the expression.
// For example: select a+1 from t order by a+1, with a virtual generated column c as (a+1) and
// an index on c. We need to replace a+1 with c so that we can use the index on c.
// See also https://dev.mysql.com/doc/refman/8.0/en/generated-column-index-optimizations.html
func (gc *gcSubstituter) optimize(ctx context.Context, lp LogicalPlan) (LogicalPlan, error) {
exprToColumn := make(ExprColumnMap)
collectGenerateColumn(lp, exprToColumn)
if len(exprToColumn) == 0 {
return lp, nil
}
return gc.substitute(ctx, lp, exprToColumn), nil
}
// collectGenerateColumn collects the indexed generated columns and saves them in a map from their expressions to the columns.
// For the sake of simplicity, we don't collect stored generated columns because we can't get their expressions directly.
// TODO: support stored generated columns.
func collectGenerateColumn(lp LogicalPlan, exprToColumn ExprColumnMap) {
for _, child := range lp.Children() {
collectGenerateColumn(child, exprToColumn)
}
ds, ok := lp.(*DataSource)
if !ok {
return
}
tblInfo := ds.tableInfo
for _, idx := range tblInfo.Indices {
for _, idxPart := range idx.Columns {
colInfo := tblInfo.Columns[idxPart.Offset]
if colInfo.IsGenerated() && !colInfo.GeneratedStored {
s := ds.schema.Columns
col := expression.ColInfo2Col(s, colInfo)
if col != nil && col.GetType().Equal(col.VirtualExpr.GetType()) {
exprToColumn[col.VirtualExpr] = col
}
}
}
}
}
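// tryToSubstituteExpr replaces *expr with col when it matches candidateExpr, candidateExpr's eval type matches tp, and col is present in the schema.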
func tryToSubstituteExpr(expr *expression.Expression, sctx sessionctx.Context, candidateExpr expression.Expression, tp types.EvalType, schema *expression.Schema, col *expression.Column) {
if (*expr).Equal(sctx, candidateExpr) && candidateExpr.GetType().EvalType() == tp &&
schema.ColumnIndex(col) != -1 {
*expr = col
}
}
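// substitute walks the plan tree and replaces matching expressions in selections, projections, and sorts with their indexed generated columns.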
func (gc *gcSubstituter) substitute(ctx context.Context, lp LogicalPlan, exprToColumn ExprColumnMap) LogicalPlan {
sctx := lp.SCtx().GetSessionVars().StmtCtx
var expr *expression.Expression
var tp types.EvalType
switch x := lp.(type) {
case *LogicalSelection:
for _, cond := range x.Conditions {
sf, ok := cond.(*expression.ScalarFunction)
if !ok {
continue
}
switch sf.FuncName.L {
case ast.EQ, ast.LT, ast.LE, ast.GT, ast.GE:
if sf.GetArgs()[0].ConstItem(sctx) {
expr = &sf.GetArgs()[1]
tp = sf.GetArgs()[0].GetType().EvalType()
} else if sf.GetArgs()[1].ConstItem(sctx) {
expr = &sf.GetArgs()[0]
tp = sf.GetArgs()[1].GetType().EvalType()
} else {
continue
}
for candidateExpr, column := range exprToColumn {
tryToSubstituteExpr(expr, lp.SCtx(), candidateExpr, tp, x.Schema(), column)
}
case ast.In:
expr = &sf.GetArgs()[0]
tp = sf.GetArgs()[1].GetType().EvalType()
canSubstitute := true
// Can only substitute if all the operands on the right-hand
// side are constants of the same type.
for i := 1; i < len(sf.GetArgs()); i++ {
if !sf.GetArgs()[i].ConstItem(sctx) || sf.GetArgs()[i].GetType().EvalType() != tp {
canSubstitute = false
break
}
}
if canSubstitute {
for candidateExpr, column := range exprToColumn {
tryToSubstituteExpr(expr, lp.SCtx(), candidateExpr, tp, x.Schema(), column)
}
}
}
}
case *LogicalProjection:
for i := range x.Exprs {
tp = x.Exprs[i].GetType().EvalType()
for candidateExpr, column := range exprToColumn {
tryToSubstituteExpr(&x.Exprs[i], lp.SCtx(), candidateExpr, tp, x.children[0].Schema(), column)
}
}
case *LogicalSort:
for i := range x.ByItems {
tp = x.ByItems[i].Expr.GetType().EvalType()
for candidateExpr, column := range exprToColumn {
tryToSubstituteExpr(&x.ByItems[i].Expr, lp.SCtx(), candidateExpr, tp, x.Schema(), column)
}
}
// TODO: Uncomment this code after we support virtual generated column pushdown.
// case *LogicalAggregation:
// for _, aggFunc := range x.AggFuncs {
// for i := 0; i < len(aggFunc.Args); i++ {
// tp = aggFunc.Args[i].GetType().EvalType()
// for candidateExpr, column := range exprToColumn {
// if aggFunc.Args[i].Equal(lp.SCtx(), candidateExpr) && candidateExpr.GetType().EvalType() == tp &&
// x.Schema().ColumnIndex(column) != -1 {
// aggFunc.Args[i] = column
// }
// }
// }
// }
// for i := 0; i < len(x.GroupByItems); i++ {
// tp = x.GroupByItems[i].GetType().EvalType()
// for candidateExpr, column := range exprToColumn {
// if x.GroupByItems[i].Equal(lp.SCtx(), candidateExpr) && candidateExpr.GetType().EvalType() == tp &&
// x.Schema().ColumnIndex(column) != -1 {
// x.GroupByItems[i] = column
// x.groupByCols = append(x.groupByCols, column)
// }
// }
// }
}
for _, child := range lp.Children() {
gc.substitute(ctx, child, exprToColumn)
}
return lp
}
func (*gcSubstituter) name() string {
return "generate_column_substitute"
}
| planner/core/rule_generate_column_substitute.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.00017757285968400538,
0.00017239544831681997,
0.00016364280600100756,
0.00017365628445986658,
0.0000040563481888966635
] |
{
"id": 7,
"code_window": [
"\t\tif action.LockWaitTime > 0 {\n",
"\t\t\ttimeLeft := action.LockWaitTime - (time.Since(lockWaitStartTime)).Milliseconds()\n",
"\t\t\tif timeLeft <= 0 {\n",
"\t\t\t\treq.PessimisticLock().WaitTimeout = tidbkv.LockNoWait\n",
"\t\t\t} else {\n",
"\t\t\t\treq.PessimisticLock().WaitTimeout = timeLeft\n",
"\t\t\t}\n",
"\t\t}\n",
"\t\tfailpoint.Inject(\"PessimisticLockErrWriteConflict\", func() error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\treq.PessimisticLock().WaitTimeout = LockNoWait\n"
],
"file_path": "store/tikv/pessimistic.go",
"type": "replace",
"edit_start_line_idx": 101
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"context"
)
// BinlogExecutor defines the logic to replicate binlogs during transaction commit.
type BinlogExecutor interface {
Prewrite(ctx context.Context, primary []byte) <-chan BinlogWriteResult
Commit(ctx context.Context, commitTS int64)
Skip()
}
// BinlogWriteResult defines the result of prewrite binlog.
type BinlogWriteResult interface {
Skipped() bool
GetError() error
}
| store/tikv/binlog.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0001943202078109607,
0.00017884884437080473,
0.00017062968981917948,
0.00017522272537462413,
0.00000927628298086347
] |
{
"id": 7,
"code_window": [
"\t\tif action.LockWaitTime > 0 {\n",
"\t\t\ttimeLeft := action.LockWaitTime - (time.Since(lockWaitStartTime)).Milliseconds()\n",
"\t\t\tif timeLeft <= 0 {\n",
"\t\t\t\treq.PessimisticLock().WaitTimeout = tidbkv.LockNoWait\n",
"\t\t\t} else {\n",
"\t\t\t\treq.PessimisticLock().WaitTimeout = timeLeft\n",
"\t\t\t}\n",
"\t\t}\n",
"\t\tfailpoint.Inject(\"PessimisticLockErrWriteConflict\", func() error {\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\treq.PessimisticLock().WaitTimeout = LockNoWait\n"
],
"file_path": "store/tikv/pessimistic.go",
"type": "replace",
"edit_start_line_idx": 101
} | package aggfuncs_test
import (
. "github.com/pingcap/check"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/mysql"
)
func (s *testSuite) TestMergePartialResult4Varsamp(c *C) {
tests := []aggTest{
buildAggTester(ast.AggFuncVarSamp, mysql.TypeDouble, 5, 2.5, 1, 1.9821428571428572),
}
for _, test := range tests {
s.testMergePartialResult(c, test)
}
}
func (s *testSuite) TestVarsamp(c *C) {
tests := []aggTest{
buildAggTester(ast.AggFuncVarSamp, mysql.TypeDouble, 5, nil, 2.5),
}
for _, test := range tests {
s.testAggFunc(c, test)
}
}
| executor/aggfuncs/func_varsamp_test.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.00017383246449753642,
0.00016479204350616783,
0.0001565217098686844,
0.0001640219270484522,
0.000007088035090418998
] |
{
"id": 8,
"code_window": [
"\t\t}\n",
"\n",
"\t\t// If msBeforeTxnExpired is not zero, it means there are still locks blocking us acquiring\n",
"\t\t// the pessimistic lock. We should return acquire fail with nowait set or timeout error if necessary.\n",
"\t\tif msBeforeTxnExpired > 0 {\n",
"\t\t\tif action.LockWaitTime == tidbkv.LockNoWait {\n",
"\t\t\t\treturn kv.ErrLockAcquireFailAndNoWaitSet\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t\t\tif action.LockWaitTime == LockNoWait {\n"
],
"file_path": "store/tikv/pessimistic.go",
"type": "replace",
"edit_start_line_idx": 178
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv_test
import (
"bytes"
"context"
"fmt"
"math"
"math/rand"
"strings"
"sync"
"sync/atomic"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
tidbkv "github.com/pingcap/tidb/kv"
drivertxn "github.com/pingcap/tidb/store/driver/txn"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/config"
"github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
"github.com/pingcap/tidb/store/tikv/mockstore/mocktikv"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/tablecodec"
)
var (
txnCommitBatchSize = tikv.ConfigProbe{}.GetTxnCommitBatchSize()
bigTxnThreshold = tikv.ConfigProbe{}.GetBigTxnThreshold()
)
type testCommitterSuite struct {
OneByOneSuite
cluster cluster.Cluster
store tikv.StoreProbe
}
var _ = SerialSuites(&testCommitterSuite{})
func (s *testCommitterSuite) SetUpSuite(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // 3s
s.OneByOneSuite.SetUpSuite(c)
atomic.StoreUint64(&tikv.CommitMaxBackoff, 1000)
}
func (s *testCommitterSuite) SetUpTest(c *C) {
mvccStore, err := mocktikv.NewMVCCLevelDB("")
c.Assert(err, IsNil)
cluster := mocktikv.NewCluster(mvccStore)
mocktikv.BootstrapWithMultiRegions(cluster, []byte("a"), []byte("b"), []byte("c"))
s.cluster = cluster
client := mocktikv.NewRPCClient(cluster, mvccStore, nil)
pdCli := &tikv.CodecPDClient{Client: mocktikv.NewPDClient(cluster)}
spkv := tikv.NewMockSafePointKV()
store, err := tikv.NewKVStore("mocktikv-store", pdCli, spkv, client)
store.EnableTxnLocalLatches(1024000)
c.Assert(err, IsNil)
// TODO: make it possible
// store, err := mockstore.NewMockStore(
// mockstore.WithStoreType(mockstore.MockTiKV),
// mockstore.WithClusterInspector(func(c cluster.Cluster) {
// mockstore.BootstrapWithMultiRegions(c, []byte("a"), []byte("b"), []byte("c"))
// s.cluster = c
// }),
// mockstore.WithPDClientHijacker(func(c pd.Client) pd.Client {
// return &codecPDClient{c}
// }),
// mockstore.WithTxnLocalLatches(1024000),
// )
// c.Assert(err, IsNil)
s.store = tikv.StoreProbe{KVStore: store}
}
func (s *testCommitterSuite) TearDownSuite(c *C) {
atomic.StoreUint64(&tikv.CommitMaxBackoff, 20000)
s.store.Close()
s.OneByOneSuite.TearDownSuite(c)
}
func (s *testCommitterSuite) begin(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
return txn
}
func (s *testCommitterSuite) beginAsyncCommit(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.SetOption(kv.EnableAsyncCommit, true)
return txn
}
func (s *testCommitterSuite) checkValues(c *C, m map[string]string) {
txn := s.begin(c)
for k, v := range m {
val, err := txn.Get(context.TODO(), []byte(k))
c.Assert(err, IsNil)
c.Assert(string(val), Equals, v)
}
}
func (s *testCommitterSuite) mustCommit(c *C, m map[string]string) {
txn := s.begin(c)
for k, v := range m {
err := txn.Set([]byte(k), []byte(v))
c.Assert(err, IsNil)
}
err := txn.Commit(context.Background())
c.Assert(err, IsNil)
s.checkValues(c, m)
}
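// randKV returns a random key and value of the given lengths drawn from a small alphabet.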
func randKV(keyLen, valLen int) (string, string) {
const letters = "abc"
k, v := make([]byte, keyLen), make([]byte, valLen)
for i := range k {
k[i] = letters[rand.Intn(len(letters))]
}
for i := range v {
v[i] = letters[rand.Intn(len(letters))]
}
return string(k), string(v)
}
func (s *testCommitterSuite) TestDeleteYourWritesTTL(c *C) {
conf := *config.GetGlobalConfig()
oldConf := conf
defer config.StoreGlobalConfig(&oldConf)
conf.TiKVClient.TTLRefreshedTxnSize = 0
config.StoreGlobalConfig(&conf)
{
txn := s.begin(c)
err := txn.GetMemBuffer().SetWithFlags([]byte("bb"), []byte{0}, kv.SetPresumeKeyNotExists)
c.Assert(err, IsNil)
err = txn.Set([]byte("ba"), []byte{1})
c.Assert(err, IsNil)
err = txn.Delete([]byte("bb"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
c.Check(committer.IsTTLRunning(), IsTrue)
}
{
txn := s.begin(c)
err := txn.GetMemBuffer().SetWithFlags([]byte("dd"), []byte{0}, kv.SetPresumeKeyNotExists)
c.Assert(err, IsNil)
err = txn.Set([]byte("de"), []byte{1})
c.Assert(err, IsNil)
err = txn.Delete([]byte("dd"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
c.Check(committer.IsTTLRunning(), IsTrue)
}
}
func (s *testCommitterSuite) TestCommitRollback(c *C) {
s.mustCommit(c, map[string]string{
"a": "a",
"b": "b",
"c": "c",
})
txn := s.begin(c)
txn.Set([]byte("a"), []byte("a1"))
txn.Set([]byte("b"), []byte("b1"))
txn.Set([]byte("c"), []byte("c1"))
s.mustCommit(c, map[string]string{
"c": "c2",
})
err := txn.Commit(context.Background())
c.Assert(err, NotNil)
s.checkValues(c, map[string]string{
"a": "a",
"b": "b",
"c": "c2",
})
}
func (s *testCommitterSuite) TestPrewriteRollback(c *C) {
s.mustCommit(c, map[string]string{
"a": "a0",
"b": "b0",
})
ctx := context.Background()
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
txn2 := s.begin(c)
v, err := txn2.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a0"))
err = committer.PrewriteAllMutations(ctx)
if err != nil {
// Retry.
txn1 = s.begin(c)
err = txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err = txn1.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
}
commitTS, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
committer.SetCommitTS(commitTS)
err = committer.CommitMutations(ctx)
c.Assert(err, IsNil)
txn3 := s.begin(c)
v, err = txn3.Get(context.TODO(), []byte("b"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("b1"))
}
func (s *testCommitterSuite) TestContextCancel(c *C) {
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
ctx, cancel := context.WithCancel(context.Background())
cancel() // cancel the context
err = committer.PrewriteAllMutations(ctx)
c.Assert(errors.Cause(err), Equals, context.Canceled)
}
func (s *testCommitterSuite) TestContextCancel2(c *C) {
txn := s.begin(c)
err := txn.Set([]byte("a"), []byte("a"))
c.Assert(err, IsNil)
err = txn.Set([]byte("b"), []byte("b"))
c.Assert(err, IsNil)
ctx, cancel := context.WithCancel(context.Background())
err = txn.Commit(ctx)
c.Assert(err, IsNil)
cancel()
// Secondary keys should not be canceled.
time.Sleep(time.Millisecond * 20)
c.Assert(s.isKeyLocked(c, []byte("b")), IsFalse)
}
func (s *testCommitterSuite) TestContextCancelRetryable(c *C) {
txn1, txn2, txn3 := s.begin(c), s.begin(c), s.begin(c)
// txn1 locks "b"
err := txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
// txn3 writes "c"
err = txn3.Set([]byte("c"), []byte("c3"))
c.Assert(err, IsNil)
err = txn3.Commit(context.Background())
c.Assert(err, IsNil)
// txn2 writes "a"(PK), "b", "c" on different regions.
// "c" will return a retryable error.
// "b" will get a Locked error first, then the context must be canceled after backoff for lock.
err = txn2.Set([]byte("a"), []byte("a2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("c"), []byte("c2"))
c.Assert(err, IsNil)
err = txn2.Commit(context.Background())
c.Assert(err, NotNil)
c.Assert(tidbkv.ErrWriteConflictInTiDB.Equal(err), IsTrue, Commentf("err: %s", err))
}
func (s *testCommitterSuite) TestContextCancelCausingUndetermined(c *C) {
// For a normal transaction, if RPC returns context.Canceled error while sending commit
// requests, the transaction should go to the undetermined state.
txn := s.begin(c)
err := txn.Set([]byte("a"), []byte("va"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/rpcContextCancelErr", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/rpcContextCancelErr"), IsNil)
}()
err = committer.CommitMutations(context.Background())
c.Assert(committer.GetUndeterminedErr(), NotNil)
c.Assert(errors.Cause(err), Equals, context.Canceled)
}
func (s *testCommitterSuite) mustGetRegionID(c *C, key []byte) uint64 {
loc, err := s.store.GetRegionCache().LocateKey(tikv.NewBackofferWithVars(context.Background(), 500, nil), key)
c.Assert(err, IsNil)
return loc.Region.GetID()
}
func (s *testCommitterSuite) isKeyLocked(c *C, key []byte) bool {
ver, err := s.store.CurrentTimestamp(oracle.GlobalTxnScope)
c.Assert(err, IsNil)
bo := tikv.NewBackofferWithVars(context.Background(), 500, nil)
req := tikvrpc.NewRequest(tikvrpc.CmdGet, &kvrpcpb.GetRequest{
Key: key,
Version: ver,
})
loc, err := s.store.GetRegionCache().LocateKey(bo, key)
c.Assert(err, IsNil)
resp, err := s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
keyErr := (resp.Resp.(*kvrpcpb.GetResponse)).GetError()
return keyErr.GetLocked() != nil
}
func (s *testCommitterSuite) TestPrewriteCancel(c *C) {
// Setup region delays for key "b" and "c".
delays := map[uint64]time.Duration{
s.mustGetRegionID(c, []byte("b")): time.Millisecond * 10,
s.mustGetRegionID(c, []byte("c")): time.Millisecond * 20,
}
s.store.SetTiKVClient(&slowClient{
Client: s.store.GetTiKVClient(),
regionDelays: delays,
})
txn1, txn2 := s.begin(c), s.begin(c)
// txn2 writes "b"
err := txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
err = txn2.Commit(context.Background())
c.Assert(err, IsNil)
// txn1 writes "a"(PK), "b", "c" on different regions.
// "b" will return an error and cancel commit.
err = txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("c"), []byte("c1"))
c.Assert(err, IsNil)
err = txn1.Commit(context.Background())
c.Assert(err, NotNil)
// "c" should be cleaned up in reasonable time.
for i := 0; i < 50; i++ {
if !s.isKeyLocked(c, []byte("c")) {
return
}
time.Sleep(time.Millisecond * 10)
}
c.Fail()
}
// slowClient wraps rpcClient and makes some regions respond with delay.
type slowClient struct {
tikv.Client
regionDelays map[uint64]time.Duration
}
func (c *slowClient) SendReq(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
for id, delay := range c.regionDelays {
reqCtx := &req.Context
if reqCtx.GetRegionId() == id {
time.Sleep(delay)
}
}
return c.Client.SendRequest(ctx, addr, req, timeout)
}
func (s *testCommitterSuite) TestIllegalTso(c *C) {
txn := s.begin(c)
data := map[string]string{
"name": "aa",
"age": "12",
}
for k, v := range data {
err := txn.Set([]byte(k), []byte(v))
c.Assert(err, IsNil)
}
// make start ts bigger.
txn.SetStartTS(math.MaxUint64)
err := txn.Commit(context.Background())
c.Assert(err, NotNil)
errMsgMustContain(c, err, "invalid txnStartTS")
}
func errMsgMustContain(c *C, err error, msg string) {
c.Assert(strings.Contains(err.Error(), msg), IsTrue)
}
func (s *testCommitterSuite) TestCommitBeforePrewrite(c *C) {
txn := s.begin(c)
err := txn.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
ctx := context.Background()
committer.Cleanup(ctx)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, NotNil)
errMsgMustContain(c, err, "already rolled back")
}
func (s *testCommitterSuite) TestPrewritePrimaryKeyFailed(c *C) {
// commit (a,a1)
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Commit(context.Background())
c.Assert(err, IsNil)
// check a
txn := s.begin(c)
v, err := txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
// set txn2's startTs before txn1's
txn2 := s.begin(c)
txn2.SetStartTS(txn1.StartTS() - 1)
err = txn2.Set([]byte("a"), []byte("a2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
// prewrite: primary a fails, b succeeds
err = txn2.Commit(context.Background())
c.Assert(err, NotNil)
// txn2 failed with a rollback for record a.
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
_, err = txn.Get(context.TODO(), []byte("b"))
errMsgMustContain(c, err, "key not exist")
// clean again; it shouldn't fail when a rollback already exists.
ctx := context.Background()
committer, err := txn2.NewCommitter(0)
c.Assert(err, IsNil)
committer.Cleanup(ctx)
// check the data after rollback twice.
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
// update data in a new txn; it should succeed.
err = txn.Set([]byte("a"), []byte("a3"))
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
// check value
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a3"))
}
func (s *testCommitterSuite) TestWrittenKeysOnConflict(c *C) {
// This test checks that when there is a write conflict, the written keys are collected,
// so we can use them to clean up the keys.
region, _ := s.cluster.GetRegionByKey([]byte("x"))
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(region.Id, newRegionID, []byte("y"), []uint64{newPeerID}, newPeerID)
var totalTime time.Duration
for i := 0; i < 10; i++ {
txn1 := s.begin(c)
txn2 := s.begin(c)
txn2.Set([]byte("x1"), []byte("1"))
committer2, err := txn2.NewCommitter(2)
c.Assert(err, IsNil)
err = committer2.Execute(context.Background())
c.Assert(err, IsNil)
txn1.Set([]byte("x1"), []byte("1"))
txn1.Set([]byte("y1"), []byte("2"))
committer1, err := txn1.NewCommitter(2)
c.Assert(err, IsNil)
err = committer1.Execute(context.Background())
c.Assert(err, NotNil)
committer1.WaitCleanup()
txn3 := s.begin(c)
start := time.Now()
txn3.Get(context.TODO(), []byte("y1"))
totalTime += time.Since(start)
txn3.Commit(context.Background())
}
c.Assert(totalTime, Less, time.Millisecond*200)
}
func (s *testCommitterSuite) TestPrewriteTxnSize(c *C) {
// Prepare two regions first: (, 100) and [100, )
region, _ := s.cluster.GetRegionByKey([]byte{50})
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(region.Id, newRegionID, []byte{100}, []uint64{newPeerID}, newPeerID)
txn := s.begin(c)
var val [1024]byte
for i := byte(50); i < 120; i++ {
err := txn.Set([]byte{i}, val[:])
c.Assert(err, IsNil)
}
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
ctx := context.Background()
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
// Check the written locks in the first region (50 keys)
for i := byte(50); i < 100; i++ {
lock := s.getLockInfo(c, []byte{i})
c.Assert(int(lock.TxnSize), Equals, 50)
}
// Check the written locks in the second region (20 keys)
for i := byte(100); i < 120; i++ {
lock := s.getLockInfo(c, []byte{i})
c.Assert(int(lock.TxnSize), Equals, 20)
}
}
func (s *testCommitterSuite) TestRejectCommitTS(c *C) {
txn := s.begin(c)
c.Assert(txn.Set([]byte("x"), []byte("v")), IsNil)
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, []byte("x"))
c.Assert(err, IsNil)
mutations := []*kvrpcpb.Mutation{
{
Op: committer.GetMutations().GetOp(0),
Key: committer.GetMutations().GetKey(0),
Value: committer.GetMutations().GetValue(0),
},
}
prewrite := &kvrpcpb.PrewriteRequest{
Mutations: mutations,
PrimaryLock: committer.GetPrimaryKey(),
StartVersion: committer.GetStartTS(),
LockTtl: committer.GetLockTTL(),
MinCommitTs: committer.GetStartTS() + 100, // Set minCommitTS
}
req := tikvrpc.NewRequest(tikvrpc.CmdPrewrite, prewrite)
_, err = s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
// Make commitTS less than minCommitTS.
committer.SetCommitTS(committer.GetStartTS() + 1)
// Ensure that the new commit ts is greater than minCommitTS when retrying
time.Sleep(3 * time.Millisecond)
err = committer.CommitMutations(context.Background())
c.Assert(err, IsNil)
// Use startTS+2 to read the data and get nothing.
// Use math.MaxUint64 to read the data and succeed.
// That means the final commitTS > startTS+2, so it's not the one we provided.
// So we cover the retry commitTS logic.
txn1, err := s.store.BeginWithStartTS(oracle.GlobalTxnScope, committer.GetStartTS()+2)
c.Assert(err, IsNil)
_, err = txn1.Get(bo.GetCtx(), []byte("x"))
c.Assert(tidbkv.IsErrNotFound(err), IsTrue)
txn2, err := s.store.BeginWithStartTS(oracle.GlobalTxnScope, math.MaxUint64)
c.Assert(err, IsNil)
val, err := txn2.Get(bo.GetCtx(), []byte("x"))
c.Assert(err, IsNil)
c.Assert(bytes.Equal(val, []byte("v")), IsTrue)
}
func (s *testCommitterSuite) TestPessimisticPrewriteRequest(c *C) {
// This test checks that the isPessimisticLock field is set in the request even when no keys are pessimistic locks.
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
err := txn.Set([]byte("t1"), []byte("v1"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
committer.SetForUpdateTS(100)
req := committer.BuildPrewriteRequest(1, 1, 1, committer.GetMutations().Slice(0, 1), 1)
c.Assert(len(req.Prewrite().IsPessimisticLock), Greater, 0)
c.Assert(req.Prewrite().ForUpdateTs, Equals, uint64(100))
}
func (s *testCommitterSuite) TestUnsetPrimaryKey(c *C) {
// This test checks that the transaction can continue and commit after a pessimistic lock attempt on a key flagged as presumed-not-exists fails.
key := []byte("key")
txn := s.begin(c)
c.Assert(txn.Set(key, key), IsNil)
c.Assert(txn.Commit(context.Background()), IsNil)
txn = s.begin(c)
txn.SetOption(kv.Pessimistic, true)
_, _ = txn.GetUnionStore().Get(context.TODO(), key)
c.Assert(txn.GetMemBuffer().SetWithFlags(key, key, kv.SetPresumeKeyNotExists), IsNil)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, NotNil)
c.Assert(txn.Delete(key), IsNil)
key2 := []byte("key2")
c.Assert(txn.Set(key2, key2), IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
}
func (s *testCommitterSuite) TestPessimisticLockedKeysDedup(c *C) {
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
lockCtx := &kv.LockCtx{ForUpdateTS: 100, WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, []byte("abc"), []byte("def"))
c.Assert(err, IsNil)
lockCtx = &kv.LockCtx{ForUpdateTS: 100, WaitStartTime: time.Now()}
err = txn.LockKeys(context.Background(), lockCtx, []byte("abc"), []byte("def"))
c.Assert(err, IsNil)
c.Assert(txn.CollectLockedKeys(), HasLen, 2)
}
func (s *testCommitterSuite) TestPessimisticTTL(c *C) {
key := []byte("key")
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
time.Sleep(time.Millisecond * 100)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, IsNil)
time.Sleep(time.Millisecond * 100)
key2 := []byte("key2")
lockCtx = &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
err = txn.LockKeys(context.Background(), lockCtx, key2)
c.Assert(err, IsNil)
lockInfo := s.getLockInfo(c, key)
msBeforeLockExpired := s.store.GetOracle().UntilExpired(txn.StartTS(), lockInfo.LockTtl, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(msBeforeLockExpired, GreaterEqual, int64(100))
lr := s.store.NewLockResolver()
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
status, err := lr.GetTxnStatus(bo, txn.StartTS(), key2, 0, txn.StartTS(), true, false, nil)
c.Assert(err, IsNil)
c.Assert(status.TTL(), GreaterEqual, lockInfo.LockTtl)
// Check primary lock TTL is auto increasing while the pessimistic txn is ongoing.
for i := 0; i < 50; i++ {
lockInfoNew := s.getLockInfo(c, key)
if lockInfoNew.LockTtl > lockInfo.LockTtl {
currentTS, err := s.store.GetOracle().GetTimestamp(bo.GetCtx(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
// Check that the TTL is updated to a reasonable range.
expire := oracle.ExtractPhysical(txn.StartTS()) + int64(lockInfoNew.LockTtl)
now := oracle.ExtractPhysical(currentTS)
c.Assert(expire > now, IsTrue)
c.Assert(uint64(expire-now) <= atomic.LoadUint64(&tikv.ManagedLockTTL), IsTrue)
return
}
time.Sleep(100 * time.Millisecond)
}
c.Assert(false, IsTrue, Commentf("update pessimistic ttl fail"))
}
func (s *testCommitterSuite) TestPessimisticLockReturnValues(c *C) {
key := []byte("key")
key2 := []byte("key2")
txn := s.begin(c)
c.Assert(txn.Set(key, key), IsNil)
c.Assert(txn.Set(key2, key2), IsNil)
c.Assert(txn.Commit(context.Background()), IsNil)
txn = s.begin(c)
txn.SetOption(kv.Pessimistic, true)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
lockCtx.ReturnValues = true
lockCtx.Values = map[string]kv.ReturnedValue{}
c.Assert(txn.LockKeys(context.Background(), lockCtx, key, key2), IsNil)
c.Assert(lockCtx.Values, HasLen, 2)
c.Assert(lockCtx.Values[string(key)].Value, BytesEquals, key)
c.Assert(lockCtx.Values[string(key2)].Value, BytesEquals, key2)
}
// TestElapsedTTL tests that elapsed time is correct even if ts physical time is greater than local time.
func (s *testCommitterSuite) TestElapsedTTL(c *C) {
key := []byte("key")
txn := s.begin(c)
txn.SetStartTS(oracle.ComposeTS(oracle.GetPhysical(time.Now().Add(time.Second*10)), 1))
txn.SetOption(kv.Pessimistic, true)
time.Sleep(time.Millisecond * 100)
lockCtx := &kv.LockCtx{
ForUpdateTS: oracle.ComposeTS(oracle.ExtractPhysical(txn.StartTS())+100, 1),
WaitStartTime: time.Now(),
}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, IsNil)
lockInfo := s.getLockInfo(c, key)
c.Assert(lockInfo.LockTtl-atomic.LoadUint64(&tikv.ManagedLockTTL), GreaterEqual, uint64(100))
c.Assert(lockInfo.LockTtl-atomic.LoadUint64(&tikv.ManagedLockTTL), Less, uint64(150))
}
func (s *testCommitterSuite) TestDeleteYourWriteCauseGhostPrimary(c *C) {
s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
k1 := []byte("a") // insert but deleted key at first pos in txn1
k2 := []byte("b") // insert key at second pos in txn1
k3 := []byte("c") // insert key in txn1 and will be conflict read by txn2
// insert k1, k2, k3 and delete k1
txn1 := s.begin(c)
txn1.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
txn1.Get(context.Background(), k1)
txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
txn1.Set(k2, []byte{1})
txn1.Set(k3, []byte{2})
txn1.Delete(k1)
committer1, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
// setup test knob in txn's committer
ac, bk := make(chan struct{}), make(chan struct{})
committer1.SetPrimaryKeyBlocker(ac, bk)
txn1.SetCommitter(committer1)
var txn1Done sync.WaitGroup
txn1Done.Add(1)
go func() {
err1 := txn1.Commit(context.Background())
c.Assert(err1, IsNil)
txn1Done.Done()
}()
// resume after the primary key has been committed
<-ac
// start txn2 to read k3 (prewrite succeeded and the primary should be committed)
txn2 := s.begin(c)
txn2.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
v, err := txn2.Get(context.Background(), k3)
c.Assert(err, IsNil) // should resolve the lock and read txn1's k3 result instead of rolling it back.
c.Assert(v[0], Equals, byte(2))
bk <- struct{}{}
txn1Done.Wait()
}
func (s *testCommitterSuite) TestDeleteAllYourWrites(c *C) {
s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
k1 := []byte("a")
k2 := []byte("b")
k3 := []byte("c")
// insert k1, k2, k3 and delete k1, k2, k3
txn1 := s.begin(c)
txn1.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
txn1.Delete(k1)
txn1.GetMemBuffer().SetWithFlags(k2, []byte{1}, kv.SetPresumeKeyNotExists)
txn1.Delete(k2)
txn1.GetMemBuffer().SetWithFlags(k3, []byte{2}, kv.SetPresumeKeyNotExists)
txn1.Delete(k3)
err1 := txn1.Commit(context.Background())
c.Assert(err1, IsNil)
}
func (s *testCommitterSuite) TestDeleteAllYourWritesWithSFU(c *C) {
s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
k1 := []byte("a")
k2 := []byte("b")
k3 := []byte("c")
// insert k1 and delete it, then lock k2 and k3 with select for update
txn1 := s.begin(c)
txn1.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
txn1.Delete(k1)
err := txn1.LockKeys(context.Background(), &kv.LockCtx{}, k2, k3) // select * from t where x in (k2, k3) for update
c.Assert(err, IsNil)
committer1, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
// setup test knob in txn's committer
ac, bk := make(chan struct{}), make(chan struct{})
committer1.SetPrimaryKeyBlocker(ac, bk)
txn1.SetCommitter(committer1)
var txn1Done sync.WaitGroup
txn1Done.Add(1)
go func() {
err1 := txn1.Commit(context.Background())
c.Assert(err1, IsNil)
txn1Done.Done()
}()
// resume after the primary key has been committed
<-ac
// start txn2 to read k3
txn2 := s.begin(c)
txn2.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
err = txn2.Set(k3, []byte{33})
c.Assert(err, IsNil)
var meetLocks []*tikv.Lock
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
resolver.SetMeetLockCallback(func(locks []*tikv.Lock) {
meetLocks = append(meetLocks, locks...)
})
err = txn2.Commit(context.Background())
c.Assert(err, IsNil)
bk <- struct{}{}
txn1Done.Wait()
c.Assert(meetLocks[0].Primary[0], Equals, k2[0])
}
// TestAcquireFalseTimeoutLock tests acquiring a key which is a secondary key of another transaction.
// The lock's own TTL is expired but the primary key is still alive due to heartbeats.
func (s *testCommitterSuite) TestAcquireFalseTimeoutLock(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 1000) // 1s
defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default test value
// k1 is the primary lock of txn1
k1 := []byte("k1")
// k2 is a secondary lock of txn1 and a key txn2 wants to lock
k2 := []byte("k2")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// lock the primary key
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1)
c.Assert(err, IsNil)
// lock the secondary key
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err = txn1.LockKeys(context.Background(), lockCtx, k2)
c.Assert(err, IsNil)
// Heartbeats will increase the TTL of the primary key
// wait until secondary key exceeds its own TTL
time.Sleep(time.Duration(atomic.LoadUint64(&tikv.ManagedLockTTL)) * time.Millisecond)
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
// test no wait
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: tidbkv.LockNoWait, WaitStartTime: time.Now()}
err = txn2.LockKeys(context.Background(), lockCtx, k2)
// cannot acquire lock immediately thus error
c.Assert(err.Error(), Equals, kv.ErrLockAcquireFailAndNoWaitSet.Error())
// test for wait limited time (200ms)
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: 200, WaitStartTime: time.Now()}
err = txn2.LockKeys(context.Background(), lockCtx, k2)
// cannot acquire lock in time thus error
c.Assert(err.Error(), Equals, kv.ErrLockWaitTimeout.Error())
}
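// getLockInfo prewrites key in a throwaway transaction and returns the existing lock's information reported in the prewrite key error.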
func (s *testCommitterSuite) getLockInfo(c *C, key []byte) *kvrpcpb.LockInfo {
txn := s.begin(c)
err := txn.Set(key, key)
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, key)
c.Assert(err, IsNil)
req := committer.BuildPrewriteRequest(loc.Region.GetID(), loc.Region.GetConfVer(), loc.Region.GetVer(), committer.GetMutations().Slice(0, 1), 1)
resp, err := s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
keyErrs := (resp.Resp.(*kvrpcpb.PrewriteResponse)).Errors
c.Assert(keyErrs, HasLen, 1)
locked := keyErrs[0].Locked
c.Assert(locked, NotNil)
return locked
}
func (s *testCommitterSuite) TestPkNotFound(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 100) // 100ms
defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default value
ctx := context.Background()
// k1 is the primary lock of txn1.
k1 := []byte("k1")
// k2 is a secondary lock of txn1 and a key txn2 wants to lock.
k2 := []byte("k2")
k3 := []byte("k3")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// lock the primary key.
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(ctx, lockCtx, k1)
c.Assert(err, IsNil)
// lock the secondary key.
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err = txn1.LockKeys(ctx, lockCtx, k2, k3)
c.Assert(err, IsNil)
// Stop the txn ttl manager and remove the primary key, as if the tidb server crashed and the primary key lock does not actually exist,
// while the secondary lock operation succeeded.
txn1.GetCommitter().CloseTTLManager()
var status tikv.TxnStatus
bo := tikv.NewBackofferWithVars(ctx, 5000, nil)
lockKey2 := &tikv.Lock{
Key: k2,
Primary: k1,
TxnID: txn1.StartTS(),
TTL: 0, // let the primary lock k1 expire doing check.
TxnSize: txnCommitBatchSize,
LockType: kvrpcpb.Op_PessimisticLock,
LockForUpdateTS: txn1.StartTS(),
}
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
status, err = resolver.GetTxnStatusFromLock(bo, lockKey2, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
c.Assert(err, IsNil)
c.Assert(status.Action(), Equals, kvrpcpb.Action_TTLExpirePessimisticRollback)
// Txn2 tries to lock the secondary key k2; there should be no dead loop.
// Since the resolved key k2 is a pessimistic lock, no rollback record should be written, and a later lock
// on k2 and the other secondary key k3 should succeed if no failpoint is enabled.
status, err = resolver.GetTxnStatusFromLock(bo, lockKey2, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
c.Assert(err, IsNil)
c.Assert(status.Action(), Equals, kvrpcpb.Action_LockNotExistDoNothing)
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now()}
err = txn2.LockKeys(ctx, lockCtx, k2)
c.Assert(err, IsNil)
// Pessimistic rollback using smaller forUpdateTS does not take effect.
lockKey3 := &tikv.Lock{
Key: k3,
Primary: k1,
TxnID: txn1.StartTS(),
TTL: tikv.ManagedLockTTL,
TxnSize: txnCommitBatchSize,
LockType: kvrpcpb.Op_PessimisticLock,
LockForUpdateTS: txn1.StartTS() - 1,
}
err = resolver.ResolvePessimisticLock(ctx, lockKey3)
c.Assert(err, IsNil)
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err = txn1.LockKeys(ctx, lockCtx, k3)
c.Assert(err, IsNil)
// After disabling the failpoint, the rollbackIfNotExist flag will be set, and the resolve should succeed. In this
// case, the returned action of TxnStatus should be LockNotExistDoNothing, and lock on k3 could be resolved.
txn3 := s.begin(c)
txn3.SetOption(kv.Pessimistic, true)
lockCtx = &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
err = txn3.LockKeys(ctx, lockCtx, k3)
c.Assert(err, IsNil)
status, err = resolver.GetTxnStatusFromLock(bo, lockKey3, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
c.Assert(err, IsNil)
c.Assert(status.Action(), Equals, kvrpcpb.Action_LockNotExistDoNothing)
}
func (s *testCommitterSuite) TestPessimisticLockPrimary(c *C) {
// a is the primary lock of txn1
k1 := []byte("a")
// b is a secondary lock of txn1 and a key txn2 wants to lock, b is on another region
k2 := []byte("b")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// txn1 lock k1
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1)
c.Assert(err, IsNil)
// txn2 wants to lock k1 and k2; k1 (the pk) is blocked by txn1. Since pessimisticLockKeys has been changed to
// lock the primary key first and then the secondary keys concurrently, k2 should not be locked by txn2.
doneCh := make(chan error)
go func() {
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
lockCtx2 := &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: 200}
waitErr := txn2.LockKeys(context.Background(), lockCtx2, k1, k2)
doneCh <- waitErr
}()
time.Sleep(50 * time.Millisecond)
// txn3 should lock k2 successfully using no-wait
txn3 := s.begin(c)
txn3.SetOption(kv.Pessimistic, true)
lockCtx3 := &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL", "return"), IsNil)
err = txn3.LockKeys(context.Background(), lockCtx3, k2)
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL"), IsNil)
c.Assert(err, IsNil)
waitErr := <-doneCh
c.Assert(kv.ErrLockWaitTimeout.Equal(waitErr), IsTrue)
}
func (s *testCommitterSuite) TestResolvePessimisticLock(c *C) {
untouchedIndexKey := []byte("t00000001_i000000001")
untouchedIndexValue := []byte{0, 0, 0, 0, 0, 0, 0, 1, 49}
noValueIndexKey := []byte("t00000001_i000000002")
c.Assert(tablecodec.IsUntouchedIndexKValue(untouchedIndexKey, untouchedIndexValue), IsTrue)
txn := s.begin(c)
txn.SetOption(kv.KVFilter, drivertxn.TiDBKVFilter{})
err := txn.Set(untouchedIndexKey, untouchedIndexValue)
c.Assert(err, IsNil)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
err = txn.LockKeys(context.Background(), lockCtx, untouchedIndexKey, noValueIndexKey)
c.Assert(err, IsNil)
commit, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
mutation := commit.MutationsOfKeys([][]byte{untouchedIndexKey, noValueIndexKey})
c.Assert(mutation.Len(), Equals, 2)
c.Assert(mutation.GetOp(0), Equals, pb.Op_Lock)
c.Assert(mutation.GetKey(0), BytesEquals, untouchedIndexKey)
c.Assert(mutation.GetValue(0), BytesEquals, untouchedIndexValue)
c.Assert(mutation.GetOp(1), Equals, pb.Op_Lock)
c.Assert(mutation.GetKey(1), BytesEquals, noValueIndexKey)
c.Assert(mutation.GetValue(1), BytesEquals, []byte{})
}
func (s *testCommitterSuite) TestCommitDeadLock(c *C) {
// Split into two regions and let k1 and k2 be in different regions.
s.cluster.SplitKeys([]byte("z"), []byte("a"), 2)
k1 := []byte("a_deadlock_k1")
k2 := []byte("y_deadlock_k2")
region1, _ := s.cluster.GetRegionByKey(k1)
region2, _ := s.cluster.GetRegionByKey(k2)
c.Assert(region1.Id != region2.Id, IsTrue)
txn1 := s.begin(c)
txn1.Set(k1, []byte("t1"))
txn1.Set(k2, []byte("t1"))
commit1, err := txn1.NewCommitter(1)
c.Assert(err, IsNil)
commit1.SetPrimaryKey(k1)
commit1.SetTxnSize(1000 * 1024 * 1024)
txn2 := s.begin(c)
txn2.Set(k1, []byte("t2"))
txn2.Set(k2, []byte("t2"))
commit2, err := txn2.NewCommitter(2)
c.Assert(err, IsNil)
commit2.SetPrimaryKey(k2)
commit2.SetTxnSize(1000 * 1024 * 1024)
s.cluster.ScheduleDelay(txn2.StartTS(), region1.Id, 5*time.Millisecond)
s.cluster.ScheduleDelay(txn1.StartTS(), region2.Id, 5*time.Millisecond)
// Txn1 prewrites k1, k2 and txn2 prewrites k2, k1; the large txn
// protocol runs ttlManager and updates their TTLs, which could cause a dead lock.
ch := make(chan error, 2)
var wg sync.WaitGroup
wg.Add(1)
go func() {
ch <- commit2.Execute(context.Background())
wg.Done()
}()
ch <- commit1.Execute(context.Background())
wg.Wait()
close(ch)
res := 0
for e := range ch {
if e != nil {
res++
}
}
c.Assert(res, Equals, 1)
}
// TestPushPessimisticLock tests pushing forward the minCommitTS of pessimistic locks.
func (s *testCommitterSuite) TestPushPessimisticLock(c *C) {
// k1 is the primary key.
k1, k2 := []byte("a"), []byte("b")
ctx := context.Background()
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1, k2)
c.Assert(err, IsNil)
txn1.Set(k2, []byte("v2"))
committer := txn1.GetCommitter()
err = committer.InitKeysAndMutations()
c.Assert(err, IsNil)
// Strip the prewrite of the primary key.
committer.SetMutations(committer.GetMutations().Slice(1, 2))
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
// The primary lock is a pessimistic lock and the secondary lock is an optimistic lock.
lock1 := s.getLockInfo(c, k1)
c.Assert(lock1.LockType, Equals, kvrpcpb.Op_PessimisticLock)
c.Assert(lock1.PrimaryLock, BytesEquals, k1)
lock2 := s.getLockInfo(c, k2)
c.Assert(lock2.LockType, Equals, kvrpcpb.Op_Put)
c.Assert(lock2.PrimaryLock, BytesEquals, k1)
txn2 := s.begin(c)
start := time.Now()
_, err = txn2.Get(ctx, k2)
elapsed := time.Since(start)
// The optimistic lock shouldn't block reads.
c.Assert(elapsed, Less, 500*time.Millisecond)
c.Assert(tidbkv.IsErrNotFound(err), IsTrue)
txn1.Rollback()
txn2.Rollback()
}
// TestResolveMixed tests mixed resolving of left-behind optimistic locks and pessimistic locks,
// using the clean-whole-region resolve path.
func (s *testCommitterSuite) TestResolveMixed(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 100) // 100ms
defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default value
ctx := context.Background()
// pk is the primary lock of txn1
pk := []byte("pk")
secondaryLockkeys := make([][]byte, 0, bigTxnThreshold)
for i := 0; i < bigTxnThreshold; i++ {
optimisticLock := []byte(fmt.Sprintf("optimisticLockKey%d", i))
secondaryLockkeys = append(secondaryLockkeys, optimisticLock)
}
pessimisticLockKey := []byte("pessimisticLockKey")
// leave the optimistic and pessimistic locks behind with the primary lock missing
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// lock the primary key
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, pk)
c.Assert(err, IsNil)
// lock the optimistic keys
for i := 0; i < bigTxnThreshold; i++ {
txn1.Set(secondaryLockkeys[i], []byte(fmt.Sprintf("v%d", i)))
}
committer := txn1.GetCommitter()
err = committer.InitKeysAndMutations()
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
// lock the pessimistic keys
err = txn1.LockKeys(context.Background(), lockCtx, pessimisticLockKey)
c.Assert(err, IsNil)
lock1 := s.getLockInfo(c, pessimisticLockKey)
c.Assert(lock1.LockType, Equals, kvrpcpb.Op_PessimisticLock)
c.Assert(lock1.PrimaryLock, BytesEquals, pk)
optimisticLockKey := secondaryLockkeys[0]
lock2 := s.getLockInfo(c, optimisticLockKey)
c.Assert(lock2.LockType, Equals, kvrpcpb.Op_Put)
c.Assert(lock2.PrimaryLock, BytesEquals, pk)
// stop the txn ttl manager and remove the primary key, leaving the other keys behind
committer.CloseTTLManager()
muts := tikv.NewPlainMutations(1)
muts.Push(kvrpcpb.Op_Lock, pk, nil, true)
err = committer.PessimisticRollbackMutations(context.Background(), &muts)
c.Assert(err, IsNil)
// try to resolve the left-behind optimistic locks using the clean-whole-region path
time.Sleep(time.Duration(atomic.LoadUint64(&tikv.ManagedLockTTL)) * time.Millisecond)
optimisticLockInfo := s.getLockInfo(c, optimisticLockKey)
lock := tikv.NewLock(optimisticLockInfo)
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
err = resolver.ResolveLock(ctx, lock)
c.Assert(err, IsNil)
// txn2 tries to lock the pessimisticLockKey; the lock should have been resolved by the clean-whole-region resolve
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
err = txn2.LockKeys(context.Background(), lockCtx, pessimisticLockKey)
c.Assert(err, IsNil)
err = txn1.Rollback()
c.Assert(err, IsNil)
err = txn2.Rollback()
c.Assert(err, IsNil)
}
// TestPrewriteSecondaryKeys tests that when async commit is enabled, each prewrite message includes an
// accurate list of secondary keys.
func (s *testCommitterSuite) TestPrewriteSecondaryKeys(c *C) {
// Prepare two regions first: (, 100) and [100, )
region, _ := s.cluster.GetRegionByKey([]byte{50})
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(region.Id, newRegionID, []byte{100}, []uint64{newPeerID}, newPeerID)
txn := s.beginAsyncCommit(c)
var val [1024]byte
for i := byte(50); i < 120; i++ {
err := txn.Set([]byte{i}, val[:])
c.Assert(err, IsNil)
}
// Some duplicates.
for i := byte(50); i < 120; i += 10 {
err := txn.Set([]byte{i}, val[512:700])
c.Assert(err, IsNil)
}
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
mock := mockClient{inner: s.store.GetTiKVClient()}
s.store.SetTiKVClient(&mock)
ctx := context.Background()
// TODO remove this when minCommitTS is returned from mockStore prewrite response.
committer.SetMinCommitTS(committer.GetStartTS() + 10)
committer.SetNoFallBack()
err = committer.Execute(ctx)
c.Assert(err, IsNil)
c.Assert(mock.seenPrimaryReq > 0, IsTrue)
c.Assert(mock.seenSecondaryReq > 0, IsTrue)
}
func (s *testCommitterSuite) TestAsyncCommit(c *C) {
ctx := context.Background()
pk := []byte("tpk")
pkVal := []byte("pkVal")
k1 := []byte("tk1")
k1Val := []byte("k1Val")
txn1 := s.beginAsyncCommit(c)
err := txn1.Set(pk, pkVal)
c.Assert(err, IsNil)
err = txn1.Set(k1, k1Val)
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
committer.SetSessionID(1)
committer.SetMinCommitTS(txn1.StartTS() + 10)
err = committer.Execute(ctx)
c.Assert(err, IsNil)
s.checkValues(c, map[string]string{
string(pk): string(pkVal),
string(k1): string(k1Val),
})
}
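// updateGlobalConfig copies the global config, applies f to the copy, and stores it back.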
func updateGlobalConfig(f func(conf *config.Config)) {
g := config.GetGlobalConfig()
newConf := *g
f(&newConf)
config.StoreGlobalConfig(&newConf)
}
// restoreGlobalConfFunc returns a function that restores the global config to the value it holds now.
func restoreGlobalConfFunc() (restore func()) {
g := config.GetGlobalConfig()
return func() {
config.StoreGlobalConfig(g)
}
}
func (s *testCommitterSuite) TestAsyncCommitCheck(c *C) {
defer restoreGlobalConfFunc()()
updateGlobalConfig(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.KeysLimit = 16
conf.TiKVClient.AsyncCommit.TotalKeySizeLimit = 64
})
txn := s.beginAsyncCommit(c)
buf := []byte{0, 0, 0, 0}
// Set 16 keys, each key is 4 bytes long. So the total size of keys is 64 bytes.
for i := 0; i < 16; i++ {
buf[0] = byte(i)
err := txn.Set(buf, []byte("v"))
c.Assert(err, IsNil)
}
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
c.Assert(committer.CheckAsyncCommit(), IsTrue)
updateGlobalConfig(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.KeysLimit = 15
})
c.Assert(committer.CheckAsyncCommit(), IsFalse)
updateGlobalConfig(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.KeysLimit = 20
conf.TiKVClient.AsyncCommit.TotalKeySizeLimit = 63
})
c.Assert(committer.CheckAsyncCommit(), IsFalse)
}
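// mockClient wraps the real TiKV client and records whether async commit prewrite requests for the
// primary and secondary keys satisfy the expected constraints.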
type mockClient struct {
inner tikv.Client
seenPrimaryReq uint32
seenSecondaryReq uint32
}
func (m *mockClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
// If we find a prewrite request, check if it satisfies our constraints.
if pr, ok := req.Req.(*kvrpcpb.PrewriteRequest); ok {
if pr.UseAsyncCommit {
if isPrimary(pr) {
// The primary key should not be included, nor should there be any duplicates. All keys should be present.
if !includesPrimary(pr) && allKeysNoDups(pr) {
atomic.StoreUint32(&m.seenPrimaryReq, 1)
}
} else {
// Secondaries should only be sent with the primary key
if len(pr.Secondaries) == 0 {
atomic.StoreUint32(&m.seenSecondaryReq, 1)
}
}
}
}
return m.inner.SendRequest(ctx, addr, req, timeout)
}
func (m *mockClient) Close() error {
return m.inner.Close()
}
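// isPrimary reports whether the prewrite request contains a mutation for its primary lock key.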
func isPrimary(req *kvrpcpb.PrewriteRequest) bool {
for _, m := range req.Mutations {
if bytes.Equal(req.PrimaryLock, m.Key) {
return true
}
}
return false
}
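// includesPrimary reports whether the primary lock key appears in the request's list of secondaries.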
func includesPrimary(req *kvrpcpb.PrewriteRequest) bool {
for _, k := range req.Secondaries {
if bytes.Equal(req.PrimaryLock, k) {
return true
}
}
return false
}
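// allKeysNoDups checks that the secondaries list contains no duplicates and includes every key
// written by the test except the primary key.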
func allKeysNoDups(req *kvrpcpb.PrewriteRequest) bool {
check := make(map[string]bool)
// Create the check map and check for duplicates.
for _, k := range req.Secondaries {
s := string(k)
if check[s] {
return false
}
check[s] = true
}
// Check every key is present.
for i := byte(50); i < 120; i++ {
k := []byte{i}
if !bytes.Equal(req.PrimaryLock, k) && !check[string(k)] {
return false
}
}
return true
}
| store/tikv/tests/2pc_test.go | 1 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.028430163860321045,
0.001619641319848597,
0.0001588196464581415,
0.00017568461771588773,
0.003964218311011791
] |
{
"id": 8,
"code_window": [
"\t\t}\n",
"\n",
"\t\t// If msBeforeTxnExpired is not zero, it means there are still locks blocking us acquiring\n",
"\t\t// the pessimistic lock. We should return acquire fail with nowait set or timeout error if necessary.\n",
"\t\tif msBeforeTxnExpired > 0 {\n",
"\t\t\tif action.LockWaitTime == tidbkv.LockNoWait {\n",
"\t\t\t\treturn kv.ErrLockAcquireFailAndNoWaitSet\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t\t\tif action.LockWaitTime == LockNoWait {\n"
],
"file_path": "store/tikv/pessimistic.go",
"type": "replace",
"edit_start_line_idx": 178
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"context"
"fmt"
"sync"
"sync/atomic"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/testutil"
)
var _ = Suite(&testColumnChangeSuite{})
type testColumnChangeSuite struct {
store kv.Storage
dbInfo *model.DBInfo
}
func (s *testColumnChangeSuite) SetUpSuite(c *C) {
SetWaitTimeWhenErrorOccurred(1 * time.Microsecond)
s.store = testCreateStore(c, "test_column_change")
s.dbInfo = &model.DBInfo{
Name: model.NewCIStr("test_column_change"),
ID: 1,
}
err := kv.RunInNewTxn(context.Background(), s.store, true, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
return errors.Trace(t.CreateDatabase(s.dbInfo))
})
c.Check(err, IsNil)
}
func (s *testColumnChangeSuite) TearDownSuite(c *C) {
s.store.Close()
}
func (s *testColumnChangeSuite) TestColumnChange(c *C) {
d := testNewDDLAndStart(
context.Background(),
c,
WithStore(s.store),
WithLease(testLease),
)
defer func() {
err := d.Stop()
c.Assert(err, IsNil)
}()
// create table t (c1 int, c2 int);
tblInfo := testTableInfo(c, d, "t", 2)
ctx := testNewContext(d)
err := ctx.NewTxn(context.Background())
c.Assert(err, IsNil)
testCreateTable(c, ctx, d, s.dbInfo, tblInfo)
// insert t values (1, 2);
originTable := testGetTable(c, d, s.dbInfo.ID, tblInfo.ID)
row := types.MakeDatums(1, 2)
h, err := originTable.AddRecord(ctx, row)
c.Assert(err, IsNil)
txn, err := ctx.Txn(true)
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
var mu sync.Mutex
tc := &TestDDLCallback{}
// set up hook
prevState := model.StateNone
var (
deleteOnlyTable table.Table
writeOnlyTable table.Table
publicTable table.Table
)
var checkErr error
tc.onJobUpdated = func(job *model.Job) {
if job.SchemaState == prevState {
return
}
hookCtx := mock.NewContext()
hookCtx.Store = s.store
prevState = job.SchemaState
err := hookCtx.NewTxn(context.Background())
if err != nil {
checkErr = errors.Trace(err)
}
switch job.SchemaState {
case model.StateDeleteOnly:
deleteOnlyTable, err = getCurrentTable(d, s.dbInfo.ID, tblInfo.ID)
if err != nil {
checkErr = errors.Trace(err)
}
case model.StateWriteOnly:
writeOnlyTable, err = getCurrentTable(d, s.dbInfo.ID, tblInfo.ID)
if err != nil {
checkErr = errors.Trace(err)
}
err = s.checkAddWriteOnly(hookCtx, d, deleteOnlyTable, writeOnlyTable, h)
if err != nil {
checkErr = errors.Trace(err)
}
case model.StatePublic:
mu.Lock()
publicTable, err = getCurrentTable(d, s.dbInfo.ID, tblInfo.ID)
if err != nil {
checkErr = errors.Trace(err)
}
err = s.checkAddPublic(hookCtx, d, writeOnlyTable, publicTable)
if err != nil {
checkErr = errors.Trace(err)
}
mu.Unlock()
}
txn, err := hookCtx.Txn(true)
if err != nil {
checkErr = errors.Trace(err)
}
err = txn.Commit(context.Background())
if err != nil {
checkErr = errors.Trace(err)
}
}
d.SetHook(tc)
defaultValue := int64(3)
job := testCreateColumn(c, ctx, d, s.dbInfo, tblInfo, "c3", &ast.ColumnPosition{Tp: ast.ColumnPositionNone}, defaultValue)
c.Assert(errors.ErrorStack(checkErr), Equals, "")
testCheckJobDone(c, d, job, true)
mu.Lock()
tb := publicTable
mu.Unlock()
s.testColumnDrop(c, ctx, d, tb)
s.testAddColumnNoDefault(c, ctx, d, tblInfo)
}
func (s *testColumnChangeSuite) TestModifyAutoRandColumnWithMetaKeyChanged(c *C) {
d := testNewDDLAndStart(
context.Background(),
c,
WithStore(s.store),
WithLease(testLease),
)
defer func() {
err := d.Stop()
c.Assert(err, IsNil)
}()
ids, err := d.genGlobalIDs(1)
tableID := ids[0]
c.Assert(err, IsNil)
colInfo := &model.ColumnInfo{
Name: model.NewCIStr("a"),
Offset: 0,
State: model.StatePublic,
FieldType: *types.NewFieldType(mysql.TypeLonglong),
}
tblInfo := &model.TableInfo{
ID: tableID,
Name: model.NewCIStr("auto_random_table_name"),
Columns: []*model.ColumnInfo{colInfo},
AutoRandomBits: 5,
}
colInfo.ID = allocateColumnID(tblInfo)
ctx := testNewContext(d)
testCreateTable(c, ctx, d, s.dbInfo, tblInfo)
tc := &TestDDLCallback{}
var errCount int32 = 3
var genAutoRandErr error
tc.onJobRunBefore = func(job *model.Job) {
if atomic.LoadInt32(&errCount) > 0 && job.Type == model.ActionModifyColumn {
atomic.AddInt32(&errCount, -1)
genAutoRandErr = kv.RunInNewTxn(context.Background(), s.store, false, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
_, err1 := t.GenAutoRandomID(s.dbInfo.ID, tableID, 1)
return err1
})
}
}
d.SetHook(tc)
const newAutoRandomBits uint64 = 10
job := &model.Job{
SchemaID: s.dbInfo.ID,
TableID: tblInfo.ID,
SchemaName: s.dbInfo.Name.L,
Type: model.ActionModifyColumn,
BinlogInfo: &model.HistoryInfo{},
Args: []interface{}{colInfo, colInfo.Name, ast.ColumnPosition{}, 0, newAutoRandomBits},
}
err = d.doDDLJob(ctx, job)
c.Assert(err, IsNil)
c.Assert(errCount == 0, IsTrue)
c.Assert(genAutoRandErr, IsNil)
testCheckJobDone(c, d, job, true)
var newTbInfo *model.TableInfo
err = kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
var err error
newTbInfo, err = t.GetTable(s.dbInfo.ID, tableID)
if err != nil {
return errors.Trace(err)
}
return nil
})
c.Assert(err, IsNil)
c.Assert(newTbInfo.AutoRandomBits, Equals, newAutoRandomBits)
}
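// testAddColumnNoDefault adds a column without a default value and, in the job hook,
// writes a row through the write-only table to cover that schema state.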
func (s *testColumnChangeSuite) testAddColumnNoDefault(c *C, ctx sessionctx.Context, d *ddl, tblInfo *model.TableInfo) {
tc := &TestDDLCallback{}
// set up hook
prevState := model.StateNone
var checkErr error
var writeOnlyTable table.Table
tc.onJobUpdated = func(job *model.Job) {
if job.SchemaState == prevState {
return
}
hookCtx := mock.NewContext()
hookCtx.Store = s.store
prevState = job.SchemaState
err := hookCtx.NewTxn(context.Background())
if err != nil {
checkErr = errors.Trace(err)
}
switch job.SchemaState {
case model.StateWriteOnly:
writeOnlyTable, err = getCurrentTable(d, s.dbInfo.ID, tblInfo.ID)
if err != nil {
checkErr = errors.Trace(err)
}
case model.StatePublic:
_, err = getCurrentTable(d, s.dbInfo.ID, tblInfo.ID)
if err != nil {
checkErr = errors.Trace(err)
}
_, err = writeOnlyTable.AddRecord(hookCtx, types.MakeDatums(10, 10))
if err != nil {
checkErr = errors.Trace(err)
}
}
txn, err := hookCtx.Txn(true)
if err != nil {
checkErr = errors.Trace(err)
}
err = txn.Commit(context.TODO())
if err != nil {
checkErr = errors.Trace(err)
}
}
d.SetHook(tc)
job := testCreateColumn(c, ctx, d, s.dbInfo, tblInfo, "c3", &ast.ColumnPosition{Tp: ast.ColumnPositionNone}, nil)
c.Assert(errors.ErrorStack(checkErr), Equals, "")
testCheckJobDone(c, d, job, true)
}
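// testColumnDrop drops a column and uses the job hook to check that the dropped column
// no longer appears in the current table's columns.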
func (s *testColumnChangeSuite) testColumnDrop(c *C, ctx sessionctx.Context, d *ddl, tbl table.Table) {
dropCol := tbl.Cols()[2]
tc := &TestDDLCallback{}
// set up hook
prevState := model.StateNone
var checkErr error
tc.onJobUpdated = func(job *model.Job) {
if job.SchemaState == prevState {
return
}
prevState = job.SchemaState
currentTbl, err := getCurrentTable(d, s.dbInfo.ID, tbl.Meta().ID)
if err != nil {
checkErr = errors.Trace(err)
}
for _, col := range currentTbl.Cols() {
if col.ID == dropCol.ID {
checkErr = errors.Errorf("column is not dropped")
}
}
}
d.SetHook(tc)
c.Assert(errors.ErrorStack(checkErr), Equals, "")
testDropColumn(c, ctx, d, s.dbInfo, tbl.Meta(), dropCol.Name.L, false)
}
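// seek returns the handle of the first record in t whose handle is >= h,
// and whether such a record exists.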
func seek(t table.PhysicalTable, ctx sessionctx.Context, h kv.Handle) (kv.Handle, bool, error) {
txn, err := ctx.Txn(true)
if err != nil {
return nil, false, err
}
recordPrefix := t.RecordPrefix()
seekKey := tablecodec.EncodeRowKeyWithHandle(t.GetPhysicalID(), h)
iter, err := txn.Iter(seekKey, recordPrefix.PrefixNext())
if err != nil {
return nil, false, err
}
if !iter.Valid() || !iter.Key().HasPrefix(recordPrefix) {
// No more records in the table, skip to the end.
return nil, false, nil
}
handle, err := tablecodec.DecodeRowKey(iter.Key())
if err != nil {
return nil, false, err
}
return handle, true, nil
}
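// checkAddWriteOnly verifies inserts, reads, updates and deletes against the table
// while the new column is in the delete-only and write-only states.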
func (s *testColumnChangeSuite) checkAddWriteOnly(ctx sessionctx.Context, d *ddl, deleteOnlyTable, writeOnlyTable table.Table, h kv.Handle) error {
// WriteOnlyTable: insert t values (2, 3)
err := ctx.NewTxn(context.Background())
if err != nil {
return errors.Trace(err)
}
_, err = writeOnlyTable.AddRecord(ctx, types.MakeDatums(2, 3))
if err != nil {
return errors.Trace(err)
}
err = ctx.NewTxn(context.Background())
if err != nil {
return errors.Trace(err)
}
err = checkResult(ctx, writeOnlyTable, writeOnlyTable.WritableCols(),
testutil.RowsWithSep(" ", "1 2 <nil>", "2 3 3"))
if err != nil {
return errors.Trace(err)
}
// This test is for RowWithCols when column state is StateWriteOnly.
row, err := tables.RowWithCols(writeOnlyTable, ctx, h, writeOnlyTable.WritableCols())
if err != nil {
return errors.Trace(err)
}
got := fmt.Sprintf("%v", row)
expect := fmt.Sprintf("%v", []types.Datum{types.NewDatum(1), types.NewDatum(2), types.NewDatum(nil)})
if got != expect {
return errors.Errorf("expect %v, got %v", expect, got)
}
// DeleteOnlyTable: select * from t
err = checkResult(ctx, deleteOnlyTable, deleteOnlyTable.WritableCols(), testutil.RowsWithSep(" ", "1 2", "2 3"))
if err != nil {
return errors.Trace(err)
}
// WriteOnlyTable: update t set c1 = 2 where c1 = 1
h, _, err = seek(writeOnlyTable.(table.PhysicalTable), ctx, kv.IntHandle(0))
if err != nil {
return errors.Trace(err)
}
err = writeOnlyTable.UpdateRecord(context.Background(), ctx, h, types.MakeDatums(1, 2, 3), types.MakeDatums(2, 2, 3), touchedSlice(writeOnlyTable))
if err != nil {
return errors.Trace(err)
}
err = ctx.NewTxn(context.Background())
if err != nil {
return errors.Trace(err)
}
// After we update the first row, its default value is also set.
err = checkResult(ctx, writeOnlyTable, writeOnlyTable.WritableCols(), testutil.RowsWithSep(" ", "2 2 3", "2 3 3"))
if err != nil {
return errors.Trace(err)
}
// DeleteOnlyTable: delete from t where c2 = 2
err = deleteOnlyTable.RemoveRecord(ctx, h, types.MakeDatums(2, 2))
if err != nil {
return errors.Trace(err)
}
err = ctx.NewTxn(context.Background())
if err != nil {
return errors.Trace(err)
}
	// After the delete-only table has deleted the first row, check the WriteOnly table records.
err = checkResult(ctx, writeOnlyTable, writeOnlyTable.WritableCols(), testutil.RowsWithSep(" ", "2 3 3"))
return errors.Trace(err)
}
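// touchedSlice returns a slice that marks every writable column of t as touched.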
func touchedSlice(t table.Table) []bool {
touched := make([]bool, 0, len(t.WritableCols()))
for range t.WritableCols() {
touched = append(touched, true)
}
return touched
}
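// checkAddPublic inserts a row through the public table, updates it through the write-only table,
// and checks that the new column's value is not overwritten by its default.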
func (s *testColumnChangeSuite) checkAddPublic(sctx sessionctx.Context, d *ddl, writeOnlyTable, publicTable table.Table) error {
ctx := context.TODO()
// publicTable Insert t values (4, 4, 4)
err := sctx.NewTxn(ctx)
if err != nil {
return errors.Trace(err)
}
h, err := publicTable.AddRecord(sctx, types.MakeDatums(4, 4, 4))
if err != nil {
return errors.Trace(err)
}
err = sctx.NewTxn(ctx)
if err != nil {
return errors.Trace(err)
}
// writeOnlyTable update t set c1 = 3 where c1 = 4
oldRow, err := tables.RowWithCols(writeOnlyTable, sctx, h, writeOnlyTable.WritableCols())
if err != nil {
return errors.Trace(err)
}
if len(oldRow) != 3 {
return errors.Errorf("%v", oldRow)
}
newRow := types.MakeDatums(3, 4, oldRow[2].GetValue())
err = writeOnlyTable.UpdateRecord(context.Background(), sctx, h, oldRow, newRow, touchedSlice(writeOnlyTable))
if err != nil {
return errors.Trace(err)
}
err = sctx.NewTxn(ctx)
if err != nil {
return errors.Trace(err)
}
	// publicTable: select * from t, and make sure the new c3 value 4 is not overwritten by the default value 3.
err = checkResult(sctx, publicTable, publicTable.WritableCols(), testutil.RowsWithSep(" ", "2 3 3", "3 4 4"))
if err != nil {
return errors.Trace(err)
}
return nil
}
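// getCurrentTable reads the latest table info for the given schema and table ID from the meta layer
// and builds a table.Table from it.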
func getCurrentTable(d *ddl, schemaID, tableID int64) (table.Table, error) {
var tblInfo *model.TableInfo
err := kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error {
t := meta.NewMeta(txn)
var err error
tblInfo, err = t.GetTable(schemaID, tableID)
if err != nil {
return errors.Trace(err)
}
return nil
})
if err != nil {
return nil, errors.Trace(err)
}
alloc := autoid.NewAllocator(d.store, schemaID, false, autoid.RowIDAllocType)
tbl, err := table.TableFromMeta(autoid.NewAllocators(alloc), tblInfo)
if err != nil {
return nil, errors.Trace(err)
}
return tbl, err
}
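// checkResult iterates over the records of t with the given columns and compares them
// against the expected rows.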
func checkResult(ctx sessionctx.Context, t table.Table, cols []*table.Column, rows [][]interface{}) error {
var gotRows [][]interface{}
err := tables.IterRecords(t, ctx, cols, func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
gotRows = append(gotRows, datumsToInterfaces(data))
return true, nil
})
if err != nil {
return err
}
got := fmt.Sprintf("%v", gotRows)
expect := fmt.Sprintf("%v", rows)
if got != expect {
return errors.Errorf("expect %v, got %v", expect, got)
}
return nil
}
func datumsToInterfaces(datums []types.Datum) []interface{} {
ifs := make([]interface{}, 0, len(datums))
for _, d := range datums {
ifs = append(ifs, d.GetValue())
}
return ifs
}
| ddl/column_change_test.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0009393073269166052,
0.00021616143931169063,
0.00016439944738522172,
0.00016897548630367965,
0.0001377723237965256
] |
{
"id": 8,
"code_window": [
"\t\t}\n",
"\n",
"\t\t// If msBeforeTxnExpired is not zero, it means there are still locks blocking us acquiring\n",
"\t\t// the pessimistic lock. We should return acquire fail with nowait set or timeout error if necessary.\n",
"\t\tif msBeforeTxnExpired > 0 {\n",
"\t\t\tif action.LockWaitTime == tidbkv.LockNoWait {\n",
"\t\t\t\treturn kv.ErrLockAcquireFailAndNoWaitSet\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t\t\tif action.LockWaitTime == LockNoWait {\n"
],
"file_path": "store/tikv/pessimistic.go",
"type": "replace",
"edit_start_line_idx": 178
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package memo
import (
plannercore "github.com/pingcap/tidb/planner/core"
)
// Operand is the node of a pattern tree; it represents a logical expression operator.
// Different from a logical plan operator, which holds the full information about an expression
// operator, an Operand only stores the type information.
// An Operand may correspond to a concrete logical plan operator, or it can have a special meaning,
// e.g., a placeholder for any logical plan operator.
type Operand int
const (
// OperandAny is a placeholder for any Operand.
OperandAny Operand = iota
// OperandJoin is the operand for LogicalJoin.
OperandJoin
// OperandAggregation is the operand for LogicalAggregation.
OperandAggregation
// OperandProjection is the operand for LogicalProjection.
OperandProjection
// OperandSelection is the operand for LogicalSelection.
OperandSelection
// OperandApply is the operand for LogicalApply.
OperandApply
// OperandMaxOneRow is the operand for LogicalMaxOneRow.
OperandMaxOneRow
// OperandTableDual is the operand for LogicalTableDual.
OperandTableDual
// OperandDataSource is the operand for DataSource.
OperandDataSource
// OperandUnionScan is the operand for LogicalUnionScan.
OperandUnionScan
// OperandUnionAll is the operand for LogicalUnionAll.
OperandUnionAll
// OperandSort is the operand for LogicalSort.
OperandSort
// OperandTopN is the operand for LogicalTopN.
OperandTopN
// OperandLock is the operand for LogicalLock.
OperandLock
// OperandLimit is the operand for LogicalLimit.
OperandLimit
// OperandTiKVSingleGather is the operand for TiKVSingleGather.
OperandTiKVSingleGather
// OperandMemTableScan is the operand for MemTableScan.
OperandMemTableScan
// OperandTableScan is the operand for TableScan.
OperandTableScan
// OperandIndexScan is the operand for IndexScan.
OperandIndexScan
// OperandShow is the operand for Show.
OperandShow
// OperandWindow is the operand for window function.
OperandWindow
// OperandUnsupported is the operand for unsupported operators.
OperandUnsupported
)
// GetOperand maps logical plan operator to Operand.
func GetOperand(p plannercore.LogicalPlan) Operand {
switch p.(type) {
case *plannercore.LogicalApply:
return OperandApply
case *plannercore.LogicalJoin:
return OperandJoin
case *plannercore.LogicalAggregation:
return OperandAggregation
case *plannercore.LogicalProjection:
return OperandProjection
case *plannercore.LogicalSelection:
return OperandSelection
case *plannercore.LogicalMaxOneRow:
return OperandMaxOneRow
case *plannercore.LogicalTableDual:
return OperandTableDual
case *plannercore.DataSource:
return OperandDataSource
case *plannercore.LogicalUnionScan:
return OperandUnionScan
case *plannercore.LogicalUnionAll:
return OperandUnionAll
case *plannercore.LogicalSort:
return OperandSort
case *plannercore.LogicalTopN:
return OperandTopN
case *plannercore.LogicalLock:
return OperandLock
case *plannercore.LogicalLimit:
return OperandLimit
case *plannercore.TiKVSingleGather:
return OperandTiKVSingleGather
case *plannercore.LogicalTableScan:
return OperandTableScan
case *plannercore.LogicalMemTable:
return OperandMemTableScan
case *plannercore.LogicalIndexScan:
return OperandIndexScan
case *plannercore.LogicalShow:
return OperandShow
case *plannercore.LogicalWindow:
return OperandWindow
default:
return OperandUnsupported
}
}
// Match checks if current Operand matches specified one.
func (o Operand) Match(t Operand) bool {
if o == OperandAny || t == OperandAny {
return true
}
if o == t {
return true
}
return false
}
// Pattern defines the match pattern for a rule. It's a tree-like structure
// which is a piece of a logical expression. Each node in the Pattern tree is
// defined by an Operand and EngineType pair.
type Pattern struct {
Operand
EngineTypeSet
Children []*Pattern
}
// Match checks whether the EngineTypeSet contains the given EngineType
// and whether the two Operands match.
func (p *Pattern) Match(o Operand, e EngineType) bool {
return p.EngineTypeSet.Contains(e) && p.Operand.Match(o)
}
// MatchOperandAny checks whether the pattern's Operand is OperandAny
// and the EngineTypeSet contains the given EngineType.
func (p *Pattern) MatchOperandAny(e EngineType) bool {
return p.EngineTypeSet.Contains(e) && p.Operand == OperandAny
}
// NewPattern creates a pattern node according to the Operand and EngineType.
func NewPattern(operand Operand, engineTypeSet EngineTypeSet) *Pattern {
return &Pattern{Operand: operand, EngineTypeSet: engineTypeSet}
}
// SetChildren sets the Children information for a pattern node.
func (p *Pattern) SetChildren(children ...*Pattern) {
p.Children = children
}
// BuildPattern builds a Pattern from Operand, EngineType and child Patterns.
// Used in GetPattern() of Transformation interface to generate a Pattern.
func BuildPattern(operand Operand, engineTypeSet EngineTypeSet, children ...*Pattern) *Pattern {
p := &Pattern{Operand: operand, EngineTypeSet: engineTypeSet}
p.Children = children
return p
}
| planner/memo/pattern.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.00045709279947914183,
0.00018641188216861337,
0.0001614336943021044,
0.0001698971027508378,
0.00006600518099730834
] |
{
"id": 8,
"code_window": [
"\t\t}\n",
"\n",
"\t\t// If msBeforeTxnExpired is not zero, it means there are still locks blocking us acquiring\n",
"\t\t// the pessimistic lock. We should return acquire fail with nowait set or timeout error if necessary.\n",
"\t\tif msBeforeTxnExpired > 0 {\n",
"\t\t\tif action.LockWaitTime == tidbkv.LockNoWait {\n",
"\t\t\t\treturn kv.ErrLockAcquireFailAndNoWaitSet\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t\t\tif action.LockWaitTime == LockNoWait {\n"
],
"file_path": "store/tikv/pessimistic.go",
"type": "replace",
"edit_start_line_idx": 178
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package expression
import (
"strings"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
)
// vecEvalDecimal evals a builtinGreatestDecimalSig.
// See http://dev.mysql.com/doc/refman/5.7/en/comparison-operators.html#function_greatest
func (b *builtinGreatestDecimalSig) vecEvalDecimal(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
buf, err := b.bufAllocator.get(types.ETDecimal, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalDecimal(b.ctx, input, result); err != nil {
return err
}
d64s := result.Decimals()
for j := 1; j < len(b.args); j++ {
if err := b.args[j].VecEvalDecimal(b.ctx, input, buf); err != nil {
return err
}
result.MergeNulls(buf)
for i := 0; i < n; i++ {
if result.IsNull(i) {
continue
}
v := buf.GetDecimal(i)
if v.Compare(&d64s[i]) > 0 {
d64s[i] = *v
}
}
}
return nil
}
func (b *builtinGreatestDecimalSig) vectorized() bool {
return true
}
func (b *builtinLeastDecimalSig) vecEvalDecimal(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
buf, err := b.bufAllocator.get(types.ETDecimal, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalDecimal(b.ctx, input, result); err != nil {
return err
}
d64s := result.Decimals()
for j := 1; j < len(b.args); j++ {
if err := b.args[j].VecEvalDecimal(b.ctx, input, buf); err != nil {
return err
}
result.MergeNulls(buf)
for i := 0; i < n; i++ {
if result.IsNull(i) {
continue
}
v := buf.GetDecimal(i)
if v.Compare(&d64s[i]) < 0 {
d64s[i] = *v
}
}
}
return nil
}
func (b *builtinLeastDecimalSig) vectorized() bool {
return true
}
func (b *builtinLeastIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
buf, err := b.bufAllocator.get(types.ETInt, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil {
return err
}
i64s := result.Int64s()
for j := 1; j < len(b.args); j++ {
if err := b.args[j].VecEvalInt(b.ctx, input, buf); err != nil {
return err
}
result.MergeNulls(buf)
for i := 0; i < n; i++ {
if result.IsNull(i) {
continue
}
v := buf.GetInt64(i)
if v < i64s[i] {
i64s[i] = v
}
}
}
return nil
}
func (b *builtinLeastIntSig) vectorized() bool {
return true
}
func (b *builtinGreatestIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
buf, err := b.bufAllocator.get(types.ETInt, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil {
return err
}
i64s := result.Int64s()
for j := 1; j < len(b.args); j++ {
if err := b.args[j].VecEvalInt(b.ctx, input, buf); err != nil {
return err
}
result.MergeNulls(buf)
v := buf.Int64s()
for i := 0; i < n; i++ {
if result.IsNull(i) {
continue
}
if v[i] > i64s[i] {
i64s[i] = v[i]
}
}
}
return nil
}
func (b *builtinGreatestIntSig) vectorized() bool {
return true
}
func (b *builtinGEIntSig) vectorized() bool {
return true
}
func (b *builtinGEIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
var err error
var buf0, buf1 *chunk.Column
buf0, err = b.bufAllocator.get(types.ETInt, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf0)
if err = b.args[0].VecEvalInt(b.ctx, input, buf0); err != nil {
return err
}
buf1, err = b.bufAllocator.get(types.ETInt, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf1)
if err = b.args[1].VecEvalInt(b.ctx, input, buf1); err != nil {
return err
}
result.ResizeInt64(n, false)
vecCompareInt(mysql.HasUnsignedFlag(b.args[0].GetType().Flag), mysql.HasUnsignedFlag(b.args[1].GetType().Flag), buf0, buf1, result)
result.MergeNulls(buf0, buf1)
vecResOfGE(result.Int64s())
return nil
}
func (b *builtinLeastRealSig) vectorized() bool {
return true
}
func (b *builtinLeastRealSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
buf, err := b.bufAllocator.get(types.ETReal, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalReal(b.ctx, input, result); err != nil {
return err
}
f64s := result.Float64s()
for j := 1; j < len(b.args); j++ {
if err := b.args[j].VecEvalReal(b.ctx, input, buf); err != nil {
return err
}
result.MergeNulls(buf)
v := buf.Float64s()
for i := 0; i < n; i++ {
if result.IsNull(i) {
continue
}
if v[i] < f64s[i] {
f64s[i] = v[i]
}
}
}
return nil
}
func (b *builtinLeastStringSig) vectorized() bool {
return true
}
func (b *builtinLeastStringSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error {
if err := b.args[0].VecEvalString(b.ctx, input, result); err != nil {
return err
}
n := input.NumRows()
buf1, err := b.bufAllocator.get(types.ETString, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf1)
buf2, err := b.bufAllocator.get(types.ETString, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf2)
src := result
arg := buf1
dst := buf2
dst.ReserveString(n)
for j := 1; j < len(b.args); j++ {
if err := b.args[j].VecEvalString(b.ctx, input, arg); err != nil {
return err
}
for i := 0; i < n; i++ {
if src.IsNull(i) || arg.IsNull(i) {
dst.AppendNull()
continue
}
srcStr := src.GetString(i)
argStr := arg.GetString(i)
if types.CompareString(srcStr, argStr, b.collation) < 0 {
dst.AppendString(srcStr)
} else {
dst.AppendString(argStr)
}
}
src, dst = dst, src
arg.ReserveString(n)
dst.ReserveString(n)
}
if len(b.args)%2 == 0 {
src.CopyConstruct(result)
}
return nil
}
func (b *builtinEQIntSig) vectorized() bool {
return true
}
func (b *builtinEQIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
var err error
var buf0, buf1 *chunk.Column
buf0, err = b.bufAllocator.get(types.ETInt, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf0)
if err := b.args[0].VecEvalInt(b.ctx, input, buf0); err != nil {
return err
}
buf1, err = b.bufAllocator.get(types.ETInt, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf1)
if err := b.args[1].VecEvalInt(b.ctx, input, buf1); err != nil {
return err
}
result.ResizeInt64(n, false)
vecCompareInt(mysql.HasUnsignedFlag(b.args[0].GetType().Flag), mysql.HasUnsignedFlag(b.args[1].GetType().Flag), buf0, buf1, result)
result.MergeNulls(buf0, buf1)
vecResOfEQ(result.Int64s())
return nil
}
func (b *builtinNEIntSig) vectorized() bool {
return true
}
func (b *builtinNEIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
var err error
var buf0, buf1 *chunk.Column
buf0, err = b.bufAllocator.get(types.ETInt, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf0)
if err := b.args[0].VecEvalInt(b.ctx, input, buf0); err != nil {
return err
}
buf1, err = b.bufAllocator.get(types.ETInt, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf1)
if err := b.args[1].VecEvalInt(b.ctx, input, buf1); err != nil {
return err
}
result.ResizeInt64(n, false)
vecCompareInt(mysql.HasUnsignedFlag(b.args[0].GetType().Flag), mysql.HasUnsignedFlag(b.args[1].GetType().Flag), buf0, buf1, result)
result.MergeNulls(buf0, buf1)
vecResOfNE(result.Int64s())
return nil
}
func (b *builtinGTIntSig) vectorized() bool {
return true
}
func (b *builtinGTIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
var err error
var buf0, buf1 *chunk.Column
buf0, err = b.bufAllocator.get(types.ETInt, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf0)
if err := b.args[0].VecEvalInt(b.ctx, input, buf0); err != nil {
return err
}
buf1, err = b.bufAllocator.get(types.ETInt, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf1)
if err := b.args[1].VecEvalInt(b.ctx, input, buf1); err != nil {
return err
}
result.ResizeInt64(n, false)
vecCompareInt(mysql.HasUnsignedFlag(b.args[0].GetType().Flag), mysql.HasUnsignedFlag(b.args[1].GetType().Flag), buf0, buf1, result)
result.MergeNulls(buf0, buf1)
vecResOfGT(result.Int64s())
return nil
}
func (b *builtinNullEQIntSig) vectorized() bool {
return true
}
func (b *builtinNullEQIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
buf0, err := b.bufAllocator.get(types.ETInt, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf0)
if err := b.args[0].VecEvalInt(b.ctx, input, buf0); err != nil {
return err
}
buf1, err := b.bufAllocator.get(types.ETInt, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf1)
result.ResizeInt64(n, false)
if err := b.args[1].VecEvalInt(b.ctx, input, buf1); err != nil {
return err
}
vecCompareInt(mysql.HasUnsignedFlag(b.args[0].GetType().Flag), mysql.HasUnsignedFlag(b.args[1].GetType().Flag), buf0, buf1, result)
i64s := result.Int64s()
for i := 0; i < n; i++ {
isNull0 := buf0.IsNull(i)
isNull1 := buf1.IsNull(i)
if isNull0 && isNull1 {
i64s[i] = 1
} else if isNull0 || isNull1 || i64s[i] != 0 {
i64s[i] = 0
} else {
i64s[i] = 1
}
}
return nil
}
func (b *builtinIntervalIntSig) vectorized() bool {
return true
}
func (b *builtinIntervalIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
var err error
if err = b.args[0].VecEvalInt(b.ctx, input, result); err != nil {
return err
}
i64s := result.Int64s()
var idx int
for i, v := range i64s {
if result.IsNull(i) {
result.SetNull(i, false)
i64s[i] = -1
continue
}
if b.hasNullable {
idx, err = b.linearSearch(v, mysql.HasUnsignedFlag(b.args[0].GetType().Flag), b.args[1:], input.GetRow(i))
} else {
idx, err = b.binSearch(v, mysql.HasUnsignedFlag(b.args[0].GetType().Flag), b.args[1:], input.GetRow(i))
}
if err != nil {
return err
}
i64s[i] = int64(idx)
}
return nil
}
func (b *builtinIntervalRealSig) vectorized() bool {
return true
}
func (b *builtinIntervalRealSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
buf, err := b.bufAllocator.get(types.ETReal, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err = b.args[0].VecEvalReal(b.ctx, input, buf); err != nil {
return err
}
f64s := buf.Float64s()
result.ResizeInt64(n, false)
res := result.Int64s()
var idx int
for i := 0; i < n; i++ {
if buf.IsNull(i) {
res[i] = -1
continue
}
if b.hasNullable {
idx, err = b.linearSearch(f64s[i], b.args[1:], input.GetRow(i))
} else {
idx, err = b.binSearch(f64s[i], b.args[1:], input.GetRow(i))
}
if err != nil {
return err
}
res[i] = int64(idx)
}
return nil
}
func (b *builtinLEIntSig) vectorized() bool {
return true
}
func (b *builtinLEIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
var err error
var buf0, buf1 *chunk.Column
buf0, err = b.bufAllocator.get(types.ETInt, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf0)
if err := b.args[0].VecEvalInt(b.ctx, input, buf0); err != nil {
return err
}
buf1, err = b.bufAllocator.get(types.ETInt, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf1)
if err := b.args[1].VecEvalInt(b.ctx, input, buf1); err != nil {
return err
}
result.ResizeInt64(n, false)
vecCompareInt(mysql.HasUnsignedFlag(b.args[0].GetType().Flag), mysql.HasUnsignedFlag(b.args[1].GetType().Flag), buf0, buf1, result)
result.MergeNulls(buf0, buf1)
vecResOfLE(result.Int64s())
return nil
}
func (b *builtinLTIntSig) vectorized() bool {
return true
}
func (b *builtinLTIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
var err error
var buf0, buf1 *chunk.Column
buf0, err = b.bufAllocator.get(types.ETInt, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf0)
if err := b.args[0].VecEvalInt(b.ctx, input, buf0); err != nil {
return err
}
buf1, err = b.bufAllocator.get(types.ETInt, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf1)
if err := b.args[1].VecEvalInt(b.ctx, input, buf1); err != nil {
return err
}
result.ResizeInt64(n, false)
vecCompareInt(mysql.HasUnsignedFlag(b.args[0].GetType().Flag), mysql.HasUnsignedFlag(b.args[1].GetType().Flag), buf0, buf1, result)
result.MergeNulls(buf0, buf1)
vecResOfLT(result.Int64s())
return nil
}
func vecResOfLT(res []int64) {
n := len(res)
for i := 0; i < n; i++ {
if res[i] < 0 {
res[i] = 1
} else {
res[i] = 0
}
}
}
func vecResOfNE(res []int64) {
n := len(res)
for i := 0; i < n; i++ {
if res[i] != 0 {
res[i] = 1
} else {
res[i] = 0
}
}
}
func vecResOfEQ(res []int64) {
n := len(res)
for i := 0; i < n; i++ {
if res[i] == 0 {
res[i] = 1
} else {
res[i] = 0
}
}
}
func vecResOfLE(res []int64) {
n := len(res)
for i := 0; i < n; i++ {
if res[i] <= 0 {
res[i] = 1
} else {
res[i] = 0
}
}
}
func vecResOfGT(res []int64) {
n := len(res)
for i := 0; i < n; i++ {
if res[i] > 0 {
res[i] = 1
} else {
res[i] = 0
}
}
}
func vecResOfGE(res []int64) {
n := len(res)
for i := 0; i < n; i++ {
if res[i] >= 0 {
res[i] = 1
} else {
res[i] = 0
}
}
}
// vecCompareInt is vectorized CompareInt()
func vecCompareInt(isUnsigned0, isUnsigned1 bool, largs, rargs, result *chunk.Column) {
switch {
case isUnsigned0 && isUnsigned1:
types.VecCompareUU(largs.Uint64s(), rargs.Uint64s(), result.Int64s())
case isUnsigned0 && !isUnsigned1:
types.VecCompareUI(largs.Uint64s(), rargs.Int64s(), result.Int64s())
case !isUnsigned0 && isUnsigned1:
types.VecCompareIU(largs.Int64s(), rargs.Uint64s(), result.Int64s())
case !isUnsigned0 && !isUnsigned1:
types.VecCompareII(largs.Int64s(), rargs.Int64s(), result.Int64s())
}
}
func (b *builtinGreatestTimeSig) vectorized() bool {
return true
}
func (b *builtinGreatestTimeSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error {
sc := b.ctx.GetSessionVars().StmtCtx
n := input.NumRows()
dstStrings := make([]string, n)
// TODO: use Column.MergeNulls instead, however, it doesn't support var-length type currently.
dstNullMap := make([]bool, n)
for j := 0; j < len(b.args); j++ {
if err := b.args[j].VecEvalString(b.ctx, input, result); err != nil {
return err
}
for i := 0; i < n; i++ {
if dstNullMap[i] = dstNullMap[i] || result.IsNull(i); dstNullMap[i] {
continue
}
// NOTE: can't use Column.GetString because it returns an unsafe string, copy the row instead.
argTimeStr := string(result.GetBytes(i))
argTime, err := types.ParseDatetime(sc, argTimeStr)
if err != nil {
if err = handleInvalidTimeError(b.ctx, err); err != nil {
return err
}
} else {
argTimeStr = argTime.String()
}
if j == 0 || strings.Compare(argTimeStr, dstStrings[i]) > 0 {
dstStrings[i] = argTimeStr
}
}
}
// Aggregate the NULL and String value into result
result.ReserveString(n)
for i := 0; i < n; i++ {
if dstNullMap[i] {
result.AppendNull()
} else {
result.AppendString(dstStrings[i])
}
}
return nil
}
func (b *builtinGreatestRealSig) vectorized() bool {
return true
}
func (b *builtinGreatestRealSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
buf, err := b.bufAllocator.get(types.ETReal, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalReal(b.ctx, input, result); err != nil {
return err
}
f64s := result.Float64s()
for j := 1; j < len(b.args); j++ {
if err := b.args[j].VecEvalReal(b.ctx, input, buf); err != nil {
return err
}
result.MergeNulls(buf)
v := buf.Float64s()
for i := 0; i < n; i++ {
if result.IsNull(i) {
continue
}
if v[i] > f64s[i] {
f64s[i] = v[i]
}
}
}
return nil
}
func (b *builtinLeastTimeSig) vectorized() bool {
return true
}
func (b *builtinLeastTimeSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error {
sc := b.ctx.GetSessionVars().StmtCtx
n := input.NumRows()
dstStrings := make([]string, n)
// TODO: use Column.MergeNulls instead, however, it doesn't support var-length type currently.
dstNullMap := make([]bool, n)
for j := 0; j < len(b.args); j++ {
if err := b.args[j].VecEvalString(b.ctx, input, result); err != nil {
return err
}
for i := 0; i < n; i++ {
if dstNullMap[i] = dstNullMap[i] || result.IsNull(i); dstNullMap[i] {
continue
}
// NOTE: can't use Column.GetString because it returns an unsafe string, copy the row instead.
argTimeStr := string(result.GetBytes(i))
argTime, err := types.ParseDatetime(sc, argTimeStr)
if err != nil {
if err = handleInvalidTimeError(b.ctx, err); err != nil {
return err
}
} else {
argTimeStr = argTime.String()
}
if j == 0 || strings.Compare(argTimeStr, dstStrings[i]) < 0 {
dstStrings[i] = argTimeStr
}
}
}
// Aggregate the NULL and String value into result
result.ReserveString(n)
for i := 0; i < n; i++ {
if dstNullMap[i] {
result.AppendNull()
} else {
result.AppendString(dstStrings[i])
}
}
return nil
}
func (b *builtinGreatestStringSig) vectorized() bool {
return true
}
func (b *builtinGreatestStringSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error {
if err := b.args[0].VecEvalString(b.ctx, input, result); err != nil {
return err
}
n := input.NumRows()
buf1, err := b.bufAllocator.get(types.ETString, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf1)
buf2, err := b.bufAllocator.get(types.ETString, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf2)
src := result
arg := buf1
dst := buf2
dst.ReserveString(n)
for j := 1; j < len(b.args); j++ {
if err := b.args[j].VecEvalString(b.ctx, input, arg); err != nil {
return err
}
for i := 0; i < n; i++ {
if src.IsNull(i) || arg.IsNull(i) {
dst.AppendNull()
continue
}
srcStr := src.GetString(i)
argStr := arg.GetString(i)
if types.CompareString(srcStr, argStr, b.collation) > 0 {
dst.AppendString(srcStr)
} else {
dst.AppendString(argStr)
}
}
src, dst = dst, src
arg.ReserveString(n)
dst.ReserveString(n)
}
if len(b.args)%2 == 0 {
src.CopyConstruct(result)
}
return nil
}
| expression/builtin_compare_vec.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0005174874677322805,
0.0001801868638722226,
0.00016528909327462316,
0.0001692358055151999,
0.00005211946336203255
] |
{
"id": 9,
"code_window": [
"\t\t\t\treturn kv.ErrLockAcquireFailAndNoWaitSet\n",
"\t\t\t} else if action.LockWaitTime == tidbkv.LockAlwaysWait {\n",
"\t\t\t\t// do nothing but keep wait\n",
"\t\t\t} else {\n",
"\t\t\t\t// the lockWaitTime is set, we should return wait timeout if we are still blocked by a lock\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t} else if action.LockWaitTime == LockAlwaysWait {\n"
],
"file_path": "store/tikv/pessimistic.go",
"type": "replace",
"edit_start_line_idx": 180
} | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package kv
import (
"context"
"crypto/tls"
"time"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/parser/model"
"github.com/pingcap/tidb/config"
tikvstore "github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/util/memory"
)
// UnCommitIndexKVFlag is used to indicate that the index key/value does not need to be committed.
// This is used when the index key/value was left unchanged by an update.
// Usage:
// 1. For a non-unique index: normally, the index value is '0'.
// Change the value to '1' to indicate that the index key/value does not need to be committed.
// 2. For a unique index: normally, the index value is the record handle ID, 8 bytes.
// Append UnCommitIndexKVFlag to the value to indicate that the index key/value does not need to be committed.
const UnCommitIndexKVFlag byte = '1'
// These limits are enforced to make sure the transaction can be handled well by TiKV.
var (
// TxnEntrySizeLimit is limit of single entry size (len(key) + len(value)).
TxnEntrySizeLimit uint64 = config.DefTxnEntrySizeLimit
// TxnTotalSizeLimit is limit of the sum of all entry size.
TxnTotalSizeLimit uint64 = config.DefTxnTotalSizeLimit
)
// FlagsOp describes KeyFlags modify operation. TODO:remove it when br is ready
type FlagsOp = tikvstore.FlagsOp
// Getter is the interface for the Get method.
type Getter interface {
// Get gets the value for key k from kv store.
// If corresponding kv pair does not exist, it returns nil and ErrNotExist.
Get(ctx context.Context, k Key) ([]byte, error)
}
// Retriever is the interface that wraps the basic Get and Seek methods.
type Retriever interface {
Getter
// Iter creates an Iterator positioned on the first entry that k <= entry's key.
// If such entry is not found, it returns an invalid Iterator with no error.
// It yields only keys that < upperBound. If upperBound is nil, it means the upperBound is unbounded.
// The Iterator must be Closed after use.
Iter(k Key, upperBound Key) (Iterator, error)
	// IterReverse creates a reversed Iterator positioned on the first entry whose key is less than k.
// The returned iterator will iterate from greater key to smaller key.
// If k is nil, the returned iterator will be positioned at the last key.
// TODO: Add lower bound limit
IterReverse(k Key) (Iterator, error)
}
// Mutator is the interface that wraps the basic Set and Delete methods.
type Mutator interface {
// Set sets the value for key k as v into kv store.
// v must NOT be nil or empty, otherwise it returns ErrCannotSetNilValue.
Set(k Key, v []byte) error
// Delete removes the entry for key k from kv store.
Delete(k Key) error
}
// StagingHandle is the reference of a staging buffer.
type StagingHandle int
var (
// InvalidStagingHandle is an invalid handler, MemBuffer will check handler to ensure safety.
InvalidStagingHandle StagingHandle = 0
	// LastActiveStagingHandle is a special handle which always points to the last active staging buffer.
LastActiveStagingHandle StagingHandle = -1
)
// RetrieverMutator is the interface that groups Retriever and Mutator interfaces.
type RetrieverMutator interface {
Retriever
Mutator
}
// MemBuffer is an in-memory kv collection that can be used to buffer write operations.
type MemBuffer interface {
RetrieverMutator
// RLock locks the MemBuffer for shared read.
	// In most cases, MemBuffer will only be used by a single goroutine,
	// but it will be read by multiple goroutines when combined with executor.UnionScanExec.
	// To avoid races introduced by executor.UnionScanExec, MemBuffer exposes a read lock for it.
RLock()
// RUnlock unlocks the MemBuffer.
RUnlock()
// GetFlags returns the latest flags associated with key.
GetFlags(Key) (tikvstore.KeyFlags, error)
	// SetWithFlags puts a key-value pair into the last active staging buffer with the given KeyFlags.
SetWithFlags(Key, []byte, ...tikvstore.FlagsOp) error
	// DeleteWithFlags deletes the key with the given KeyFlags.
DeleteWithFlags(Key, ...tikvstore.FlagsOp) error
	// Staging creates a new staging buffer inside the MemBuffer.
	// Subsequent writes will be temporarily stored in this new staging buffer.
	// When all modifications look good, you can call `Release` to publish all of them to the upper-level buffer.
Staging() StagingHandle
	// Release publishes all modifications in the latest staging buffer to the upper level.
Release(StagingHandle)
	// Cleanup cleans up the resources referenced by the StagingHandle.
// If the changes are not published by `Release`, they will be discarded.
Cleanup(StagingHandle)
	// InspectStage is used to inspect the value updates in the given stage.
InspectStage(StagingHandle, func(Key, tikvstore.KeyFlags, []byte))
// SnapshotGetter returns a Getter for a snapshot of MemBuffer.
SnapshotGetter() Getter
	// SnapshotIter returns an Iterator for a snapshot of MemBuffer.
SnapshotIter(k, upperbound Key) Iterator
// Len returns the number of entries in the DB.
Len() int
}
// LockCtx contains information for LockKeys method.
type LockCtx = tikvstore.LockCtx
// Transaction defines the interface for operations inside a Transaction.
// This is not thread safe.
type Transaction interface {
RetrieverMutator
// Size returns sum of keys and values length.
Size() int
// Len returns the number of entries in the DB.
Len() int
	// Reset resets the Transaction to its initial state.
Reset()
// Commit commits the transaction operations to KV store.
Commit(context.Context) error
// Rollback undoes the transaction operations to KV store.
Rollback() error
// String implements fmt.Stringer interface.
String() string
// LockKeys tries to lock the entries with the keys in KV store.
LockKeys(ctx context.Context, lockCtx *LockCtx, keys ...Key) error
// SetOption sets an option with a value, when val is nil, uses the default
// value of this option.
SetOption(opt int, val interface{})
// GetOption returns the option
GetOption(opt int) interface{}
// DelOption deletes an option.
DelOption(opt int)
// IsReadOnly checks if the transaction has only performed read operations.
IsReadOnly() bool
// StartTS returns the transaction start timestamp.
StartTS() uint64
	// Valid returns whether the transaction is valid.
	// A transaction becomes invalid after commit or rollback.
Valid() bool
	// GetMemBuffer returns the MemBuffer binding to this transaction.
GetMemBuffer() MemBuffer
// GetSnapshot returns the Snapshot binding to this transaction.
GetSnapshot() Snapshot
// GetUnionStore returns the UnionStore binding to this transaction.
GetUnionStore() UnionStore
// SetVars sets variables to the transaction.
SetVars(vars interface{})
// GetVars gets variables from the transaction.
GetVars() interface{}
// BatchGet gets kv from the memory buffer of statement and transaction, and the kv storage.
// Do not use len(value) == 0 or value == nil to represent non-exist.
// If a key doesn't exist, there shouldn't be any corresponding entry in the result map.
BatchGet(ctx context.Context, keys []Key) (map[string][]byte, error)
IsPessimistic() bool
	// CacheTableInfo caches the table information for the given table ID.
	// PresumeKeyNotExists will use this to help decode error messages.
CacheTableInfo(id int64, info *model.TableInfo)
	// GetTableInfo returns the table information cached through CacheTableInfo.
	// If there is no table info cached for the given ID, it returns nil.
GetTableInfo(id int64) *model.TableInfo
}
// Client is used to send request to KV layer.
type Client interface {
// Send sends request to KV layer, returns a Response.
Send(ctx context.Context, req *Request, vars interface{}, sessionMemTracker *memory.Tracker, enabledRateLimitAction bool) Response
// IsRequestTypeSupported checks if reqType and subType is supported.
IsRequestTypeSupported(reqType, subType int64) bool
}
// ReqTypes.
const (
ReqTypeSelect = 101
ReqTypeIndex = 102
ReqTypeDAG = 103
ReqTypeAnalyze = 104
ReqTypeChecksum = 105
ReqSubTypeBasic = 0
ReqSubTypeDesc = 10000
ReqSubTypeGroupBy = 10001
ReqSubTypeTopN = 10002
ReqSubTypeSignature = 10003
ReqSubTypeAnalyzeIdx = 10004
ReqSubTypeAnalyzeCol = 10005
)
// StoreType represents the type of a store.
type StoreType uint8
const (
// TiKV means the type of a store is TiKV.
TiKV StoreType = iota
// TiFlash means the type of a store is TiFlash.
TiFlash
// TiDB means the type of a store is TiDB.
TiDB
// UnSpecified means the store type is unknown
UnSpecified = 255
)
// Name returns the name of store type.
func (t StoreType) Name() string {
if t == TiFlash {
return "tiflash"
} else if t == TiDB {
return "tidb"
} else if t == TiKV {
return "tikv"
}
return "unspecified"
}
// Request represents a kv request.
type Request struct {
// Tp is the request type.
Tp int64
StartTs uint64
Data []byte
KeyRanges []KeyRange
// Concurrency is 1, if it only sends the request to a single storage unit when
// ResponseIterator.Next is called. If concurrency is greater than 1, the request will be
// sent to multiple storage units concurrently.
Concurrency int
// IsolationLevel is the isolation level, default is SI.
IsolationLevel tikvstore.IsoLevel
// Priority is the priority of this KV request, its value may be PriorityNormal/PriorityLow/PriorityHigh.
Priority int
// memTracker is used to trace and control memory usage in co-processor layer.
MemTracker *memory.Tracker
// KeepOrder is true, if the response should be returned in order.
KeepOrder bool
// Desc is true, if the request is sent in descending order.
Desc bool
// NotFillCache makes this request do not touch the LRU cache of the underlying storage.
NotFillCache bool
// SyncLog decides whether the WAL(write-ahead log) of this request should be synchronized.
SyncLog bool
	// Streaming indicates using the streaming API for this request, which results in one Next()
	// call not corresponding to a whole region's result.
Streaming bool
// ReplicaRead is used for reading data from replicas, only follower is supported at this time.
ReplicaRead tikvstore.ReplicaReadType
// StoreType represents this request is sent to the which type of store.
StoreType StoreType
// Cacheable is true if the request can be cached. Currently only deterministic DAG requests can be cached.
Cacheable bool
// SchemaVer is for any schema-ful storage to validate schema correctness if necessary.
SchemaVar int64
// BatchCop indicates whether send batch coprocessor request to tiflash.
BatchCop bool
	// TaskID is a unique ID for an execution of a statement
TaskID uint64
// TiDBServerID is the specified TiDB serverID to execute request. `0` means all TiDB instances.
TiDBServerID uint64
// IsStaleness indicates whether the request read staleness data
IsStaleness bool
// MatchStoreLabels indicates the labels the store should be matched
MatchStoreLabels []*metapb.StoreLabel
}
// ResultSubset represents a result subset from a single storage unit.
// TODO: Find a better interface for ResultSubset that can reuse bytes.
type ResultSubset interface {
// GetData gets the data.
GetData() []byte
// GetStartKey gets the start key.
GetStartKey() Key
// MemSize returns how many bytes of memory this result use for tracing memory usage.
MemSize() int64
// RespTime returns the response time for the request.
RespTime() time.Duration
}
// Response represents the response returned from KV layer.
type Response interface {
// Next returns a resultSubset from a single storage unit.
// When full result set is returned, nil is returned.
Next(ctx context.Context) (resultSubset ResultSubset, err error)
// Close response.
Close() error
}
// Snapshot defines the interface for the snapshot fetched from KV store.
type Snapshot interface {
Retriever
// BatchGet gets a batch of values from snapshot.
BatchGet(ctx context.Context, keys []Key) (map[string][]byte, error)
// SetOption sets an option with a value, when val is nil, uses the default
// value of this option. Only ReplicaRead is supported for snapshot
SetOption(opt int, val interface{})
// DelOption deletes an option.
DelOption(opt int)
}
// BatchGetter is the interface for BatchGet.
type BatchGetter interface {
// BatchGet gets a batch of values.
BatchGet(ctx context.Context, keys []Key) (map[string][]byte, error)
}
// Driver is the interface that must be implemented by a KV storage.
type Driver interface {
// Open returns a new Storage.
// The path is the string for storage specific format.
Open(path string) (Storage, error)
}
// TransactionOption indicates the option when beginning a transaction
type TransactionOption struct {
TxnScope string
StartTS *uint64
PrevSec *uint64
}
// SetStartTs set startTS
func (to TransactionOption) SetStartTs(startTS uint64) TransactionOption {
to.StartTS = &startTS
return to
}
// SetPrevSec set prevSec
func (to TransactionOption) SetPrevSec(prevSec uint64) TransactionOption {
to.PrevSec = &prevSec
return to
}
// SetTxnScope set txnScope
func (to TransactionOption) SetTxnScope(txnScope string) TransactionOption {
to.TxnScope = txnScope
return to
}
// Storage defines the interface for storage.
// Isolation should be at least SI(SNAPSHOT ISOLATION)
type Storage interface {
// Begin a global transaction
Begin() (Transaction, error)
// Begin a transaction with given option
BeginWithOption(option TransactionOption) (Transaction, error)
// GetSnapshot gets a snapshot that is able to read any data which data is <= ver.
// if ver is MaxVersion or > current max committed version, we will use current version for this snapshot.
GetSnapshot(ver Version) Snapshot
// GetClient gets a client instance.
GetClient() Client
// GetMPPClient gets a mpp client instance.
GetMPPClient() MPPClient
// Close store
Close() error
// UUID return a unique ID which represents a Storage.
UUID() string
// CurrentVersion returns current max committed version with the given txnScope (local or global).
CurrentVersion(txnScope string) (Version, error)
// GetOracle gets a timestamp oracle client.
GetOracle() oracle.Oracle
// SupportDeleteRange gets the storage support delete range or not.
SupportDeleteRange() (supported bool)
// Name gets the name of the storage engine
Name() string
	// Describe returns a brief introduction of the storage
Describe() string
// ShowStatus returns the specified status of the storage
ShowStatus(ctx context.Context, key string) (interface{}, error)
// GetMemCache return memory manager of the storage.
GetMemCache() MemManager
}
// EtcdBackend is used for judging a storage is a real TiKV.
type EtcdBackend interface {
EtcdAddrs() ([]string, error)
TLSConfig() *tls.Config
StartGCWorker() error
}
// FnKeyCmp is the function type used when iterating over the keys
type FnKeyCmp func(key Key) bool
// Iterator is the interface for an iterator on a KV store.
type Iterator interface {
Valid() bool
Key() Key
Value() []byte
Next() error
Close()
}
// SplittableStore is the kv store which supports split regions.
type SplittableStore interface {
SplitRegions(ctx context.Context, splitKey [][]byte, scatter bool, tableID *int64) (regionID []uint64, err error)
WaitScatterRegionFinish(ctx context.Context, regionID uint64, backOff int) error
CheckRegionInScattering(regionID uint64) (bool, error)
}
// LockAlwaysWait and LockNoWait are used for the pessimistic lock wait time.
// These two constants are special values in the lock protocol with TiKV:
// 0 means always wait, -1 means no wait, and other values mean the lock wait time in milliseconds.
var (
LockAlwaysWait = int64(0)
LockNoWait = int64(-1)
)
| kv/kv.go | 1 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.004006103612482548,
0.0004561881360132247,
0.00016196486831177026,
0.00017706707876641303,
0.0007461969507858157
] |
{
"id": 9,
"code_window": [
"\t\t\t\treturn kv.ErrLockAcquireFailAndNoWaitSet\n",
"\t\t\t} else if action.LockWaitTime == tidbkv.LockAlwaysWait {\n",
"\t\t\t\t// do nothing but keep wait\n",
"\t\t\t} else {\n",
"\t\t\t\t// the lockWaitTime is set, we should return wait timeout if we are still blocked by a lock\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t} else if action.LockWaitTime == LockAlwaysWait {\n"
],
"file_path": "store/tikv/pessimistic.go",
"type": "replace",
"edit_start_line_idx": 180
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package kv
import (
"context"
"github.com/pingcap/parser/model"
"github.com/pingcap/tidb/store/tikv/oracle"
)
// mockTxn is a txn that returns a retryable error when Commit is called.
type mockTxn struct {
opts map[int]interface{}
valid bool
}
// Commit always returns a retryable error.
func (t *mockTxn) Commit(ctx context.Context) error {
return ErrTxnRetryable
}
func (t *mockTxn) Rollback() error {
t.valid = false
return nil
}
func (t *mockTxn) String() string {
return ""
}
func (t *mockTxn) LockKeys(_ context.Context, _ *LockCtx, _ ...Key) error {
return nil
}
func (t *mockTxn) SetOption(opt int, val interface{}) {
t.opts[opt] = val
}
func (t *mockTxn) DelOption(opt int) {
delete(t.opts, opt)
}
func (t *mockTxn) GetOption(opt int) interface{} {
return t.opts[opt]
}
func (t *mockTxn) IsReadOnly() bool {
return true
}
func (t *mockTxn) StartTS() uint64 {
return uint64(0)
}
func (t *mockTxn) Get(ctx context.Context, k Key) ([]byte, error) {
return nil, nil
}
func (t *mockTxn) BatchGet(ctx context.Context, keys []Key) (map[string][]byte, error) {
return nil, nil
}
func (t *mockTxn) Iter(k Key, upperBound Key) (Iterator, error) {
return nil, nil
}
func (t *mockTxn) IterReverse(k Key) (Iterator, error) {
return nil, nil
}
func (t *mockTxn) Set(k Key, v []byte) error {
return nil
}
func (t *mockTxn) Delete(k Key) error {
return nil
}
func (t *mockTxn) Valid() bool {
return t.valid
}
func (t *mockTxn) Len() int {
return 0
}
func (t *mockTxn) Size() int {
return 0
}
func (t *mockTxn) GetMemBuffer() MemBuffer {
return nil
}
func (t *mockTxn) GetSnapshot() Snapshot {
return nil
}
func (t *mockTxn) GetUnionStore() UnionStore {
return nil
}
func (t *mockTxn) NewStagingBuffer() MemBuffer {
return nil
}
func (t *mockTxn) Flush() (int, error) {
return 0, nil
}
func (t *mockTxn) Discard() {
}
func (t *mockTxn) Reset() {
t.valid = false
}
func (t *mockTxn) SetVars(vars interface{}) {
}
func (t *mockTxn) GetVars() interface{} {
return nil
}
func (t *mockTxn) CacheTableInfo(id int64, info *model.TableInfo) {
}
func (t *mockTxn) GetTableInfo(id int64) *model.TableInfo {
return nil
}
// newMockTxn creates a new mockTxn.
func newMockTxn() Transaction {
return &mockTxn{
opts: make(map[int]interface{}),
valid: true,
}
}
// mockStorage is used to start a txn whose commit always fails.
type mockStorage struct {
}
func (s *mockStorage) Begin() (Transaction, error) {
return newMockTxn(), nil
}
func (s *mockStorage) BeginWithOption(option TransactionOption) (Transaction, error) {
return newMockTxn(), nil
}
func (*mockTxn) IsPessimistic() bool {
return false
}
func (s *mockStorage) GetSnapshot(ver Version) Snapshot {
return &mockSnapshot{
store: newMockMap(),
}
}
func (s *mockStorage) Close() error {
return nil
}
func (s *mockStorage) UUID() string {
return ""
}
// CurrentVersion returns current max committed version.
func (s *mockStorage) CurrentVersion(txnScope string) (Version, error) {
return NewVersion(1), nil
}
func (s *mockStorage) GetClient() Client {
return nil
}
func (s *mockStorage) GetMPPClient() MPPClient {
return nil
}
func (s *mockStorage) GetOracle() oracle.Oracle {
return nil
}
func (s *mockStorage) SupportDeleteRange() (supported bool) {
return false
}
func (s *mockStorage) Name() string {
return "KVMockStorage"
}
func (s *mockStorage) Describe() string {
return "KVMockStorage is a mock Store implementation, only for unittests in KV package"
}
func (s *mockStorage) ShowStatus(ctx context.Context, key string) (interface{}, error) {
return nil, nil
}
func (s *mockStorage) GetMemCache() MemManager {
return nil
}
// newMockStorage creates a new mockStorage.
func newMockStorage() Storage {
return &mockStorage{}
}
type mockSnapshot struct {
store Retriever
}
func (s *mockSnapshot) Get(ctx context.Context, k Key) ([]byte, error) {
return s.store.Get(ctx, k)
}
func (s *mockSnapshot) SetPriority(priority int) {
}
func (s *mockSnapshot) BatchGet(ctx context.Context, keys []Key) (map[string][]byte, error) {
m := make(map[string][]byte, len(keys))
for _, k := range keys {
v, err := s.store.Get(ctx, k)
if IsErrNotFound(err) {
continue
}
if err != nil {
return nil, err
}
m[string(k)] = v
}
return m, nil
}
func (s *mockSnapshot) Iter(k Key, upperBound Key) (Iterator, error) {
return s.store.Iter(k, upperBound)
}
func (s *mockSnapshot) IterReverse(k Key) (Iterator, error) {
return s.store.IterReverse(k)
}
func (s *mockSnapshot) SetOption(opt int, val interface{}) {}
func (s *mockSnapshot) DelOption(opt int) {}
| kv/interface_mock_test.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.003845575265586376,
0.0003102846385445446,
0.0001655373489484191,
0.0001677577820373699,
0.000693623733241111
] |
{
"id": 9,
"code_window": [
"\t\t\t\treturn kv.ErrLockAcquireFailAndNoWaitSet\n",
"\t\t\t} else if action.LockWaitTime == tidbkv.LockAlwaysWait {\n",
"\t\t\t\t// do nothing but keep wait\n",
"\t\t\t} else {\n",
"\t\t\t\t// the lockWaitTime is set, we should return wait timeout if we are still blocked by a lock\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t} else if action.LockWaitTime == LockAlwaysWait {\n"
],
"file_path": "store/tikv/pessimistic.go",
"type": "replace",
"edit_start_line_idx": 180
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package expression
import (
"fmt"
"math"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
)
func (b *builtinTimeIsNullSig) vectorized() bool {
return true
}
func (b *builtinTimeIsNullSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
numRows := input.NumRows()
buf, err := b.bufAllocator.get(types.ETDatetime, numRows)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalTime(b.ctx, input, buf); err != nil {
return err
}
result.ResizeInt64(numRows, false)
i64s := result.Int64s()
for i := 0; i < numRows; i++ {
if buf.IsNull(i) {
i64s[i] = 1
} else {
i64s[i] = 0
}
}
return nil
}
func (b *builtinLogicOrSig) vectorized() bool {
return true
}
func (b *builtinLogicOrSig) fallbackEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
result.ResizeInt64(n, false)
x := result.Int64s()
for i := 0; i < n; i++ {
res, isNull, err := b.evalInt(input.GetRow(i))
if err != nil {
return err
}
result.SetNull(i, isNull)
if isNull {
continue
}
x[i] = res
}
return nil
}
func (b *builtinLogicOrSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil {
return err
}
n := input.NumRows()
buf, err := b.bufAllocator.get(types.ETInt, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
sc := b.ctx.GetSessionVars().StmtCtx
beforeWarns := sc.WarningCount()
err = b.args[1].VecEvalInt(b.ctx, input, buf)
afterWarns := sc.WarningCount()
if err != nil || afterWarns > beforeWarns {
if afterWarns > beforeWarns {
sc.TruncateWarnings(int(beforeWarns))
}
return b.fallbackEvalInt(input, result)
}
i64s := result.Int64s()
arg1s := buf.Int64s()
for i := 0; i < n; i++ {
isNull0 := result.IsNull(i)
isNull1 := buf.IsNull(i)
		// Because result is reused to store the evaluation of args[0] in place, it could
		// be that args[0] is null while args[1] is nonzero, in which case the OR result
		// is 1. In these cases, we need to clear the null bit mask of the corresponding
		// row in result.
// See https://dev.mysql.com/doc/refman/5.7/en/logical-operators.html#operator_or
isNull := false
if (!isNull0 && i64s[i] != 0) || (!isNull1 && arg1s[i] != 0) {
i64s[i] = 1
} else if isNull0 || isNull1 {
isNull = true
} else {
i64s[i] = 0
}
if isNull != isNull0 {
result.SetNull(i, isNull)
}
}
return nil
}
func (b *builtinBitOrSig) vectorized() bool {
return true
}
func (b *builtinBitOrSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil {
return err
}
numRows := input.NumRows()
buf, err := b.bufAllocator.get(types.ETInt, numRows)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[1].VecEvalInt(b.ctx, input, buf); err != nil {
return err
}
arg0s := result.Int64s()
arg1s := buf.Int64s()
result.MergeNulls(buf)
for i := 0; i < numRows; i++ {
arg0s[i] |= arg1s[i]
}
return nil
}
func (b *builtinDecimalIsFalseSig) vectorized() bool {
return true
}
func (b *builtinDecimalIsFalseSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
numRows := input.NumRows()
buf, err := b.bufAllocator.get(types.ETDecimal, numRows)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalDecimal(b.ctx, input, buf); err != nil {
return err
}
decs := buf.Decimals()
result.ResizeInt64(numRows, false)
i64s := result.Int64s()
for i := 0; i < numRows; i++ {
isNull := buf.IsNull(i)
if b.keepNull && isNull {
result.SetNull(i, true)
continue
}
if isNull || !decs[i].IsZero() {
i64s[i] = 0
} else {
i64s[i] = 1
}
}
return nil
}
func (b *builtinIntIsFalseSig) vectorized() bool {
return true
}
func (b *builtinIntIsFalseSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
numRows := input.NumRows()
if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil {
return err
}
i64s := result.Int64s()
for i := 0; i < numRows; i++ {
isNull := result.IsNull(i)
if b.keepNull && isNull {
continue
}
if isNull {
i64s[i] = 0
result.SetNull(i, false)
} else if i64s[i] != 0 {
i64s[i] = 0
} else {
i64s[i] = 1
}
}
return nil
}
func (b *builtinUnaryMinusRealSig) vectorized() bool {
return true
}
func (b *builtinUnaryMinusRealSig) vecEvalReal(input *chunk.Chunk, result *chunk.Column) error {
var err error
if err = b.args[0].VecEvalReal(b.ctx, input, result); err != nil {
return err
}
n := input.NumRows()
f64s := result.Float64s()
for i := 0; i < n; i++ {
f64s[i] = -f64s[i]
}
return nil
}
func (b *builtinBitNegSig) vectorized() bool {
return true
}
func (b *builtinBitNegSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil {
return err
}
n := input.NumRows()
args := result.Int64s()
for i := 0; i < n; i++ {
args[i] = ^args[i]
}
return nil
}
func (b *builtinUnaryMinusDecimalSig) vectorized() bool {
return true
}
func (b *builtinUnaryMinusDecimalSig) vecEvalDecimal(input *chunk.Chunk, result *chunk.Column) error {
if err := b.args[0].VecEvalDecimal(b.ctx, input, result); err != nil {
return err
}
n := input.NumRows()
decs := result.Decimals()
for i := 0; i < n; i++ {
if result.IsNull(i) {
continue
}
decs[i] = *types.DecimalNeg(&decs[i])
}
return nil
}
func (b *builtinIntIsNullSig) vectorized() bool {
return true
}
func (b *builtinIntIsNullSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil {
return err
}
i64s := result.Int64s()
for i := 0; i < len(i64s); i++ {
if result.IsNull(i) {
i64s[i] = 1
result.SetNull(i, false)
} else {
i64s[i] = 0
}
}
return nil
}
func (b *builtinRealIsNullSig) vectorized() bool {
return true
}
func (b *builtinRealIsNullSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
numRows := input.NumRows()
buf, err := b.bufAllocator.get(types.ETReal, numRows)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalReal(b.ctx, input, buf); err != nil {
return err
}
result.ResizeInt64(numRows, false)
i64s := result.Int64s()
for i := 0; i < numRows; i++ {
if buf.IsNull(i) {
i64s[i] = 1
} else {
i64s[i] = 0
}
}
return nil
}
func (b *builtinUnaryNotRealSig) vectorized() bool {
return true
}
func (b *builtinUnaryNotRealSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
buf, err := b.bufAllocator.get(types.ETReal, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalReal(b.ctx, input, buf); err != nil {
return err
}
f64s := buf.Float64s()
result.ResizeInt64(n, false)
result.MergeNulls(buf)
i64s := result.Int64s()
for i := 0; i < n; i++ {
if result.IsNull(i) {
continue
}
if f64s[i] == 0 {
i64s[i] = 1
} else {
i64s[i] = 0
}
}
return nil
}
func (b *builtinLogicAndSig) vectorized() bool {
return true
}
func (b *builtinLogicAndSig) fallbackEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
result.ResizeInt64(n, false)
x := result.Int64s()
for i := 0; i < n; i++ {
res, isNull, err := b.evalInt(input.GetRow(i))
if err != nil {
return err
}
result.SetNull(i, isNull)
if isNull {
continue
}
x[i] = res
}
return nil
}
func (b *builtinLogicAndSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil {
return err
}
buf1, err := b.bufAllocator.get(types.ETInt, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf1)
sc := b.ctx.GetSessionVars().StmtCtx
beforeWarns := sc.WarningCount()
err = b.args[1].VecEvalInt(b.ctx, input, buf1)
afterWarns := sc.WarningCount()
if err != nil || afterWarns > beforeWarns {
if afterWarns > beforeWarns {
sc.TruncateWarnings(int(beforeWarns))
}
return b.fallbackEvalInt(input, result)
}
i64s := result.Int64s()
arg1 := buf1.Int64s()
for i := 0; i < n; i++ {
isNull0 := result.IsNull(i)
if !isNull0 && i64s[i] == 0 {
result.SetNull(i, false)
continue
}
isNull1 := buf1.IsNull(i)
if !isNull1 && arg1[i] == 0 {
i64s[i] = 0
result.SetNull(i, false)
continue
}
if isNull0 || isNull1 {
result.SetNull(i, true)
continue
}
i64s[i] = 1
}
return nil
}
func (b *builtinBitXorSig) vectorized() bool {
return true
}
func (b *builtinBitXorSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil {
return err
}
numRows := input.NumRows()
buf, err := b.bufAllocator.get(types.ETInt, numRows)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[1].VecEvalInt(b.ctx, input, buf); err != nil {
return err
}
arg0s := result.Int64s()
arg1s := buf.Int64s()
result.MergeNulls(buf)
for i := 0; i < numRows; i++ {
arg0s[i] ^= arg1s[i]
}
return nil
}
func (b *builtinLogicXorSig) vectorized() bool {
return true
}
func (b *builtinLogicXorSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil {
return err
}
n := input.NumRows()
buf, err := b.bufAllocator.get(types.ETInt, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[1].VecEvalInt(b.ctx, input, buf); err != nil {
return err
}
i64s := result.Int64s()
arg1s := buf.Int64s()
// Returns NULL if either operand is NULL.
// See https://dev.mysql.com/doc/refman/5.7/en/logical-operators.html#operator_xor
result.MergeNulls(buf)
for i := 0; i < n; i++ {
if result.IsNull(i) {
continue
}
arg0 := i64s[i]
arg1 := arg1s[i]
if (arg0 != 0 && arg1 != 0) || (arg0 == 0 && arg1 == 0) {
i64s[i] = 0
} else {
i64s[i] = 1
}
}
return nil
}
func (b *builtinBitAndSig) vectorized() bool {
return true
}
func (b *builtinBitAndSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil {
return err
}
numRows := input.NumRows()
buf, err := b.bufAllocator.get(types.ETInt, numRows)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[1].VecEvalInt(b.ctx, input, buf); err != nil {
return err
}
arg0s := result.Int64s()
arg1s := buf.Int64s()
result.MergeNulls(buf)
for i := 0; i < numRows; i++ {
arg0s[i] &= arg1s[i]
}
return nil
}
func (b *builtinRealIsFalseSig) vectorized() bool {
return true
}
func (b *builtinRealIsFalseSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
numRows := input.NumRows()
buf, err := b.bufAllocator.get(types.ETReal, numRows)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalReal(b.ctx, input, buf); err != nil {
return err
}
result.ResizeInt64(numRows, false)
i64s := result.Int64s()
bufF64s := buf.Float64s()
for i := 0; i < numRows; i++ {
isNull := buf.IsNull(i)
if b.keepNull && isNull {
result.SetNull(i, true)
continue
}
if isNull || bufF64s[i] != 0 {
i64s[i] = 0
} else {
i64s[i] = 1
}
}
return nil
}
func (b *builtinUnaryMinusIntSig) vectorized() bool {
return true
}
func (b *builtinUnaryMinusIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil {
return err
}
n := input.NumRows()
args := result.Int64s()
if mysql.HasUnsignedFlag(b.args[0].GetType().Flag) {
for i := 0; i < n; i++ {
if result.IsNull(i) {
continue
}
if uint64(args[i]) > uint64(-math.MinInt64) {
return types.ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("-%v", uint64(args[i])))
}
args[i] = -args[i]
}
} else {
for i := 0; i < n; i++ {
if result.IsNull(i) {
continue
}
if args[i] == math.MinInt64 {
return types.ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("-%v", args[i]))
}
args[i] = -args[i]
}
}
return nil
}
func (b *builtinUnaryNotDecimalSig) vectorized() bool {
return true
}
func (b *builtinUnaryNotDecimalSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
buf, err := b.bufAllocator.get(types.ETDecimal, n)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalDecimal(b.ctx, input, buf); err != nil {
return err
}
decs := buf.Decimals()
result.ResizeInt64(n, false)
result.MergeNulls(buf)
i64s := result.Int64s()
for i := 0; i < n; i++ {
if result.IsNull(i) {
continue
}
if decs[i].IsZero() {
i64s[i] = 1
} else {
i64s[i] = 0
}
}
return nil
}
func (b *builtinUnaryNotIntSig) vectorized() bool {
return true
}
func (b *builtinUnaryNotIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
n := input.NumRows()
if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil {
return err
}
i64s := result.Int64s()
for i := 0; i < n; i++ {
if result.IsNull(i) {
continue
}
if i64s[i] == 0 {
i64s[i] = 1
} else {
i64s[i] = 0
}
}
return nil
}
func (b *builtinDecimalIsNullSig) vectorized() bool {
return true
}
func (b *builtinDecimalIsNullSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
numRows := input.NumRows()
buf, err := b.bufAllocator.get(types.ETDecimal, numRows)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalDecimal(b.ctx, input, buf); err != nil {
return err
}
result.ResizeInt64(numRows, false)
i64s := result.Int64s()
for i := 0; i < numRows; i++ {
if buf.IsNull(i) {
i64s[i] = 1
} else {
i64s[i] = 0
}
}
return nil
}
func (b *builtinLeftShiftSig) vectorized() bool {
return true
}
func (b *builtinLeftShiftSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil {
return err
}
numRows := input.NumRows()
buf, err := b.bufAllocator.get(types.ETInt, numRows)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[1].VecEvalInt(b.ctx, input, buf); err != nil {
return err
}
arg0s := result.Int64s()
arg1s := buf.Int64s()
result.MergeNulls(buf)
for i := 0; i < numRows; i++ {
arg0s[i] = int64(uint64(arg0s[i]) << uint64(arg1s[i]))
}
return nil
}
func (b *builtinRightShiftSig) vectorized() bool {
return true
}
func (b *builtinRightShiftSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil {
return err
}
numRows := input.NumRows()
buf, err := b.bufAllocator.get(types.ETInt, numRows)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[1].VecEvalInt(b.ctx, input, buf); err != nil {
return err
}
arg0s := result.Int64s()
arg1s := buf.Int64s()
result.MergeNulls(buf)
for i := 0; i < numRows; i++ {
arg0s[i] = int64(uint64(arg0s[i]) >> uint64(arg1s[i]))
}
return nil
}
func (b *builtinRealIsTrueSig) vectorized() bool {
return true
}
func (b *builtinRealIsTrueSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
numRows := input.NumRows()
buf, err := b.bufAllocator.get(types.ETReal, numRows)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalReal(b.ctx, input, buf); err != nil {
return err
}
result.ResizeInt64(numRows, false)
f64s := buf.Float64s()
i64s := result.Int64s()
for i := 0; i < numRows; i++ {
isNull := buf.IsNull(i)
if b.keepNull && isNull {
result.SetNull(i, true)
continue
}
if isNull || f64s[i] == 0 {
i64s[i] = 0
} else {
i64s[i] = 1
}
}
return nil
}
func (b *builtinDecimalIsTrueSig) vectorized() bool {
return true
}
func (b *builtinDecimalIsTrueSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
numRows := input.NumRows()
buf, err := b.bufAllocator.get(types.ETDecimal, numRows)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalDecimal(b.ctx, input, buf); err != nil {
return err
}
decs := buf.Decimals()
result.ResizeInt64(numRows, false)
i64s := result.Int64s()
for i := 0; i < numRows; i++ {
isNull := buf.IsNull(i)
if b.keepNull && isNull {
result.SetNull(i, true)
continue
}
if isNull || decs[i].IsZero() {
i64s[i] = 0
} else {
i64s[i] = 1
}
}
return nil
}
func (b *builtinIntIsTrueSig) vectorized() bool {
return true
}
func (b *builtinIntIsTrueSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
numRows := input.NumRows()
if err := b.args[0].VecEvalInt(b.ctx, input, result); err != nil {
return err
}
i64s := result.Int64s()
for i := 0; i < numRows; i++ {
isNull := result.IsNull(i)
if b.keepNull && isNull {
continue
}
if isNull {
i64s[i] = 0
result.SetNull(i, false)
} else if i64s[i] != 0 {
i64s[i] = 1
}
}
return nil
}
func (b *builtinDurationIsNullSig) vectorized() bool {
return true
}
func (b *builtinDurationIsNullSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
numRows := input.NumRows()
buf, err := b.bufAllocator.get(types.ETDuration, numRows)
if err != nil {
return err
}
defer b.bufAllocator.put(buf)
if err := b.args[0].VecEvalDuration(b.ctx, input, buf); err != nil {
return err
}
result.ResizeInt64(numRows, false)
i64s := result.Int64s()
for i := 0; i < numRows; i++ {
if buf.IsNull(i) {
i64s[i] = 1
} else {
i64s[i] = 0
}
}
return nil
}
| expression/builtin_op_vec.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.00017710041720420122,
0.0001703517191344872,
0.0001642487186472863,
0.0001707945193629712,
0.000002818370830937056
] |
{
"id": 9,
"code_window": [
"\t\t\t\treturn kv.ErrLockAcquireFailAndNoWaitSet\n",
"\t\t\t} else if action.LockWaitTime == tidbkv.LockAlwaysWait {\n",
"\t\t\t\t// do nothing but keep wait\n",
"\t\t\t} else {\n",
"\t\t\t\t// the lockWaitTime is set, we should return wait timeout if we are still blocked by a lock\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t} else if action.LockWaitTime == LockAlwaysWait {\n"
],
"file_path": "store/tikv/pessimistic.go",
"type": "replace",
"edit_start_line_idx": 180
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package copr
import (
"context"
"fmt"
"io"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"unsafe"
"github.com/cznic/mathutil"
"github.com/gogo/protobuf/proto"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/coprocessor"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/domain/infosync"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/kv"
tidbmetrics "github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/store/tikv"
tikvstore "github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/logutil"
"github.com/pingcap/tidb/store/tikv/metrics"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/store/tikv/util"
"github.com/pingcap/tidb/util/execdetails"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tipb/go-tipb"
"go.uber.org/zap"
)
var coprCacheHistogramEvict = tidbmetrics.DistSQLCoprCacheHistogram.WithLabelValues("evict")
// Maximum total sleep time(in ms) for kv/cop commands.
const (
copBuildTaskMaxBackoff = 5000
copNextMaxBackoff = 20000
)
// CopClient is coprocessor client.
type CopClient struct {
kv.RequestTypeSupportedChecker
store *Store
replicaReadSeed uint32
}
// Send builds the request and gets the coprocessor iterator response.
func (c *CopClient) Send(ctx context.Context, req *kv.Request, variables interface{}, sessionMemTracker *memory.Tracker, enabledRateLimitAction bool) kv.Response {
vars, ok := variables.(*tikv.Variables)
if !ok {
return copErrorResponse{errors.Errorf("unsupported variables:%+v", variables)}
}
if req.StoreType == kv.TiFlash && req.BatchCop {
logutil.BgLogger().Debug("send batch requests")
return c.sendBatch(ctx, req, vars)
}
ctx = context.WithValue(ctx, tikv.TxnStartKey, req.StartTs)
bo := tikv.NewBackofferWithVars(ctx, copBuildTaskMaxBackoff, vars)
ranges := toTiKVKeyRanges(req.KeyRanges)
tasks, err := buildCopTasks(bo, c.store.GetRegionCache(), ranges, req)
if err != nil {
return copErrorResponse{err}
}
it := &copIterator{
store: c.store,
req: req,
concurrency: req.Concurrency,
finishCh: make(chan struct{}),
vars: vars,
memTracker: req.MemTracker,
replicaReadSeed: c.replicaReadSeed,
rpcCancel: tikv.NewRPCanceller(),
resolvedLocks: util.NewTSSet(5),
}
it.tasks = tasks
if it.concurrency > len(tasks) {
it.concurrency = len(tasks)
}
if it.concurrency < 1 {
// Make sure that there is at least one worker.
it.concurrency = 1
}
if it.req.KeepOrder {
it.sendRate = util.NewRateLimit(2 * it.concurrency)
it.respChan = nil
} else {
capacity := it.concurrency
if enabledRateLimitAction {
			// The count of cached responses in memory is controlled by the capacity of it.sendRate, not the capacity of the respChan.
			// As the worker sends a finCopResp after each task is handled, we make the capacity of the respCh equal to
			// 2*it.concurrency to avoid deadlocks in the unit tests caused by `MustExec` or `Exec`.
capacity = it.concurrency * 2
}
it.respChan = make(chan *copResponse, capacity)
it.sendRate = util.NewRateLimit(it.concurrency)
}
it.actionOnExceed = newRateLimitAction(uint(it.sendRate.GetCapacity()))
if sessionMemTracker != nil {
sessionMemTracker.FallbackOldAndSetNewAction(it.actionOnExceed)
}
if !it.req.Streaming {
ctx = context.WithValue(ctx, tikv.RPCCancellerCtxKey{}, it.rpcCancel)
}
it.open(ctx, enabledRateLimitAction)
return it
}
// copTask contains a related Region and KeyRange for a kv.Request.
type copTask struct {
region tikv.RegionVerID
ranges *tikv.KeyRanges
respChan chan *copResponse
storeAddr string
cmdType tikvrpc.CmdType
storeType kv.StoreType
}
func (r *copTask) String() string {
return fmt.Sprintf("region(%d %d %d) ranges(%d) store(%s)",
r.region.GetID(), r.region.GetConfVer(), r.region.GetVer(), r.ranges.Len(), r.storeAddr)
}
// rangesPerTask limits the length of the ranges slice sent in one copTask.
const rangesPerTask = 25000
func buildCopTasks(bo *tikv.Backoffer, cache *tikv.RegionCache, ranges *tikv.KeyRanges, req *kv.Request) ([]*copTask, error) {
start := time.Now()
cmdType := tikvrpc.CmdCop
if req.Streaming {
cmdType = tikvrpc.CmdCopStream
}
if req.StoreType == kv.TiDB {
return buildTiDBMemCopTasks(ranges, req)
}
rangesLen := ranges.Len()
var tasks []*copTask
appendTask := func(regionWithRangeInfo *tikv.KeyLocation, ranges *tikv.KeyRanges) {
// TiKV will return gRPC error if the message is too large. So we need to limit the length of the ranges slice
// to make sure the message can be sent successfully.
rLen := ranges.Len()
for i := 0; i < rLen; {
nextI := mathutil.Min(i+rangesPerTask, rLen)
tasks = append(tasks, &copTask{
region: regionWithRangeInfo.Region,
ranges: ranges.Slice(i, nextI),
// Channel buffer is 2 for handling region split.
// In a common case, two region split tasks will not be blocked.
respChan: make(chan *copResponse, 2),
cmdType: cmdType,
storeType: req.StoreType,
})
i = nextI
}
}
err := tikv.SplitKeyRanges(bo, cache, ranges, appendTask)
if err != nil {
return nil, errors.Trace(err)
}
if req.Desc {
reverseTasks(tasks)
}
if elapsed := time.Since(start); elapsed > time.Millisecond*500 {
logutil.BgLogger().Warn("buildCopTasks takes too much time",
zap.Duration("elapsed", elapsed),
zap.Int("range len", rangesLen),
zap.Int("task len", len(tasks)))
}
metrics.TxnRegionsNumHistogramWithCoprocessor.Observe(float64(len(tasks)))
return tasks, nil
}
func buildTiDBMemCopTasks(ranges *tikv.KeyRanges, req *kv.Request) ([]*copTask, error) {
servers, err := infosync.GetAllServerInfo(context.Background())
if err != nil {
return nil, err
}
cmdType := tikvrpc.CmdCop
if req.Streaming {
cmdType = tikvrpc.CmdCopStream
}
tasks := make([]*copTask, 0, len(servers))
for _, ser := range servers {
if req.TiDBServerID > 0 && req.TiDBServerID != ser.ServerIDGetter() {
continue
}
addr := ser.IP + ":" + strconv.FormatUint(uint64(ser.StatusPort), 10)
tasks = append(tasks, &copTask{
ranges: ranges,
respChan: make(chan *copResponse, 2),
cmdType: cmdType,
storeType: req.StoreType,
storeAddr: addr,
})
}
return tasks, nil
}
func reverseTasks(tasks []*copTask) {
for i := 0; i < len(tasks)/2; i++ {
j := len(tasks) - i - 1
tasks[i], tasks[j] = tasks[j], tasks[i]
}
}
type copIterator struct {
store *Store
req *kv.Request
concurrency int
finishCh chan struct{}
// If keepOrder, results are stored in copTask.respChan, read them out one by one.
tasks []*copTask
// curr indicates the curr id of the finished copTask
curr int
// sendRate controls the sending rate of copIteratorTaskSender
sendRate *util.RateLimit
// Otherwise, results are stored in respChan.
respChan chan *copResponse
vars *tikv.Variables
memTracker *memory.Tracker
replicaReadSeed uint32
rpcCancel *tikv.RPCCanceller
wg sync.WaitGroup
// closed represents when the Close is called.
	// There are two cases in which we need to close the `finishCh` channel: one is when the context is done, the other is
	// when Close is called. We use atomic.CompareAndSwap on `closed` to make sure the channel is not closed twice.
closed uint32
resolvedLocks *util.TSSet
actionOnExceed *rateLimitAction
}
// copIteratorWorker receives tasks from copIteratorTaskSender, handles tasks and sends the copResponse to respChan.
type copIteratorWorker struct {
taskCh <-chan *copTask
wg *sync.WaitGroup
store *Store
req *kv.Request
respChan chan<- *copResponse
finishCh <-chan struct{}
vars *tikv.Variables
*tikv.ClientHelper
memTracker *memory.Tracker
replicaReadSeed uint32
actionOnExceed *rateLimitAction
}
// copIteratorTaskSender sends tasks to taskCh then wait for the workers to exit.
type copIteratorTaskSender struct {
taskCh chan<- *copTask
wg *sync.WaitGroup
tasks []*copTask
finishCh <-chan struct{}
respChan chan<- *copResponse
sendRate *util.RateLimit
}
type copResponse struct {
pbResp *coprocessor.Response
detail *CopRuntimeStats
startKey kv.Key
err error
respSize int64
respTime time.Duration
}
const sizeofExecDetails = int(unsafe.Sizeof(execdetails.ExecDetails{}))
// GetData implements the kv.ResultSubset GetData interface.
func (rs *copResponse) GetData() []byte {
return rs.pbResp.Data
}
// GetStartKey implements the kv.ResultSubset GetStartKey interface.
func (rs *copResponse) GetStartKey() kv.Key {
return rs.startKey
}
func (rs *copResponse) GetCopRuntimeStats() *CopRuntimeStats {
return rs.detail
}
// MemSize returns how many bytes of memory this response uses
func (rs *copResponse) MemSize() int64 {
if rs.respSize != 0 {
return rs.respSize
}
if rs == finCopResp {
return 0
}
// ignore rs.err
rs.respSize += int64(cap(rs.startKey))
if rs.detail != nil {
rs.respSize += int64(sizeofExecDetails)
}
if rs.pbResp != nil {
		// Using an approximate size since it's hard to get an accurate value.
rs.respSize += int64(rs.pbResp.Size())
}
return rs.respSize
}
func (rs *copResponse) RespTime() time.Duration {
return rs.respTime
}
const minLogCopTaskTime = 300 * time.Millisecond
// When the worker finishes `handleTask`, we need to notify the copIterator that one task is finished.
// For the non-keep-order case, we send a finCopResp into the respCh after `handleTask`. When copIterator receives
// finCopResp from the respCh, it knows that one more task is finished.
var finCopResp *copResponse
func init() {
finCopResp = &copResponse{}
}
// run is a worker function that get a copTask from channel, handle it and
// send the result back.
func (worker *copIteratorWorker) run(ctx context.Context) {
defer func() {
failpoint.Inject("ticase-4169", func(val failpoint.Value) {
if val.(bool) {
worker.memTracker.Consume(10 * MockResponseSizeForTest)
worker.memTracker.Consume(10 * MockResponseSizeForTest)
}
})
worker.wg.Done()
}()
for task := range worker.taskCh {
respCh := worker.respChan
if respCh == nil {
respCh = task.respChan
}
worker.handleTask(ctx, task, respCh)
if worker.respChan != nil {
// When a task is finished by the worker, send a finCopResp into channel to notify the copIterator that
// there is a task finished.
worker.sendToRespCh(finCopResp, worker.respChan, false)
}
close(task.respChan)
if worker.vars != nil && worker.vars.Killed != nil && atomic.LoadUint32(worker.vars.Killed) == 1 {
return
}
select {
case <-worker.finishCh:
return
default:
}
}
}
// open starts workers and sender goroutines.
func (it *copIterator) open(ctx context.Context, enabledRateLimitAction bool) {
taskCh := make(chan *copTask, 1)
it.wg.Add(it.concurrency)
// Start it.concurrency number of workers to handle cop requests.
for i := 0; i < it.concurrency; i++ {
worker := &copIteratorWorker{
taskCh: taskCh,
wg: &it.wg,
store: it.store,
req: it.req,
respChan: it.respChan,
finishCh: it.finishCh,
vars: it.vars,
ClientHelper: tikv.NewClientHelper(it.store.KVStore, it.resolvedLocks),
memTracker: it.memTracker,
replicaReadSeed: it.replicaReadSeed,
actionOnExceed: it.actionOnExceed,
}
go worker.run(ctx)
}
taskSender := &copIteratorTaskSender{
taskCh: taskCh,
wg: &it.wg,
tasks: it.tasks,
finishCh: it.finishCh,
sendRate: it.sendRate,
}
taskSender.respChan = it.respChan
it.actionOnExceed.setEnabled(enabledRateLimitAction)
failpoint.Inject("ticase-4171", func(val failpoint.Value) {
if val.(bool) {
it.memTracker.Consume(10 * MockResponseSizeForTest)
it.memTracker.Consume(10 * MockResponseSizeForTest)
}
})
go taskSender.run()
}
func (sender *copIteratorTaskSender) run() {
// Send tasks to feed the worker goroutines.
for _, t := range sender.tasks {
// we control the sending rate to prevent all tasks
// being done (aka. all of the responses are buffered) by copIteratorWorker.
// We keep the number of inflight tasks within the number of 2 * concurrency when Keep Order is true.
// If KeepOrder is false, the number equals the concurrency.
// It sends one more task if a task has been finished in copIterator.Next.
exit := sender.sendRate.GetToken(sender.finishCh)
if exit {
break
}
exit = sender.sendToTaskCh(t)
if exit {
break
}
}
close(sender.taskCh)
// Wait for worker goroutines to exit.
sender.wg.Wait()
if sender.respChan != nil {
close(sender.respChan)
}
}
func (it *copIterator) recvFromRespCh(ctx context.Context, respCh <-chan *copResponse) (resp *copResponse, ok bool, exit bool) {
ticker := time.NewTicker(3 * time.Second)
defer ticker.Stop()
for {
select {
case resp, ok = <-respCh:
if it.memTracker != nil && resp != nil {
consumed := resp.MemSize()
failpoint.Inject("testRateLimitActionMockConsumeAndAssert", func(val failpoint.Value) {
if val.(bool) {
if resp != finCopResp {
consumed = MockResponseSizeForTest
}
}
})
it.memTracker.Consume(-consumed)
}
return
case <-it.finishCh:
exit = true
return
case <-ticker.C:
if atomic.LoadUint32(it.vars.Killed) == 1 {
resp = &copResponse{err: tikvstore.ErrQueryInterrupted}
ok = true
return
}
case <-ctx.Done():
// We select the ctx.Done() in the thread of `Next` instead of in the worker to avoid the cost of `WithCancel`.
if atomic.CompareAndSwapUint32(&it.closed, 0, 1) {
close(it.finishCh)
}
exit = true
return
}
}
}
func (sender *copIteratorTaskSender) sendToTaskCh(t *copTask) (exit bool) {
select {
case sender.taskCh <- t:
case <-sender.finishCh:
exit = true
}
return
}
func (worker *copIteratorWorker) sendToRespCh(resp *copResponse, respCh chan<- *copResponse, checkOOM bool) (exit bool) {
if worker.memTracker != nil && checkOOM {
consumed := resp.MemSize()
failpoint.Inject("testRateLimitActionMockConsumeAndAssert", func(val failpoint.Value) {
if val.(bool) {
if resp != finCopResp {
consumed = MockResponseSizeForTest
}
}
})
worker.memTracker.Consume(consumed)
}
select {
case respCh <- resp:
case <-worker.finishCh:
exit = true
}
return
}
// MockResponseSizeForTest mock the response size
const MockResponseSizeForTest = 100 * 1024 * 1024
// Next returns next coprocessor result.
// NOTE: Use nil to indicate finish, so if the returned ResultSubset is not nil, reader should continue to call Next().
func (it *copIterator) Next(ctx context.Context) (kv.ResultSubset, error) {
var (
resp *copResponse
ok bool
closed bool
)
defer func() {
if resp == nil {
failpoint.Inject("ticase-4170", func(val failpoint.Value) {
if val.(bool) {
it.memTracker.Consume(10 * MockResponseSizeForTest)
it.memTracker.Consume(10 * MockResponseSizeForTest)
}
})
}
}()
	// wait until at least 5 copResponses are received.
failpoint.Inject("testRateLimitActionMockWaitMax", func(val failpoint.Value) {
if val.(bool) {
// we only need to trigger oom at least once.
if len(it.tasks) > 9 {
for it.memTracker.MaxConsumed() < 5*MockResponseSizeForTest {
time.Sleep(10 * time.Millisecond)
}
}
}
})
// If data order matters, response should be returned in the same order as copTask slice.
// Otherwise all responses are returned from a single channel.
if it.respChan != nil {
// Get next fetched resp from chan
resp, ok, closed = it.recvFromRespCh(ctx, it.respChan)
if !ok || closed {
it.actionOnExceed.close()
return nil, nil
}
if resp == finCopResp {
it.actionOnExceed.destroyTokenIfNeeded(func() {
it.sendRate.PutToken()
})
return it.Next(ctx)
}
} else {
for {
if it.curr >= len(it.tasks) {
				// Resp will be nil if the iterator is finished.
it.actionOnExceed.close()
return nil, nil
}
task := it.tasks[it.curr]
resp, ok, closed = it.recvFromRespCh(ctx, task.respChan)
if closed {
// Close() is already called, so Next() is invalid.
return nil, nil
}
if ok {
break
}
it.actionOnExceed.destroyTokenIfNeeded(func() {
it.sendRate.PutToken()
})
// Switch to next task.
it.tasks[it.curr] = nil
it.curr++
}
}
if resp.err != nil {
return nil, errors.Trace(resp.err)
}
err := it.store.CheckVisibility(it.req.StartTs)
if err != nil {
return nil, errors.Trace(err)
}
return resp, nil
}
// Associate each region with an independent backoffer. In this way, when multiple regions are
// unavailable, TiDB can execute very quickly without blocking
func chooseBackoffer(ctx context.Context, backoffermap map[uint64]*tikv.Backoffer, task *copTask, worker *copIteratorWorker) *tikv.Backoffer {
bo, ok := backoffermap[task.region.GetID()]
if ok {
return bo
}
newbo := tikv.NewBackofferWithVars(ctx, copNextMaxBackoff, worker.vars)
backoffermap[task.region.GetID()] = newbo
return newbo
}
// handleTask handles single copTask, sends the result to channel, retry automatically on error.
func (worker *copIteratorWorker) handleTask(ctx context.Context, task *copTask, respCh chan<- *copResponse) {
defer func() {
r := recover()
if r != nil {
logutil.BgLogger().Error("copIteratorWork meet panic",
zap.Reflect("r", r),
zap.Stack("stack trace"))
resp := &copResponse{err: errors.Errorf("%v", r)}
// if panic has happened, set checkOOM to false to avoid another panic.
worker.sendToRespCh(resp, respCh, false)
}
}()
remainTasks := []*copTask{task}
backoffermap := make(map[uint64]*tikv.Backoffer)
for len(remainTasks) > 0 {
curTask := remainTasks[0]
bo := chooseBackoffer(ctx, backoffermap, curTask, worker)
tasks, err := worker.handleTaskOnce(bo, curTask, respCh)
if err != nil {
resp := &copResponse{err: errors.Trace(err)}
worker.sendToRespCh(resp, respCh, true)
return
}
// test whether the ctx is cancelled
if vars := bo.GetVars(); vars != nil && vars.Killed != nil && atomic.LoadUint32(vars.Killed) == 1 {
return
}
if len(tasks) > 0 {
remainTasks = append(tasks, remainTasks[1:]...)
} else {
remainTasks = remainTasks[1:]
}
}
if worker.store.coprCache != nil && worker.store.coprCache.cache.Metrics != nil {
coprCacheHistogramEvict.Observe(float64(worker.store.coprCache.cache.Metrics.KeysEvicted()))
}
}
// handleTaskOnce handles single copTask, successful results are send to channel.
// If error happened, returns error. If region split or meet lock, returns the remain tasks.
func (worker *copIteratorWorker) handleTaskOnce(bo *tikv.Backoffer, task *copTask, ch chan<- *copResponse) ([]*copTask, error) {
failpoint.Inject("handleTaskOnceError", func(val failpoint.Value) {
if val.(bool) {
failpoint.Return(nil, errors.New("mock handleTaskOnce error"))
}
})
copReq := coprocessor.Request{
Tp: worker.req.Tp,
StartTs: worker.req.StartTs,
Data: worker.req.Data,
Ranges: task.ranges.ToPBRanges(),
SchemaVer: worker.req.SchemaVar,
}
var cacheKey []byte = nil
var cacheValue *coprCacheValue = nil
	// If there are many ranges, it is very likely to be a TableLookupRequest. Such requests are not worth caching since
	// computation is not the main cost. Ignore them directly to avoid slowly building the cache key.
if task.cmdType == tikvrpc.CmdCop && worker.store.coprCache != nil && worker.req.Cacheable && worker.store.coprCache.CheckRequestAdmission(len(copReq.Ranges)) {
cKey, err := coprCacheBuildKey(&copReq)
if err == nil {
cacheKey = cKey
cValue := worker.store.coprCache.Get(cKey)
copReq.IsCacheEnabled = true
if cValue != nil && cValue.RegionID == task.region.GetID() && cValue.TimeStamp <= worker.req.StartTs {
// Append cache version to the request to skip Coprocessor computation if possible
// when request result is cached
copReq.CacheIfMatchVersion = cValue.RegionDataVersion
cacheValue = cValue
} else {
copReq.CacheIfMatchVersion = 0
}
} else {
logutil.BgLogger().Warn("Failed to build copr cache key", zap.Error(err))
}
}
req := tikvrpc.NewReplicaReadRequest(task.cmdType, &copReq, worker.req.ReplicaRead, &worker.replicaReadSeed, kvrpcpb.Context{
IsolationLevel: tikv.IsolationLevelToPB(worker.req.IsolationLevel),
Priority: tikv.PriorityToPB(worker.req.Priority),
NotFillCache: worker.req.NotFillCache,
RecordTimeStat: true,
RecordScanStat: true,
TaskId: worker.req.TaskID,
})
req.StoreTp = getEndPointType(task.storeType)
startTime := time.Now()
if worker.Stats == nil {
worker.Stats = make(map[tikvrpc.CmdType]*tikv.RPCRuntimeStats)
}
if worker.req.IsStaleness {
req.EnableStaleRead()
}
var ops []tikv.StoreSelectorOption
if len(worker.req.MatchStoreLabels) > 0 {
ops = append(ops, tikv.WithMatchLabels(worker.req.MatchStoreLabels))
}
resp, rpcCtx, storeAddr, err := worker.SendReqCtx(bo, req, task.region, tikv.ReadTimeoutMedium, getEndPointType(task.storeType), task.storeAddr, ops...)
if err != nil {
if task.storeType == kv.TiDB {
err = worker.handleTiDBSendReqErr(err, task, ch)
return nil, err
}
return nil, errors.Trace(err)
}
	// Set the task.storeAddr field so that its task.String() method has the store address information.
task.storeAddr = storeAddr
costTime := time.Since(startTime)
if costTime > minLogCopTaskTime {
worker.logTimeCopTask(costTime, task, bo, resp)
}
metrics.TiKVCoprocessorHistogram.Observe(costTime.Seconds())
if task.cmdType == tikvrpc.CmdCopStream {
return worker.handleCopStreamResult(bo, rpcCtx, resp.Resp.(*tikvrpc.CopStreamResponse), task, ch, costTime)
}
// Handles the response for non-streaming copTask.
return worker.handleCopResponse(bo, rpcCtx, &copResponse{pbResp: resp.Resp.(*coprocessor.Response)}, cacheKey, cacheValue, task, ch, nil, costTime)
}
const (
minLogBackoffTime = 100
minLogKVProcessTime = 100
)
func (worker *copIteratorWorker) logTimeCopTask(costTime time.Duration, task *copTask, bo *tikv.Backoffer, resp *tikvrpc.Response) {
logStr := fmt.Sprintf("[TIME_COP_PROCESS] resp_time:%s txnStartTS:%d region_id:%d store_addr:%s", costTime, worker.req.StartTs, task.region.GetID(), task.storeAddr)
if bo.GetTotalSleep() > minLogBackoffTime {
backoffTypes := strings.Replace(fmt.Sprintf("%v", bo.GetTypes()), " ", ",", -1)
logStr += fmt.Sprintf(" backoff_ms:%d backoff_types:%s", bo.GetTotalSleep(), backoffTypes)
}
var detailV2 *kvrpcpb.ExecDetailsV2
var detail *kvrpcpb.ExecDetails
if resp.Resp != nil {
switch r := resp.Resp.(type) {
case *coprocessor.Response:
detailV2 = r.ExecDetailsV2
detail = r.ExecDetails
case *tikvrpc.CopStreamResponse:
			// streaming request returns io.EOF, so the first CopStreamResponse.Response may be nil.
if r.Response != nil {
detailV2 = r.Response.ExecDetailsV2
detail = r.Response.ExecDetails
}
default:
panic("unreachable")
}
}
var timeDetail *kvrpcpb.TimeDetail
if detailV2 != nil && detailV2.TimeDetail != nil {
timeDetail = detailV2.TimeDetail
} else if detail != nil && detail.TimeDetail != nil {
timeDetail = detail.TimeDetail
}
if timeDetail != nil {
logStr += fmt.Sprintf(" kv_process_ms:%d", timeDetail.ProcessWallTimeMs)
logStr += fmt.Sprintf(" kv_wait_ms:%d", timeDetail.WaitWallTimeMs)
if timeDetail.ProcessWallTimeMs <= minLogKVProcessTime {
logStr = strings.Replace(logStr, "TIME_COP_PROCESS", "TIME_COP_WAIT", 1)
}
}
if detailV2 != nil && detailV2.ScanDetailV2 != nil {
logStr += fmt.Sprintf(" processed_versions:%d", detailV2.ScanDetailV2.ProcessedVersions)
logStr += fmt.Sprintf(" total_versions:%d", detailV2.ScanDetailV2.TotalVersions)
logStr += fmt.Sprintf(" rocksdb_delete_skipped_count:%d", detailV2.ScanDetailV2.RocksdbDeleteSkippedCount)
logStr += fmt.Sprintf(" rocksdb_key_skipped_count:%d", detailV2.ScanDetailV2.RocksdbKeySkippedCount)
logStr += fmt.Sprintf(" rocksdb_cache_hit_count:%d", detailV2.ScanDetailV2.RocksdbBlockCacheHitCount)
logStr += fmt.Sprintf(" rocksdb_read_count:%d", detailV2.ScanDetailV2.RocksdbBlockReadCount)
logStr += fmt.Sprintf(" rocksdb_read_byte:%d", detailV2.ScanDetailV2.RocksdbBlockReadByte)
} else if detail != nil && detail.ScanDetail != nil {
logStr = appendScanDetail(logStr, "write", detail.ScanDetail.Write)
logStr = appendScanDetail(logStr, "data", detail.ScanDetail.Data)
logStr = appendScanDetail(logStr, "lock", detail.ScanDetail.Lock)
}
logutil.Logger(bo.GetCtx()).Info(logStr)
}
func appendScanDetail(logStr string, columnFamily string, scanInfo *kvrpcpb.ScanInfo) string {
if scanInfo != nil {
logStr += fmt.Sprintf(" scan_total_%s:%d", columnFamily, scanInfo.Total)
logStr += fmt.Sprintf(" scan_processed_%s:%d", columnFamily, scanInfo.Processed)
}
return logStr
}
func (worker *copIteratorWorker) handleCopStreamResult(bo *tikv.Backoffer, rpcCtx *tikv.RPCContext, stream *tikvrpc.CopStreamResponse, task *copTask, ch chan<- *copResponse, costTime time.Duration) ([]*copTask, error) {
defer stream.Close()
var resp *coprocessor.Response
var lastRange *coprocessor.KeyRange
resp = stream.Response
if resp == nil {
// streaming request returns io.EOF, so the first Response is nil.
return nil, nil
}
for {
remainedTasks, err := worker.handleCopResponse(bo, rpcCtx, &copResponse{pbResp: resp}, nil, nil, task, ch, lastRange, costTime)
if err != nil || len(remainedTasks) != 0 {
return remainedTasks, errors.Trace(err)
}
resp, err = stream.Recv()
if err != nil {
if errors.Cause(err) == io.EOF {
return nil, nil
}
boRPCType := tikv.BoTiKVRPC
if task.storeType == kv.TiFlash {
boRPCType = tikv.BoTiFlashRPC
}
if err1 := bo.Backoff(boRPCType, errors.Errorf("recv stream response error: %v, task: %s", err, task)); err1 != nil {
return nil, errors.Trace(err)
}
			// No coprocessor.Response for a network error; rebuild the task based on the last successful one.
if errors.Cause(err) == context.Canceled {
logutil.BgLogger().Info("stream recv timeout", zap.Error(err))
} else {
logutil.BgLogger().Info("stream unknown error", zap.Error(err))
}
return worker.buildCopTasksFromRemain(bo, lastRange, task)
}
if resp.Range != nil {
lastRange = resp.Range
}
}
}
// handleCopResponse checks coprocessor Response for region split and lock,
// returns more tasks when that happens, or handles the response if no error.
// If we're handling a streaming coprocessor response, lastRange is the range of the last
// successful response, otherwise it's nil.
func (worker *copIteratorWorker) handleCopResponse(bo *tikv.Backoffer, rpcCtx *tikv.RPCContext, resp *copResponse, cacheKey []byte, cacheValue *coprCacheValue, task *copTask, ch chan<- *copResponse, lastRange *coprocessor.KeyRange, costTime time.Duration) ([]*copTask, error) {
if regionErr := resp.pbResp.GetRegionError(); regionErr != nil {
if rpcCtx != nil && task.storeType == kv.TiDB {
resp.err = errors.Errorf("error: %v", regionErr)
worker.sendToRespCh(resp, ch, true)
return nil, nil
}
errStr := fmt.Sprintf("region_id:%v, region_ver:%v, store_type:%s, peer_addr:%s, error:%s",
task.region.GetID(), task.region.GetVer(), task.storeType.Name(), task.storeAddr, regionErr.String())
if err := bo.Backoff(tikv.BoRegionMiss, errors.New(errStr)); err != nil {
return nil, errors.Trace(err)
}
// We may meet RegionError at the first packet, but not during visiting the stream.
return buildCopTasks(bo, worker.store.GetRegionCache(), task.ranges, worker.req)
}
if lockErr := resp.pbResp.GetLocked(); lockErr != nil {
logutil.BgLogger().Debug("coprocessor encounters",
zap.Stringer("lock", lockErr))
msBeforeExpired, err1 := worker.ResolveLocks(bo, worker.req.StartTs, []*tikv.Lock{tikv.NewLock(lockErr)})
if err1 != nil {
return nil, errors.Trace(err1)
}
if msBeforeExpired > 0 {
if err := bo.BackoffWithMaxSleep(tikv.BoTxnLockFast, int(msBeforeExpired), errors.New(lockErr.String())); err != nil {
return nil, errors.Trace(err)
}
}
return worker.buildCopTasksFromRemain(bo, lastRange, task)
}
if otherErr := resp.pbResp.GetOtherError(); otherErr != "" {
err := errors.Errorf("other error: %s", otherErr)
logutil.BgLogger().Warn("other error",
zap.Uint64("txnStartTS", worker.req.StartTs),
zap.Uint64("regionID", task.region.GetID()),
zap.String("storeAddr", task.storeAddr),
zap.Error(err))
return nil, errors.Trace(err)
}
// When the request is using streaming API, the `Range` is not nil.
if resp.pbResp.Range != nil {
resp.startKey = resp.pbResp.Range.Start
} else if task.ranges != nil && task.ranges.Len() > 0 {
resp.startKey = kv.Key(task.ranges.At(0).StartKey)
}
if resp.detail == nil {
resp.detail = new(CopRuntimeStats)
}
resp.detail.Stats = worker.Stats
worker.Stats = nil
backoffTimes := bo.GetBackoffTimes()
resp.detail.BackoffTime = time.Duration(bo.GetTotalSleep()) * time.Millisecond
resp.detail.BackoffSleep = make(map[string]time.Duration, len(backoffTimes))
resp.detail.BackoffTimes = make(map[string]int, len(backoffTimes))
for backoff := range backoffTimes {
backoffName := backoff.String()
resp.detail.BackoffTimes[backoffName] = backoffTimes[backoff]
resp.detail.BackoffSleep[backoffName] = time.Duration(bo.GetBackoffSleepMS()[backoff]) * time.Millisecond
}
if rpcCtx != nil {
resp.detail.CalleeAddress = rpcCtx.Addr
}
resp.respTime = costTime
sd := &util.ScanDetail{}
td := util.TimeDetail{}
if pbDetails := resp.pbResp.ExecDetailsV2; pbDetails != nil {
// Take values in `ExecDetailsV2` first.
if timeDetail := pbDetails.TimeDetail; timeDetail != nil {
td.MergeFromTimeDetail(timeDetail)
}
if scanDetailV2 := pbDetails.ScanDetailV2; scanDetailV2 != nil {
sd.MergeFromScanDetailV2(scanDetailV2)
}
} else if pbDetails := resp.pbResp.ExecDetails; pbDetails != nil {
if timeDetail := pbDetails.TimeDetail; timeDetail != nil {
td.MergeFromTimeDetail(timeDetail)
}
if scanDetail := pbDetails.ScanDetail; scanDetail != nil {
if scanDetail.Write != nil {
sd.ProcessedKeys = scanDetail.Write.Processed
sd.TotalKeys = scanDetail.Write.Total
}
}
}
resp.detail.ScanDetail = sd
resp.detail.TimeDetail = td
if resp.pbResp.IsCacheHit {
if cacheValue == nil {
return nil, errors.New("Internal error: received illegal TiKV response")
}
// Cache hit and is valid: use cached data as response data and we don't update the cache.
data := make([]byte, len(cacheValue.Data))
copy(data, cacheValue.Data)
resp.pbResp.Data = data
resp.detail.CoprCacheHit = true
} else {
// Cache not hit or cache hit but not valid: update the cache if the response can be cached.
if cacheKey != nil && resp.pbResp.CanBeCached && resp.pbResp.CacheLastVersion > 0 {
if worker.store.coprCache.CheckResponseAdmission(resp.pbResp.Data.Size(), resp.detail.TimeDetail.ProcessTime) {
data := make([]byte, len(resp.pbResp.Data))
copy(data, resp.pbResp.Data)
newCacheValue := coprCacheValue{
Data: data,
TimeStamp: worker.req.StartTs,
RegionID: task.region.GetID(),
RegionDataVersion: resp.pbResp.CacheLastVersion,
}
worker.store.coprCache.Set(cacheKey, &newCacheValue)
}
}
}
worker.sendToRespCh(resp, ch, true)
return nil, nil
}
// CopRuntimeStats contains execution detail information.
type CopRuntimeStats struct {
execdetails.ExecDetails
tikv.RegionRequestRuntimeStats
CoprCacheHit bool
}
func (worker *copIteratorWorker) handleTiDBSendReqErr(err error, task *copTask, ch chan<- *copResponse) error {
errCode := errno.ErrUnknown
errMsg := err.Error()
if terror.ErrorEqual(err, tikvstore.ErrTiKVServerTimeout) {
errCode = errno.ErrTiKVServerTimeout
errMsg = "TiDB server timeout, address is " + task.storeAddr
}
if terror.ErrorEqual(err, tikvstore.ErrTiFlashServerTimeout) {
errCode = errno.ErrTiFlashServerTimeout
errMsg = "TiDB server timeout, address is " + task.storeAddr
}
selResp := tipb.SelectResponse{
Warnings: []*tipb.Error{
{
Code: int32(errCode),
Msg: errMsg,
},
},
}
data, err := proto.Marshal(&selResp)
if err != nil {
return errors.Trace(err)
}
resp := &copResponse{
pbResp: &coprocessor.Response{
Data: data,
},
detail: &CopRuntimeStats{},
}
worker.sendToRespCh(resp, ch, true)
return nil
}
func (worker *copIteratorWorker) buildCopTasksFromRemain(bo *tikv.Backoffer, lastRange *coprocessor.KeyRange, task *copTask) ([]*copTask, error) {
remainedRanges := task.ranges
if worker.req.Streaming && lastRange != nil {
remainedRanges = worker.calculateRemain(task.ranges, lastRange, worker.req.Desc)
}
return buildCopTasks(bo, worker.store.GetRegionCache(), remainedRanges, worker.req)
}
// calculateRemain splits the input ranges into two, and take one of them according to desc flag.
// It's used in the streaming API to calculate which ranges are consumed and which need to be retried.
// For example:
// ranges: [r1 --> r2) [r3 --> r4)
// split: [s1 --> s2)
// In normal scan order, all data before s1 is consumed, so the remain ranges should be [s1 --> r2) [r3 --> r4)
// In reverse scan order, all data after s2 is consumed, so the remain ranges should be [r1 --> r2) [r3 --> s2)
func (worker *copIteratorWorker) calculateRemain(ranges *tikv.KeyRanges, split *coprocessor.KeyRange, desc bool) *tikv.KeyRanges {
if desc {
left, _ := ranges.Split(split.End)
return left
}
_, right := ranges.Split(split.Start)
return right
}
func (it *copIterator) Close() error {
if atomic.CompareAndSwapUint32(&it.closed, 0, 1) {
close(it.finishCh)
}
it.rpcCancel.CancelAll()
it.actionOnExceed.close()
it.wg.Wait()
return nil
}
// copErrorResponse returns error when calling Next()
type copErrorResponse struct{ error }
func (it copErrorResponse) Next(ctx context.Context) (kv.ResultSubset, error) {
return nil, it.error
}
func (it copErrorResponse) Close() error {
return nil
}
// rateLimitAction is an OOM Action used to control the tokens when OOM is triggered. The token number should be
// set at initialization. Each time the Action is triggered, one token is destroyed. If the token count is less
// than 2, the action is delegated to the fallback action.
type rateLimitAction struct {
memory.BaseOOMAction
// enabled indicates whether the rateLimitAction is permitted to Action. 1 means permitted, 0 denied.
enabled uint32
// totalTokenNum indicates the total token at initial
totalTokenNum uint
cond struct {
sync.Mutex
// exceeded indicates whether an OOM situation has been encountered.
exceeded bool
// remainingTokenNum indicates the count of tokens that still exist
remainingTokenNum uint
once sync.Once
// triggerCountForTest indicates the total count of the rateLimitAction's Action being executed
triggerCountForTest uint
}
}
func newRateLimitAction(totalTokenNumber uint) *rateLimitAction {
return &rateLimitAction{
totalTokenNum: totalTokenNumber,
cond: struct {
sync.Mutex
exceeded bool
remainingTokenNum uint
once sync.Once
triggerCountForTest uint
}{
Mutex: sync.Mutex{},
exceeded: false,
remainingTokenNum: totalTokenNumber,
once: sync.Once{},
},
}
}
// Action implements ActionOnExceed.Action
func (e *rateLimitAction) Action(t *memory.Tracker) {
if !e.isEnabled() {
if fallback := e.GetFallback(); fallback != nil {
fallback.Action(t)
}
return
}
e.conditionLock()
defer e.conditionUnlock()
e.cond.once.Do(func() {
if e.cond.remainingTokenNum < 2 {
e.setEnabled(false)
logutil.BgLogger().Info("memory exceeds quota, rateLimitAction delegate to fallback action",
zap.Uint("total token count", e.totalTokenNum))
if fallback := e.GetFallback(); fallback != nil {
fallback.Action(t)
}
return
}
failpoint.Inject("testRateLimitActionMockConsumeAndAssert", func(val failpoint.Value) {
if val.(bool) {
if e.cond.triggerCountForTest+e.cond.remainingTokenNum != e.totalTokenNum {
panic("triggerCount + remainingTokenNum not equal to totalTokenNum")
}
}
})
logutil.BgLogger().Info("memory exceeds quota, destroy one token now.",
zap.Int64("consumed", t.BytesConsumed()),
zap.Int64("quota", t.GetBytesLimit()),
zap.Uint("total token count", e.totalTokenNum),
zap.Uint("remaining token count", e.cond.remainingTokenNum))
e.cond.exceeded = true
e.cond.triggerCountForTest++
})
}
// SetLogHook implements ActionOnExceed.SetLogHook
func (e *rateLimitAction) SetLogHook(hook func(uint64)) {
}
// GetPriority get the priority of the Action.
func (e *rateLimitAction) GetPriority() int64 {
return memory.DefRateLimitPriority
}
// destroyTokenIfNeeded checks the `exceeded` flag after a copWorker finishes one task.
// If the flag is true and no token has been destroyed before, one token will be destroyed;
// otherwise the token is returned.
func (e *rateLimitAction) destroyTokenIfNeeded(returnToken func()) {
if !e.isEnabled() {
returnToken()
return
}
e.conditionLock()
defer e.conditionUnlock()
if !e.cond.exceeded {
returnToken()
return
}
// If actionOnExceed has been triggered and no token has been destroyed before,
// destroy one token.
e.cond.remainingTokenNum = e.cond.remainingTokenNum - 1
e.cond.exceeded = false
e.cond.once = sync.Once{}
}
func (e *rateLimitAction) conditionLock() {
e.cond.Lock()
}
func (e *rateLimitAction) conditionUnlock() {
e.cond.Unlock()
}
func (e *rateLimitAction) close() {
if !e.isEnabled() {
return
}
e.setEnabled(false)
e.conditionLock()
defer e.conditionUnlock()
e.cond.exceeded = false
}
func (e *rateLimitAction) setEnabled(enabled bool) {
newValue := uint32(0)
if enabled {
newValue = uint32(1)
}
atomic.StoreUint32(&e.enabled, newValue)
}
func (e *rateLimitAction) isEnabled() bool {
return atomic.LoadUint32(&e.enabled) > 0
}
| store/copr/coprocessor.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0021431248169392347,
0.00019134278409183025,
0.0001615733781363815,
0.0001676791434874758,
0.0001816115836845711
] |
{
"id": 10,
"code_window": [
"\ttxn2.SetOption(kv.Pessimistic, true)\n",
"\n",
"\t// test no wait\n",
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: tidbkv.LockNoWait, WaitStartTime: time.Now()}\n",
"\terr = txn2.LockKeys(context.Background(), lockCtx, k2)\n",
"\t// cannot acquire lock immediately thus error\n",
"\tc.Assert(err.Error(), Equals, kv.ErrLockAcquireFailAndNoWaitSet.Error())\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: tikv.LockNoWait, WaitStartTime: time.Now()}\n"
],
"file_path": "store/tikv/tests/2pc_test.go",
"type": "replace",
"edit_start_line_idx": 880
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv_test
import (
"bytes"
"context"
"fmt"
"math"
"math/rand"
"strings"
"sync"
"sync/atomic"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
tidbkv "github.com/pingcap/tidb/kv"
drivertxn "github.com/pingcap/tidb/store/driver/txn"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/config"
"github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
"github.com/pingcap/tidb/store/tikv/mockstore/mocktikv"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/tablecodec"
)
var (
txnCommitBatchSize = tikv.ConfigProbe{}.GetTxnCommitBatchSize()
bigTxnThreshold = tikv.ConfigProbe{}.GetBigTxnThreshold()
)
type testCommitterSuite struct {
OneByOneSuite
cluster cluster.Cluster
store tikv.StoreProbe
}
var _ = SerialSuites(&testCommitterSuite{})
func (s *testCommitterSuite) SetUpSuite(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // 3s
s.OneByOneSuite.SetUpSuite(c)
atomic.StoreUint64(&tikv.CommitMaxBackoff, 1000)
}
func (s *testCommitterSuite) SetUpTest(c *C) {
mvccStore, err := mocktikv.NewMVCCLevelDB("")
c.Assert(err, IsNil)
cluster := mocktikv.NewCluster(mvccStore)
mocktikv.BootstrapWithMultiRegions(cluster, []byte("a"), []byte("b"), []byte("c"))
s.cluster = cluster
client := mocktikv.NewRPCClient(cluster, mvccStore, nil)
pdCli := &tikv.CodecPDClient{Client: mocktikv.NewPDClient(cluster)}
spkv := tikv.NewMockSafePointKV()
store, err := tikv.NewKVStore("mocktikv-store", pdCli, spkv, client)
store.EnableTxnLocalLatches(1024000)
c.Assert(err, IsNil)
// TODO: make it possible
// store, err := mockstore.NewMockStore(
// mockstore.WithStoreType(mockstore.MockTiKV),
// mockstore.WithClusterInspector(func(c cluster.Cluster) {
// mockstore.BootstrapWithMultiRegions(c, []byte("a"), []byte("b"), []byte("c"))
// s.cluster = c
// }),
// mockstore.WithPDClientHijacker(func(c pd.Client) pd.Client {
// return &codecPDClient{c}
// }),
// mockstore.WithTxnLocalLatches(1024000),
// )
// c.Assert(err, IsNil)
s.store = tikv.StoreProbe{KVStore: store}
}
func (s *testCommitterSuite) TearDownSuite(c *C) {
atomic.StoreUint64(&tikv.CommitMaxBackoff, 20000)
s.store.Close()
s.OneByOneSuite.TearDownSuite(c)
}
func (s *testCommitterSuite) begin(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
return txn
}
func (s *testCommitterSuite) beginAsyncCommit(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.SetOption(kv.EnableAsyncCommit, true)
return txn
}
func (s *testCommitterSuite) checkValues(c *C, m map[string]string) {
txn := s.begin(c)
for k, v := range m {
val, err := txn.Get(context.TODO(), []byte(k))
c.Assert(err, IsNil)
c.Assert(string(val), Equals, v)
}
}
func (s *testCommitterSuite) mustCommit(c *C, m map[string]string) {
txn := s.begin(c)
for k, v := range m {
err := txn.Set([]byte(k), []byte(v))
c.Assert(err, IsNil)
}
err := txn.Commit(context.Background())
c.Assert(err, IsNil)
s.checkValues(c, m)
}
func randKV(keyLen, valLen int) (string, string) {
const letters = "abc"
k, v := make([]byte, keyLen), make([]byte, valLen)
for i := range k {
k[i] = letters[rand.Intn(len(letters))]
}
for i := range v {
v[i] = letters[rand.Intn(len(letters))]
}
return string(k), string(v)
}
func (s *testCommitterSuite) TestDeleteYourWritesTTL(c *C) {
conf := *config.GetGlobalConfig()
oldConf := conf
defer config.StoreGlobalConfig(&oldConf)
conf.TiKVClient.TTLRefreshedTxnSize = 0
config.StoreGlobalConfig(&conf)
{
txn := s.begin(c)
err := txn.GetMemBuffer().SetWithFlags([]byte("bb"), []byte{0}, kv.SetPresumeKeyNotExists)
c.Assert(err, IsNil)
err = txn.Set([]byte("ba"), []byte{1})
c.Assert(err, IsNil)
err = txn.Delete([]byte("bb"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
c.Check(committer.IsTTLRunning(), IsTrue)
}
{
txn := s.begin(c)
err := txn.GetMemBuffer().SetWithFlags([]byte("dd"), []byte{0}, kv.SetPresumeKeyNotExists)
c.Assert(err, IsNil)
err = txn.Set([]byte("de"), []byte{1})
c.Assert(err, IsNil)
err = txn.Delete([]byte("dd"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
c.Check(committer.IsTTLRunning(), IsTrue)
}
}
func (s *testCommitterSuite) TestCommitRollback(c *C) {
s.mustCommit(c, map[string]string{
"a": "a",
"b": "b",
"c": "c",
})
txn := s.begin(c)
txn.Set([]byte("a"), []byte("a1"))
txn.Set([]byte("b"), []byte("b1"))
txn.Set([]byte("c"), []byte("c1"))
s.mustCommit(c, map[string]string{
"c": "c2",
})
err := txn.Commit(context.Background())
c.Assert(err, NotNil)
s.checkValues(c, map[string]string{
"a": "a",
"b": "b",
"c": "c2",
})
}
func (s *testCommitterSuite) TestPrewriteRollback(c *C) {
s.mustCommit(c, map[string]string{
"a": "a0",
"b": "b0",
})
ctx := context.Background()
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
txn2 := s.begin(c)
v, err := txn2.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a0"))
err = committer.PrewriteAllMutations(ctx)
if err != nil {
// Retry.
txn1 = s.begin(c)
err = txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err = txn1.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
}
commitTS, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
committer.SetCommitTS(commitTS)
err = committer.CommitMutations(ctx)
c.Assert(err, IsNil)
txn3 := s.begin(c)
v, err = txn3.Get(context.TODO(), []byte("b"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("b1"))
}
func (s *testCommitterSuite) TestContextCancel(c *C) {
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
ctx, cancel := context.WithCancel(context.Background())
cancel() // cancel the context
err = committer.PrewriteAllMutations(ctx)
c.Assert(errors.Cause(err), Equals, context.Canceled)
}
func (s *testCommitterSuite) TestContextCancel2(c *C) {
txn := s.begin(c)
err := txn.Set([]byte("a"), []byte("a"))
c.Assert(err, IsNil)
err = txn.Set([]byte("b"), []byte("b"))
c.Assert(err, IsNil)
ctx, cancel := context.WithCancel(context.Background())
err = txn.Commit(ctx)
c.Assert(err, IsNil)
cancel()
// Secondary keys should not be canceled.
time.Sleep(time.Millisecond * 20)
c.Assert(s.isKeyLocked(c, []byte("b")), IsFalse)
}
func (s *testCommitterSuite) TestContextCancelRetryable(c *C) {
txn1, txn2, txn3 := s.begin(c), s.begin(c), s.begin(c)
// txn1 locks "b"
err := txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
// txn3 writes "c"
err = txn3.Set([]byte("c"), []byte("c3"))
c.Assert(err, IsNil)
err = txn3.Commit(context.Background())
c.Assert(err, IsNil)
// txn2 writes "a"(PK), "b", "c" on different regions.
// "c" will return a retryable error.
// "b" will get a Locked error first, then the context must be canceled after backoff for lock.
err = txn2.Set([]byte("a"), []byte("a2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("c"), []byte("c2"))
c.Assert(err, IsNil)
err = txn2.Commit(context.Background())
c.Assert(err, NotNil)
c.Assert(tidbkv.ErrWriteConflictInTiDB.Equal(err), IsTrue, Commentf("err: %s", err))
}
func (s *testCommitterSuite) TestContextCancelCausingUndetermined(c *C) {
// For a normal transaction, if RPC returns context.Canceled error while sending commit
// requests, the transaction should go to the undetermined state.
txn := s.begin(c)
err := txn.Set([]byte("a"), []byte("va"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/rpcContextCancelErr", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/rpcContextCancelErr"), IsNil)
}()
err = committer.CommitMutations(context.Background())
c.Assert(committer.GetUndeterminedErr(), NotNil)
c.Assert(errors.Cause(err), Equals, context.Canceled)
}
func (s *testCommitterSuite) mustGetRegionID(c *C, key []byte) uint64 {
loc, err := s.store.GetRegionCache().LocateKey(tikv.NewBackofferWithVars(context.Background(), 500, nil), key)
c.Assert(err, IsNil)
return loc.Region.GetID()
}
func (s *testCommitterSuite) isKeyLocked(c *C, key []byte) bool {
ver, err := s.store.CurrentTimestamp(oracle.GlobalTxnScope)
c.Assert(err, IsNil)
bo := tikv.NewBackofferWithVars(context.Background(), 500, nil)
req := tikvrpc.NewRequest(tikvrpc.CmdGet, &kvrpcpb.GetRequest{
Key: key,
Version: ver,
})
loc, err := s.store.GetRegionCache().LocateKey(bo, key)
c.Assert(err, IsNil)
resp, err := s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
keyErr := (resp.Resp.(*kvrpcpb.GetResponse)).GetError()
return keyErr.GetLocked() != nil
}
func (s *testCommitterSuite) TestPrewriteCancel(c *C) {
// Setup region delays for key "b" and "c".
delays := map[uint64]time.Duration{
s.mustGetRegionID(c, []byte("b")): time.Millisecond * 10,
s.mustGetRegionID(c, []byte("c")): time.Millisecond * 20,
}
s.store.SetTiKVClient(&slowClient{
Client: s.store.GetTiKVClient(),
regionDelays: delays,
})
txn1, txn2 := s.begin(c), s.begin(c)
// txn2 writes "b"
err := txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
err = txn2.Commit(context.Background())
c.Assert(err, IsNil)
// txn1 writes "a"(PK), "b", "c" on different regions.
// "b" will return an error and cancel commit.
err = txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("c"), []byte("c1"))
c.Assert(err, IsNil)
err = txn1.Commit(context.Background())
c.Assert(err, NotNil)
// "c" should be cleaned up in reasonable time.
for i := 0; i < 50; i++ {
if !s.isKeyLocked(c, []byte("c")) {
return
}
time.Sleep(time.Millisecond * 10)
}
c.Fail()
}
// slowClient wraps rpcClient and makes some regions respond with delay.
type slowClient struct {
tikv.Client
regionDelays map[uint64]time.Duration
}
func (c *slowClient) SendReq(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
for id, delay := range c.regionDelays {
reqCtx := &req.Context
if reqCtx.GetRegionId() == id {
time.Sleep(delay)
}
}
return c.Client.SendRequest(ctx, addr, req, timeout)
}
func (s *testCommitterSuite) TestIllegalTso(c *C) {
txn := s.begin(c)
data := map[string]string{
"name": "aa",
"age": "12",
}
for k, v := range data {
err := txn.Set([]byte(k), []byte(v))
c.Assert(err, IsNil)
}
// make start ts bigger.
txn.SetStartTS(math.MaxUint64)
err := txn.Commit(context.Background())
c.Assert(err, NotNil)
errMsgMustContain(c, err, "invalid txnStartTS")
}
func errMsgMustContain(c *C, err error, msg string) {
c.Assert(strings.Contains(err.Error(), msg), IsTrue)
}
func (s *testCommitterSuite) TestCommitBeforePrewrite(c *C) {
txn := s.begin(c)
err := txn.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
ctx := context.Background()
committer.Cleanup(ctx)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, NotNil)
errMsgMustContain(c, err, "already rolled back")
}
func (s *testCommitterSuite) TestPrewritePrimaryKeyFailed(c *C) {
// commit (a,a1)
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Commit(context.Background())
c.Assert(err, IsNil)
// check a
txn := s.begin(c)
v, err := txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
// set txn2's startTs before txn1's
txn2 := s.begin(c)
txn2.SetStartTS(txn1.StartTS() - 1)
err = txn2.Set([]byte("a"), []byte("a2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
// prewrite:primary a failed, b success
err = txn2.Commit(context.Background())
c.Assert(err, NotNil)
// txn2 failed with a rollback for record a.
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
_, err = txn.Get(context.TODO(), []byte("b"))
errMsgMustContain(c, err, "key not exist")
// clean again, shouldn't fail when a rollback already exists.
ctx := context.Background()
committer, err := txn2.NewCommitter(0)
c.Assert(err, IsNil)
committer.Cleanup(ctx)
// check the data after rollback twice.
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
// update data in a new txn, should succeed.
err = txn.Set([]byte("a"), []byte("a3"))
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
// check value
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a3"))
}
func (s *testCommitterSuite) TestWrittenKeysOnConflict(c *C) {
// This test checks that when there is a write conflict, written keys is collected,
// so we can use it to clean up keys.
region, _ := s.cluster.GetRegionByKey([]byte("x"))
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(region.Id, newRegionID, []byte("y"), []uint64{newPeerID}, newPeerID)
var totalTime time.Duration
for i := 0; i < 10; i++ {
txn1 := s.begin(c)
txn2 := s.begin(c)
txn2.Set([]byte("x1"), []byte("1"))
committer2, err := txn2.NewCommitter(2)
c.Assert(err, IsNil)
err = committer2.Execute(context.Background())
c.Assert(err, IsNil)
txn1.Set([]byte("x1"), []byte("1"))
txn1.Set([]byte("y1"), []byte("2"))
committer1, err := txn1.NewCommitter(2)
c.Assert(err, IsNil)
err = committer1.Execute(context.Background())
c.Assert(err, NotNil)
committer1.WaitCleanup()
txn3 := s.begin(c)
start := time.Now()
txn3.Get(context.TODO(), []byte("y1"))
totalTime += time.Since(start)
txn3.Commit(context.Background())
}
c.Assert(totalTime, Less, time.Millisecond*200)
}
func (s *testCommitterSuite) TestPrewriteTxnSize(c *C) {
// Prepare two regions first: (, 100) and [100, )
region, _ := s.cluster.GetRegionByKey([]byte{50})
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(region.Id, newRegionID, []byte{100}, []uint64{newPeerID}, newPeerID)
txn := s.begin(c)
var val [1024]byte
for i := byte(50); i < 120; i++ {
err := txn.Set([]byte{i}, val[:])
c.Assert(err, IsNil)
}
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
ctx := context.Background()
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
// Check the written locks in the first region (50 keys)
for i := byte(50); i < 100; i++ {
lock := s.getLockInfo(c, []byte{i})
c.Assert(int(lock.TxnSize), Equals, 50)
}
// Check the written locks in the second region (20 keys)
for i := byte(100); i < 120; i++ {
lock := s.getLockInfo(c, []byte{i})
c.Assert(int(lock.TxnSize), Equals, 20)
}
}
func (s *testCommitterSuite) TestRejectCommitTS(c *C) {
txn := s.begin(c)
c.Assert(txn.Set([]byte("x"), []byte("v")), IsNil)
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, []byte("x"))
c.Assert(err, IsNil)
mutations := []*kvrpcpb.Mutation{
{
Op: committer.GetMutations().GetOp(0),
Key: committer.GetMutations().GetKey(0),
Value: committer.GetMutations().GetValue(0),
},
}
prewrite := &kvrpcpb.PrewriteRequest{
Mutations: mutations,
PrimaryLock: committer.GetPrimaryKey(),
StartVersion: committer.GetStartTS(),
LockTtl: committer.GetLockTTL(),
MinCommitTs: committer.GetStartTS() + 100, // Set minCommitTS
}
req := tikvrpc.NewRequest(tikvrpc.CmdPrewrite, prewrite)
_, err = s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
// Make commitTS less than minCommitTS.
committer.SetCommitTS(committer.GetStartTS() + 1)
// Ensure that the new commit ts is greater than minCommitTS when retrying
time.Sleep(3 * time.Millisecond)
err = committer.CommitMutations(context.Background())
c.Assert(err, IsNil)
// Use startTS+2 to read the data and get nothing.
// Use math.MaxUint64 to read the data and succeed.
// That means the final commitTS > startTS+2, so it's not the one we provided.
// This covers the retry-commitTS logic.
txn1, err := s.store.BeginWithStartTS(oracle.GlobalTxnScope, committer.GetStartTS()+2)
c.Assert(err, IsNil)
_, err = txn1.Get(bo.GetCtx(), []byte("x"))
c.Assert(tidbkv.IsErrNotFound(err), IsTrue)
txn2, err := s.store.BeginWithStartTS(oracle.GlobalTxnScope, math.MaxUint64)
c.Assert(err, IsNil)
val, err := txn2.Get(bo.GetCtx(), []byte("x"))
c.Assert(err, IsNil)
c.Assert(bytes.Equal(val, []byte("v")), IsTrue)
}
func (s *testCommitterSuite) TestPessimisticPrewriteRequest(c *C) {
// This test checks that the isPessimisticLock field is set in the request even when no keys are pessimistic locks.
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
err := txn.Set([]byte("t1"), []byte("v1"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
committer.SetForUpdateTS(100)
req := committer.BuildPrewriteRequest(1, 1, 1, committer.GetMutations().Slice(0, 1), 1)
c.Assert(len(req.Prewrite().IsPessimisticLock), Greater, 0)
c.Assert(req.Prewrite().ForUpdateTs, Equals, uint64(100))
}
func (s *testCommitterSuite) TestUnsetPrimaryKey(c *C) {
// This test checks that the isPessimisticLock field is set in the request even when no keys are pessimistic locks.
key := []byte("key")
txn := s.begin(c)
c.Assert(txn.Set(key, key), IsNil)
c.Assert(txn.Commit(context.Background()), IsNil)
txn = s.begin(c)
txn.SetOption(kv.Pessimistic, true)
_, _ = txn.GetUnionStore().Get(context.TODO(), key)
c.Assert(txn.GetMemBuffer().SetWithFlags(key, key, kv.SetPresumeKeyNotExists), IsNil)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, NotNil)
c.Assert(txn.Delete(key), IsNil)
key2 := []byte("key2")
c.Assert(txn.Set(key2, key2), IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
}
func (s *testCommitterSuite) TestPessimisticLockedKeysDedup(c *C) {
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
lockCtx := &kv.LockCtx{ForUpdateTS: 100, WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, []byte("abc"), []byte("def"))
c.Assert(err, IsNil)
lockCtx = &kv.LockCtx{ForUpdateTS: 100, WaitStartTime: time.Now()}
err = txn.LockKeys(context.Background(), lockCtx, []byte("abc"), []byte("def"))
c.Assert(err, IsNil)
c.Assert(txn.CollectLockedKeys(), HasLen, 2)
}
func (s *testCommitterSuite) TestPessimisticTTL(c *C) {
key := []byte("key")
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
time.Sleep(time.Millisecond * 100)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, IsNil)
time.Sleep(time.Millisecond * 100)
key2 := []byte("key2")
lockCtx = &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
err = txn.LockKeys(context.Background(), lockCtx, key2)
c.Assert(err, IsNil)
lockInfo := s.getLockInfo(c, key)
msBeforeLockExpired := s.store.GetOracle().UntilExpired(txn.StartTS(), lockInfo.LockTtl, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(msBeforeLockExpired, GreaterEqual, int64(100))
lr := s.store.NewLockResolver()
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
status, err := lr.GetTxnStatus(bo, txn.StartTS(), key2, 0, txn.StartTS(), true, false, nil)
c.Assert(err, IsNil)
c.Assert(status.TTL(), GreaterEqual, lockInfo.LockTtl)
// Check primary lock TTL is auto increasing while the pessimistic txn is ongoing.
for i := 0; i < 50; i++ {
lockInfoNew := s.getLockInfo(c, key)
if lockInfoNew.LockTtl > lockInfo.LockTtl {
currentTS, err := s.store.GetOracle().GetTimestamp(bo.GetCtx(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
// Check that the TTL is updated to a reasonable range.
expire := oracle.ExtractPhysical(txn.StartTS()) + int64(lockInfoNew.LockTtl)
now := oracle.ExtractPhysical(currentTS)
c.Assert(expire > now, IsTrue)
c.Assert(uint64(expire-now) <= atomic.LoadUint64(&tikv.ManagedLockTTL), IsTrue)
return
}
time.Sleep(100 * time.Millisecond)
}
c.Assert(false, IsTrue, Commentf("update pessimistic ttl fail"))
}
func (s *testCommitterSuite) TestPessimisticLockReturnValues(c *C) {
key := []byte("key")
key2 := []byte("key2")
txn := s.begin(c)
c.Assert(txn.Set(key, key), IsNil)
c.Assert(txn.Set(key2, key2), IsNil)
c.Assert(txn.Commit(context.Background()), IsNil)
txn = s.begin(c)
txn.SetOption(kv.Pessimistic, true)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
lockCtx.ReturnValues = true
lockCtx.Values = map[string]kv.ReturnedValue{}
c.Assert(txn.LockKeys(context.Background(), lockCtx, key, key2), IsNil)
c.Assert(lockCtx.Values, HasLen, 2)
c.Assert(lockCtx.Values[string(key)].Value, BytesEquals, key)
c.Assert(lockCtx.Values[string(key2)].Value, BytesEquals, key2)
}
// TestElapsedTTL tests that elapsed time is correct even if ts physical time is greater than local time.
func (s *testCommitterSuite) TestElapsedTTL(c *C) {
key := []byte("key")
txn := s.begin(c)
txn.SetStartTS(oracle.ComposeTS(oracle.GetPhysical(time.Now().Add(time.Second*10)), 1))
txn.SetOption(kv.Pessimistic, true)
time.Sleep(time.Millisecond * 100)
lockCtx := &kv.LockCtx{
ForUpdateTS: oracle.ComposeTS(oracle.ExtractPhysical(txn.StartTS())+100, 1),
WaitStartTime: time.Now(),
}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, IsNil)
lockInfo := s.getLockInfo(c, key)
c.Assert(lockInfo.LockTtl-atomic.LoadUint64(&tikv.ManagedLockTTL), GreaterEqual, uint64(100))
c.Assert(lockInfo.LockTtl-atomic.LoadUint64(&tikv.ManagedLockTTL), Less, uint64(150))
}
func (s *testCommitterSuite) TestDeleteYourWriteCauseGhostPrimary(c *C) {
s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
k1 := []byte("a") // insert but deleted key at first pos in txn1
k2 := []byte("b") // insert key at second pos in txn1
k3 := []byte("c") // insert key in txn1 and will be conflict read by txn2
// insert k1, k2, k3 and delete k1
txn1 := s.begin(c)
txn1.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
txn1.Get(context.Background(), k1)
txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
txn1.Set(k2, []byte{1})
txn1.Set(k3, []byte{2})
txn1.Delete(k1)
committer1, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
// setup test knob in txn's committer
ac, bk := make(chan struct{}), make(chan struct{})
committer1.SetPrimaryKeyBlocker(ac, bk)
txn1.SetCommitter(committer1)
var txn1Done sync.WaitGroup
txn1Done.Add(1)
go func() {
err1 := txn1.Commit(context.Background())
c.Assert(err1, IsNil)
txn1Done.Done()
}()
// resume after the primary key has been committed
<-ac
// start txn2 to read k3(prewrite success and primary should be committed)
txn2 := s.begin(c)
txn2.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
v, err := txn2.Get(context.Background(), k3)
c.Assert(err, IsNil) // should resolve lock and read txn1 k3 result instead of rollback it.
c.Assert(v[0], Equals, byte(2))
bk <- struct{}{}
txn1Done.Wait()
}
func (s *testCommitterSuite) TestDeleteAllYourWrites(c *C) {
s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
k1 := []byte("a")
k2 := []byte("b")
k3 := []byte("c")
// insert k1, k2, k3 and delete k1, k2, k3
txn1 := s.begin(c)
txn1.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
txn1.Delete(k1)
txn1.GetMemBuffer().SetWithFlags(k2, []byte{1}, kv.SetPresumeKeyNotExists)
txn1.Delete(k2)
txn1.GetMemBuffer().SetWithFlags(k3, []byte{2}, kv.SetPresumeKeyNotExists)
txn1.Delete(k3)
err1 := txn1.Commit(context.Background())
c.Assert(err1, IsNil)
}
func (s *testCommitterSuite) TestDeleteAllYourWritesWithSFU(c *C) {
s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
k1 := []byte("a")
k2 := []byte("b")
k3 := []byte("c")
// insert k1 and delete it, then lock k2 and k3 with select-for-update
txn1 := s.begin(c)
txn1.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
txn1.Delete(k1)
err := txn1.LockKeys(context.Background(), &kv.LockCtx{}, k2, k3) // select * from t where x in (k2, k3) for update
c.Assert(err, IsNil)
committer1, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
// setup test knob in txn's committer
ac, bk := make(chan struct{}), make(chan struct{})
committer1.SetPrimaryKeyBlocker(ac, bk)
txn1.SetCommitter(committer1)
var txn1Done sync.WaitGroup
txn1Done.Add(1)
go func() {
err1 := txn1.Commit(context.Background())
c.Assert(err1, IsNil)
txn1Done.Done()
}()
// resume after the primary key has been committed
<-ac
// start txn2 to read k3
txn2 := s.begin(c)
txn2.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
err = txn2.Set(k3, []byte{33})
c.Assert(err, IsNil)
var meetLocks []*tikv.Lock
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
resolver.SetMeetLockCallback(func(locks []*tikv.Lock) {
meetLocks = append(meetLocks, locks...)
})
err = txn2.Commit(context.Background())
c.Assert(err, IsNil)
bk <- struct{}{}
txn1Done.Wait()
c.Assert(meetLocks[0].Primary[0], Equals, k2[0])
}
// TestAcquireFalseTimeoutLock tests acquiring a key which is a secondary key of another transaction.
// The lock's own TTL is expired but the primary key is still alive due to heartbeats.
func (s *testCommitterSuite) TestAcquireFalseTimeoutLock(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 1000) // 1s
defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default test value
// k1 is the primary lock of txn1
k1 := []byte("k1")
// k2 is a secondary lock of txn1 and a key txn2 wants to lock
k2 := []byte("k2")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// lock the primary key
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1)
c.Assert(err, IsNil)
// lock the secondary key
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err = txn1.LockKeys(context.Background(), lockCtx, k2)
c.Assert(err, IsNil)
// Heartbeats will increase the TTL of the primary key
// wait until secondary key exceeds its own TTL
time.Sleep(time.Duration(atomic.LoadUint64(&tikv.ManagedLockTTL)) * time.Millisecond)
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
// test no wait
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: tidbkv.LockNoWait, WaitStartTime: time.Now()}
err = txn2.LockKeys(context.Background(), lockCtx, k2)
// cannot acquire lock immediately thus error
c.Assert(err.Error(), Equals, kv.ErrLockAcquireFailAndNoWaitSet.Error())
// test for wait limited time (200ms)
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: 200, WaitStartTime: time.Now()}
err = txn2.LockKeys(context.Background(), lockCtx, k2)
// cannot acquire lock in time thus error
c.Assert(err.Error(), Equals, kv.ErrLockWaitTimeout.Error())
}
func (s *testCommitterSuite) getLockInfo(c *C, key []byte) *kvrpcpb.LockInfo {
txn := s.begin(c)
err := txn.Set(key, key)
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, key)
c.Assert(err, IsNil)
req := committer.BuildPrewriteRequest(loc.Region.GetID(), loc.Region.GetConfVer(), loc.Region.GetVer(), committer.GetMutations().Slice(0, 1), 1)
resp, err := s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
keyErrs := (resp.Resp.(*kvrpcpb.PrewriteResponse)).Errors
c.Assert(keyErrs, HasLen, 1)
locked := keyErrs[0].Locked
c.Assert(locked, NotNil)
return locked
}
func (s *testCommitterSuite) TestPkNotFound(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 100) // 100ms
defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default value
ctx := context.Background()
// k1 is the primary lock of txn1.
k1 := []byte("k1")
// k2 is a secondary lock of txn1 and a key txn2 wants to lock.
k2 := []byte("k2")
k3 := []byte("k3")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// lock the primary key.
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(ctx, lockCtx, k1)
c.Assert(err, IsNil)
// lock the secondary key.
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err = txn1.LockKeys(ctx, lockCtx, k2, k3)
c.Assert(err, IsNil)
// Stop the txn ttl manager and remove the primary key, as if the tidb server crashed and the primary key lock does not actually exist,
// while the secondary lock operation succeeded.
txn1.GetCommitter().CloseTTLManager()
var status tikv.TxnStatus
bo := tikv.NewBackofferWithVars(ctx, 5000, nil)
lockKey2 := &tikv.Lock{
Key: k2,
Primary: k1,
TxnID: txn1.StartTS(),
TTL: 0, // let the primary lock k1 expire doing check.
TxnSize: txnCommitBatchSize,
LockType: kvrpcpb.Op_PessimisticLock,
LockForUpdateTS: txn1.StartTS(),
}
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
status, err = resolver.GetTxnStatusFromLock(bo, lockKey2, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
c.Assert(err, IsNil)
c.Assert(status.Action(), Equals, kvrpcpb.Action_TTLExpirePessimisticRollback)
// Txn2 tries to lock the secondary key k2; there should be no infinite loop.
// Since the resolved key k2 is a pessimistic lock, no rollback record should be written, and later locking of k2
// and the other secondary key k3 should succeed if no failpoint is enabled.
status, err = resolver.GetTxnStatusFromLock(bo, lockKey2, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
c.Assert(err, IsNil)
c.Assert(status.Action(), Equals, kvrpcpb.Action_LockNotExistDoNothing)
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now()}
err = txn2.LockKeys(ctx, lockCtx, k2)
c.Assert(err, IsNil)
// Pessimistic rollback using smaller forUpdateTS does not take effect.
lockKey3 := &tikv.Lock{
Key: k3,
Primary: k1,
TxnID: txn1.StartTS(),
TTL: tikv.ManagedLockTTL,
TxnSize: txnCommitBatchSize,
LockType: kvrpcpb.Op_PessimisticLock,
LockForUpdateTS: txn1.StartTS() - 1,
}
err = resolver.ResolvePessimisticLock(ctx, lockKey3)
c.Assert(err, IsNil)
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err = txn1.LockKeys(ctx, lockCtx, k3)
c.Assert(err, IsNil)
// After disabling the failpoint, the rollbackIfNotExist flag will be set, and the resolve should succeed. In this
// case, the returned action of TxnStatus should be LockNotExistDoNothing, and the lock on k3 can be resolved.
txn3 := s.begin(c)
txn3.SetOption(kv.Pessimistic, true)
lockCtx = &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
err = txn3.LockKeys(ctx, lockCtx, k3)
c.Assert(err, IsNil)
status, err = resolver.GetTxnStatusFromLock(bo, lockKey3, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
c.Assert(err, IsNil)
c.Assert(status.Action(), Equals, kvrpcpb.Action_LockNotExistDoNothing)
}
func (s *testCommitterSuite) TestPessimisticLockPrimary(c *C) {
// a is the primary lock of txn1
k1 := []byte("a")
// b is a secondary lock of txn1 and a key txn2 wants to lock, b is on another region
k2 := []byte("b")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// txn1 lock k1
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1)
c.Assert(err, IsNil)
// txn2 wants to lock k1, k2; k1(pk) is blocked by txn1. pessimisticLockKeys has been changed to
// lock the primary key first and then the secondary keys concurrently, so k2 should not be locked by txn2
doneCh := make(chan error)
go func() {
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
lockCtx2 := &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: 200}
waitErr := txn2.LockKeys(context.Background(), lockCtx2, k1, k2)
doneCh <- waitErr
}()
time.Sleep(50 * time.Millisecond)
// txn3 should lock k2 successfully using no wait
txn3 := s.begin(c)
txn3.SetOption(kv.Pessimistic, true)
lockCtx3 := &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL", "return"), IsNil)
err = txn3.LockKeys(context.Background(), lockCtx3, k2)
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL"), IsNil)
c.Assert(err, IsNil)
waitErr := <-doneCh
c.Assert(kv.ErrLockWaitTimeout.Equal(waitErr), IsTrue)
}
func (s *testCommitterSuite) TestResolvePessimisticLock(c *C) {
untouchedIndexKey := []byte("t00000001_i000000001")
untouchedIndexValue := []byte{0, 0, 0, 0, 0, 0, 0, 1, 49}
noValueIndexKey := []byte("t00000001_i000000002")
c.Assert(tablecodec.IsUntouchedIndexKValue(untouchedIndexKey, untouchedIndexValue), IsTrue)
txn := s.begin(c)
txn.SetOption(kv.KVFilter, drivertxn.TiDBKVFilter{})
err := txn.Set(untouchedIndexKey, untouchedIndexValue)
c.Assert(err, IsNil)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
err = txn.LockKeys(context.Background(), lockCtx, untouchedIndexKey, noValueIndexKey)
c.Assert(err, IsNil)
commit, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
mutation := commit.MutationsOfKeys([][]byte{untouchedIndexKey, noValueIndexKey})
c.Assert(mutation.Len(), Equals, 2)
c.Assert(mutation.GetOp(0), Equals, pb.Op_Lock)
c.Assert(mutation.GetKey(0), BytesEquals, untouchedIndexKey)
c.Assert(mutation.GetValue(0), BytesEquals, untouchedIndexValue)
c.Assert(mutation.GetOp(1), Equals, pb.Op_Lock)
c.Assert(mutation.GetKey(1), BytesEquals, noValueIndexKey)
c.Assert(mutation.GetValue(1), BytesEquals, []byte{})
}
func (s *testCommitterSuite) TestCommitDeadLock(c *C) {
// Split into two region and let k1 k2 in different regions.
s.cluster.SplitKeys([]byte("z"), []byte("a"), 2)
k1 := []byte("a_deadlock_k1")
k2 := []byte("y_deadlock_k2")
region1, _ := s.cluster.GetRegionByKey(k1)
region2, _ := s.cluster.GetRegionByKey(k2)
c.Assert(region1.Id != region2.Id, IsTrue)
txn1 := s.begin(c)
txn1.Set(k1, []byte("t1"))
txn1.Set(k2, []byte("t1"))
commit1, err := txn1.NewCommitter(1)
c.Assert(err, IsNil)
commit1.SetPrimaryKey(k1)
commit1.SetTxnSize(1000 * 1024 * 1024)
txn2 := s.begin(c)
txn2.Set(k1, []byte("t2"))
txn2.Set(k2, []byte("t2"))
commit2, err := txn2.NewCommitter(2)
c.Assert(err, IsNil)
commit2.SetPrimaryKey(k2)
commit2.SetTxnSize(1000 * 1024 * 1024)
s.cluster.ScheduleDelay(txn2.StartTS(), region1.Id, 5*time.Millisecond)
s.cluster.ScheduleDelay(txn1.StartTS(), region2.Id, 5*time.Millisecond)
// Txn1 prewrites k1, k2 and txn2 prewrites k2, k1; the large txn
// protocol runs ttlManager and updates their TTLs, causing a deadlock.
ch := make(chan error, 2)
var wg sync.WaitGroup
wg.Add(1)
go func() {
ch <- commit2.Execute(context.Background())
wg.Done()
}()
ch <- commit1.Execute(context.Background())
wg.Wait()
close(ch)
res := 0
for e := range ch {
if e != nil {
res++
}
}
c.Assert(res, Equals, 1)
}
// TestPushPessimisticLock tests pushing forward the minCommitTS of pessimistic locks.
func (s *testCommitterSuite) TestPushPessimisticLock(c *C) {
// k1 is the primary key.
k1, k2 := []byte("a"), []byte("b")
ctx := context.Background()
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1, k2)
c.Assert(err, IsNil)
txn1.Set(k2, []byte("v2"))
committer := txn1.GetCommitter()
err = committer.InitKeysAndMutations()
c.Assert(err, IsNil)
// Strip the prewrite of the primary key.
committer.SetMutations(committer.GetMutations().Slice(1, 2))
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
// The primary lock is a pessimistic lock and the secondary lock is an optimistic lock.
lock1 := s.getLockInfo(c, k1)
c.Assert(lock1.LockType, Equals, kvrpcpb.Op_PessimisticLock)
c.Assert(lock1.PrimaryLock, BytesEquals, k1)
lock2 := s.getLockInfo(c, k2)
c.Assert(lock2.LockType, Equals, kvrpcpb.Op_Put)
c.Assert(lock2.PrimaryLock, BytesEquals, k1)
txn2 := s.begin(c)
start := time.Now()
_, err = txn2.Get(ctx, k2)
elapsed := time.Since(start)
// The optimistic lock shouldn't block reads.
c.Assert(elapsed, Less, 500*time.Millisecond)
c.Assert(tidbkv.IsErrNotFound(err), IsTrue)
txn1.Rollback()
txn2.Rollback()
}
// TestResolveMixed tests mixed lock resolving with left-behind optimistic locks and pessimistic locks,
// using the clean-whole-region resolve path
func (s *testCommitterSuite) TestResolveMixed(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 100) // 100ms
defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default value
ctx := context.Background()
// pk is the primary lock of txn1
pk := []byte("pk")
secondaryLockkeys := make([][]byte, 0, bigTxnThreshold)
for i := 0; i < bigTxnThreshold; i++ {
optimisticLock := []byte(fmt.Sprintf("optimisticLockKey%d", i))
secondaryLockkeys = append(secondaryLockkeys, optimisticLock)
}
pessimisticLockKey := []byte("pessimisticLockKey")
// leave the optimistic and pessimistic locks behind with the primary key lock missing
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// lock the primary key
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, pk)
c.Assert(err, IsNil)
// lock the optimistic keys
for i := 0; i < bigTxnThreshold; i++ {
txn1.Set(secondaryLockkeys[i], []byte(fmt.Sprintf("v%d", i)))
}
committer := txn1.GetCommitter()
err = committer.InitKeysAndMutations()
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
// lock the pessimistic keys
err = txn1.LockKeys(context.Background(), lockCtx, pessimisticLockKey)
c.Assert(err, IsNil)
lock1 := s.getLockInfo(c, pessimisticLockKey)
c.Assert(lock1.LockType, Equals, kvrpcpb.Op_PessimisticLock)
c.Assert(lock1.PrimaryLock, BytesEquals, pk)
optimisticLockKey := secondaryLockkeys[0]
lock2 := s.getLockInfo(c, optimisticLockKey)
c.Assert(lock2.LockType, Equals, kvrpcpb.Op_Put)
c.Assert(lock2.PrimaryLock, BytesEquals, pk)
// stop the txn ttl manager and remove the primary key, leaving the other keys behind
committer.CloseTTLManager()
muts := tikv.NewPlainMutations(1)
muts.Push(kvrpcpb.Op_Lock, pk, nil, true)
err = committer.PessimisticRollbackMutations(context.Background(), &muts)
c.Assert(err, IsNil)
// try to resolve the left optimistic locks, use clean whole region
time.Sleep(time.Duration(atomic.LoadUint64(&tikv.ManagedLockTTL)) * time.Millisecond)
optimisticLockInfo := s.getLockInfo(c, optimisticLockKey)
lock := tikv.NewLock(optimisticLockInfo)
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
err = resolver.ResolveLock(ctx, lock)
c.Assert(err, IsNil)
// txn2 tries to lock the pessimisticLockKey; the lock should have been resolved by the clean-whole-region resolve
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
err = txn2.LockKeys(context.Background(), lockCtx, pessimisticLockKey)
c.Assert(err, IsNil)
err = txn1.Rollback()
c.Assert(err, IsNil)
err = txn2.Rollback()
c.Assert(err, IsNil)
}
// TestPrewriteSecondaryKeys tests that when async commit is enabled, each prewrite message includes an
// accurate list of secondary keys.
func (s *testCommitterSuite) TestPrewriteSecondaryKeys(c *C) {
// Prepare two regions first: (, 100) and [100, )
region, _ := s.cluster.GetRegionByKey([]byte{50})
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(region.Id, newRegionID, []byte{100}, []uint64{newPeerID}, newPeerID)
txn := s.beginAsyncCommit(c)
var val [1024]byte
for i := byte(50); i < 120; i++ {
err := txn.Set([]byte{i}, val[:])
c.Assert(err, IsNil)
}
// Some duplicates.
for i := byte(50); i < 120; i += 10 {
err := txn.Set([]byte{i}, val[512:700])
c.Assert(err, IsNil)
}
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
mock := mockClient{inner: s.store.GetTiKVClient()}
s.store.SetTiKVClient(&mock)
ctx := context.Background()
// TODO remove this when minCommitTS is returned from mockStore prewrite response.
committer.SetMinCommitTS(committer.GetStartTS() + 10)
committer.SetNoFallBack()
err = committer.Execute(ctx)
c.Assert(err, IsNil)
c.Assert(mock.seenPrimaryReq > 0, IsTrue)
c.Assert(mock.seenSecondaryReq > 0, IsTrue)
}
func (s *testCommitterSuite) TestAsyncCommit(c *C) {
ctx := context.Background()
pk := []byte("tpk")
pkVal := []byte("pkVal")
k1 := []byte("tk1")
k1Val := []byte("k1Val")
txn1 := s.beginAsyncCommit(c)
err := txn1.Set(pk, pkVal)
c.Assert(err, IsNil)
err = txn1.Set(k1, k1Val)
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
committer.SetSessionID(1)
committer.SetMinCommitTS(txn1.StartTS() + 10)
err = committer.Execute(ctx)
c.Assert(err, IsNil)
s.checkValues(c, map[string]string{
string(pk): string(pkVal),
string(k1): string(k1Val),
})
}
func updateGlobalConfig(f func(conf *config.Config)) {
g := config.GetGlobalConfig()
newConf := *g
f(&newConf)
config.StoreGlobalConfig(&newConf)
}
// restoreGlobalConfFunc returns a function that restores the config to the current value.
func restoreGlobalConfFunc() (restore func()) {
g := config.GetGlobalConfig()
return func() {
config.StoreGlobalConfig(g)
}
}
func (s *testCommitterSuite) TestAsyncCommitCheck(c *C) {
defer restoreGlobalConfFunc()()
updateGlobalConfig(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.KeysLimit = 16
conf.TiKVClient.AsyncCommit.TotalKeySizeLimit = 64
})
txn := s.beginAsyncCommit(c)
buf := []byte{0, 0, 0, 0}
// Set 16 keys, each key is 4 bytes long. So the total size of keys is 64 bytes.
for i := 0; i < 16; i++ {
buf[0] = byte(i)
err := txn.Set(buf, []byte("v"))
c.Assert(err, IsNil)
}
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
c.Assert(committer.CheckAsyncCommit(), IsTrue)
updateGlobalConfig(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.KeysLimit = 15
})
c.Assert(committer.CheckAsyncCommit(), IsFalse)
updateGlobalConfig(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.KeysLimit = 20
conf.TiKVClient.AsyncCommit.TotalKeySizeLimit = 63
})
c.Assert(committer.CheckAsyncCommit(), IsFalse)
}
type mockClient struct {
inner tikv.Client
seenPrimaryReq uint32
seenSecondaryReq uint32
}
func (m *mockClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
// If we find a prewrite request, check if it satisfies our constraints.
if pr, ok := req.Req.(*kvrpcpb.PrewriteRequest); ok {
if pr.UseAsyncCommit {
if isPrimary(pr) {
// The primary key should not be included, nor should there be any duplicates. All keys should be present.
if !includesPrimary(pr) && allKeysNoDups(pr) {
atomic.StoreUint32(&m.seenPrimaryReq, 1)
}
} else {
// Secondaries should only be sent with the primary key
if len(pr.Secondaries) == 0 {
atomic.StoreUint32(&m.seenSecondaryReq, 1)
}
}
}
}
return m.inner.SendRequest(ctx, addr, req, timeout)
}
func (m *mockClient) Close() error {
return m.inner.Close()
}
func isPrimary(req *kvrpcpb.PrewriteRequest) bool {
for _, m := range req.Mutations {
if bytes.Equal(req.PrimaryLock, m.Key) {
return true
}
}
return false
}
func includesPrimary(req *kvrpcpb.PrewriteRequest) bool {
for _, k := range req.Secondaries {
if bytes.Equal(req.PrimaryLock, k) {
return true
}
}
return false
}
func allKeysNoDups(req *kvrpcpb.PrewriteRequest) bool {
check := make(map[string]bool)
// Create the check map and check for duplicates.
for _, k := range req.Secondaries {
s := string(k)
if check[s] {
return false
}
check[s] = true
}
// Check every key is present.
for i := byte(50); i < 120; i++ {
k := []byte{i}
if !bytes.Equal(req.PrimaryLock, k) && !check[string(k)] {
return false
}
}
return true
}
| store/tikv/tests/2pc_test.go | 1 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.9971144199371338,
0.06642968952655792,
0.00015946228813845664,
0.0017117704264819622,
0.226938858628273
] |
{
"id": 10,
"code_window": [
"\ttxn2.SetOption(kv.Pessimistic, true)\n",
"\n",
"\t// test no wait\n",
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: tidbkv.LockNoWait, WaitStartTime: time.Now()}\n",
"\terr = txn2.LockKeys(context.Background(), lockCtx, k2)\n",
"\t// cannot acquire lock immediately thus error\n",
"\tc.Assert(err.Error(), Equals, kv.ErrLockAcquireFailAndNoWaitSet.Error())\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: tikv.LockNoWait, WaitStartTime: time.Now()}\n"
],
"file_path": "store/tikv/tests/2pc_test.go",
"type": "replace",
"edit_start_line_idx": 880
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package aggfuncs
import (
"unsafe"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/util/chunk"
)
const (
// DefPartialResult4RankSize is the size of partialResult4Rank
DefPartialResult4RankSize = int64(unsafe.Sizeof(partialResult4Rank{}))
)
type rank struct {
baseAggFunc
isDense bool
rowComparer
}
type partialResult4Rank struct {
curIdx int64
lastRank int64
rows []chunk.Row
}
func (r *rank) AllocPartialResult() (pr PartialResult, memDelta int64) {
return PartialResult(&partialResult4Rank{}), DefPartialResult4RankSize
}
func (r *rank) ResetPartialResult(pr PartialResult) {
p := (*partialResult4Rank)(pr)
p.curIdx = 0
p.lastRank = 0
p.rows = p.rows[:0]
}
func (r *rank) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error) {
p := (*partialResult4Rank)(pr)
p.rows = append(p.rows, rowsInGroup...)
memDelta += int64(len(rowsInGroup)) * DefRowSize
return memDelta, nil
}
func (r *rank) AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error {
p := (*partialResult4Rank)(pr)
p.curIdx++
if p.curIdx == 1 {
p.lastRank = 1
chk.AppendInt64(r.ordinal, p.lastRank)
return nil
}
if r.compareRows(p.rows[p.curIdx-2], p.rows[p.curIdx-1]) == 0 {
chk.AppendInt64(r.ordinal, p.lastRank)
return nil
}
if r.isDense {
p.lastRank++
} else {
p.lastRank = p.curIdx
}
chk.AppendInt64(r.ordinal, p.lastRank)
return nil
}
type rowComparer struct {
cmpFuncs []chunk.CompareFunc
colIdx []int
}
func buildRowComparer(cols []*expression.Column) rowComparer {
rc := rowComparer{}
rc.colIdx = make([]int, 0, len(cols))
rc.cmpFuncs = make([]chunk.CompareFunc, 0, len(cols))
for _, col := range cols {
cmpFunc := chunk.GetCompareFunc(col.RetType)
if cmpFunc == nil {
continue
}
rc.cmpFuncs = append(rc.cmpFuncs, chunk.GetCompareFunc(col.RetType))
rc.colIdx = append(rc.colIdx, col.Index)
}
return rc
}
func (rc *rowComparer) compareRows(prev, curr chunk.Row) int {
for i, idx := range rc.colIdx {
res := rc.cmpFuncs[i](prev, idx, curr, idx)
if res != 0 {
return res
}
}
return 0
}
| executor/aggfuncs/func_rank.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0001785891072358936,
0.00017433382163289934,
0.00016698235413059592,
0.00017535907682031393,
0.0000033659230211924296
] |
{
"id": 10,
"code_window": [
"\ttxn2.SetOption(kv.Pessimistic, true)\n",
"\n",
"\t// test no wait\n",
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: tidbkv.LockNoWait, WaitStartTime: time.Now()}\n",
"\terr = txn2.LockKeys(context.Background(), lockCtx, k2)\n",
"\t// cannot acquire lock immediately thus error\n",
"\tc.Assert(err.Error(), Equals, kv.ErrLockAcquireFailAndNoWaitSet.Error())\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: tikv.LockNoWait, WaitStartTime: time.Now()}\n"
],
"file_path": "store/tikv/tests/2pc_test.go",
"type": "replace",
"edit_start_line_idx": 880
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright 2019-present PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"fmt"
"testing"
"time"
. "github.com/pingcap/check"
)
func TestT(t *testing.T) {
TestingT(t)
}
var _ = Suite(&testDeadlockSuite{})
type testDeadlockSuite struct{}
func (s *testDeadlockSuite) TestDeadlock(c *C) {
ttl := 50 * time.Millisecond
expireInterval := 100 * time.Millisecond
urgentSize := uint64(1)
detector := NewDetector(ttl, urgentSize, expireInterval)
err := detector.Detect(1, 2, 100)
c.Assert(err, IsNil)
c.Assert(detector.totalSize, Equals, uint64(1))
err = detector.Detect(2, 3, 200)
c.Assert(err, IsNil)
c.Assert(detector.totalSize, Equals, uint64(2))
err = detector.Detect(3, 1, 300)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, fmt.Sprintf("deadlock"))
c.Assert(detector.totalSize, Equals, uint64(2))
detector.CleanUp(2)
list2 := detector.waitForMap[2]
c.Assert(list2, IsNil)
c.Assert(detector.totalSize, Equals, uint64(1))
// After cycle is broken, no deadlock now.
err = detector.Detect(3, 1, 300)
c.Assert(err, IsNil)
list3 := detector.waitForMap[3]
c.Assert(list3.txns.Len(), Equals, 1)
c.Assert(detector.totalSize, Equals, uint64(2))
// Different keyHash grows the list.
err = detector.Detect(3, 1, 400)
c.Assert(err, IsNil)
c.Assert(list3.txns.Len(), Equals, 2)
c.Assert(detector.totalSize, Equals, uint64(3))
// Same waitFor and key hash doesn't grow the list.
err = detector.Detect(3, 1, 400)
c.Assert(err, IsNil)
c.Assert(list3.txns.Len(), Equals, 2)
c.Assert(detector.totalSize, Equals, uint64(3))
detector.CleanUpWaitFor(3, 1, 300)
c.Assert(list3.txns.Len(), Equals, 1)
c.Assert(detector.totalSize, Equals, uint64(2))
detector.CleanUpWaitFor(3, 1, 400)
c.Assert(detector.totalSize, Equals, uint64(1))
list3 = detector.waitForMap[3]
c.Assert(list3, IsNil)
	// after 100ms, all entries have expired; detect non-existent edges
time.Sleep(100 * time.Millisecond)
err = detector.Detect(100, 200, 100)
c.Assert(err, IsNil)
c.Assert(detector.totalSize, Equals, uint64(1))
c.Assert(len(detector.waitForMap), Equals, 1)
	// an expired entry should not report a deadlock; Detect will remove this entry,
	// regardless of the expire check interval
time.Sleep(60 * time.Millisecond)
err = detector.Detect(200, 100, 200)
c.Assert(err, IsNil)
c.Assert(detector.totalSize, Equals, uint64(1))
c.Assert(len(detector.waitForMap), Equals, 1)
}
| store/mockstore/unistore/tikv/detector_test.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0013548422139137983,
0.0003758665407076478,
0.0001630008773645386,
0.0001765798806445673,
0.0003920110175386071
] |
{
"id": 10,
"code_window": [
"\ttxn2.SetOption(kv.Pessimistic, true)\n",
"\n",
"\t// test no wait\n",
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: tidbkv.LockNoWait, WaitStartTime: time.Now()}\n",
"\terr = txn2.LockKeys(context.Background(), lockCtx, k2)\n",
"\t// cannot acquire lock immediately thus error\n",
"\tc.Assert(err.Error(), Equals, kv.ErrLockAcquireFailAndNoWaitSet.Error())\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: tikv.LockNoWait, WaitStartTime: time.Now()}\n"
],
"file_path": "store/tikv/tests/2pc_test.go",
"type": "replace",
"edit_start_line_idx": 880
} | # Proposal: Support SQL Plan Management
- Author(s): [Haibin Xie](https://github.com/lamxTyler)
- Last updated: 2018-12-11
- Discussion at:
## Abstract
This proposal aims to support SQL plan management. With it, we can force the optimizer to choose a certain plan without modifying the SQL text.
## Background
The optimizer chooses a plan based on several environmental factors, such as statistics, optimizer parameters, schema definitions and so on. Once the environment changes, we cannot guarantee that the newly optimized plan is always better than the old plan. Therefore we need to provide ways to bind the plan for applications that cannot take the risk of a changed plan.
## Proposal
The following proposal mainly focuses on two parts: how to bind the plan and what is the syntax to manage it.
### How to bind the plan
In order to bind the plan, we need to maintain a mapping from normalized SQL text to plan. To normalize the SQL text, we can remove all the blank space, replace the parameters with placeholder markers, and convert the remaining parts to lower case. The most difficult problem is how we represent and store the plan.
One way to represent the plan is to use the optimized physical plan. However, it is difficult to perform parameter replacement for later SQLs, because some parameters may already have been rewritten in the optimized physical plan during logical and physical optimization.
Since parameter replacement is hard, it is better not to do it. Another way to represent the plan is to use the AST of the hinted SQL, so the only thing that needs to be done for later SQLs is to traverse the AST and copy the hints.
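As a concrete illustration of the normalization and hashing idea, the following is a minimal, self-contained Go sketch. It is not TiDB's actual implementation: the real normalization goes through the parser/lexer, and the regex-based literal handling, function names, and digest choice here are illustrative assumptions only.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"regexp"
	"strings"
)

// literalPattern matches single-quoted strings and bare numbers. It is a
// deliberately simplified stand-in for lexer-based parameter detection.
var literalPattern = regexp.MustCompile(`'[^']*'|\b\d+(\.\d+)?\b`)

// normalizeSQL lowercases the statement, replaces literals with a placeholder
// marker, and collapses runs of whitespace, so statements that differ only in
// their parameters map to the same normalized text.
func normalizeSQL(sql string) string {
	s := strings.ToLower(sql)
	s = literalPattern.ReplaceAllString(s, "?")
	return strings.Join(strings.Fields(s), " ")
}

// bindingDigest hashes the normalized text; comparing digests first lets a
// binding cache reject most statements with a single map lookup.
func bindingDigest(sql string) string {
	sum := sha256.Sum256([]byte(normalizeSQL(sql)))
	return hex.EncodeToString(sum[:])
}

func main() {
	a := "SELECT * FROM t WHERE a = 1 AND b = 'x'"
	b := "select *  from t where a = 2 and b = 'y'"
	fmt.Println(normalizeSQL(a))                      // select * from t where a = ? and b = ?
	fmt.Println(bindingDigest(a) == bindingDigest(b)) // true
}
```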
### Syntax to manage the binding plan
To manage the SQL bindings, we need to support basic operations like create, show and drop. We can also support SQL bindings that only exist in the current session. The syntax will be like the following:
- CREATE [GLOBAL|SESSION] BINDING_NAME BINDING FOR `SQL` USING `HINTED SQL`
- DROP [GLOBAL|SESSION] BINDINGS
- DROP [GLOBAL|SESSION] BINDING BINDING_NAME
- SHOW [GLOBAL|SESSION] BINDINGS [SHOW_LIKE_OR_WHERE]
## Rationale
Oracle only stores the hints of the optimized query, instead of the whole AST. For TiDB, doing the same requires more work for now, because we need to generate unique identifiers for the subqueries and lift all hints to the outermost query. Storing the AST is the simplest way for now.
## Compatibility
MySQL does not support SQL plan management, so this will add syntax that is not supported by MySQL.
## Implementation
To implement it, we need the following main steps:
- Normalize the SQL text. We can take this https://github.com/pingcap/parser/pull/32 as an example.
- Support the syntax in the parser.
- Store the binding SQL and AST of the hinted SQL in a system table. Since there is a unique mapping from SQL text to AST, we can just store the SQL and parse it to AST for later use. A background goroutine will check if there are new bindings and update the local cache.
- When another SQL comes, we first check whether there is a matched SQL in the cache. If so, we can traverse the AST to add the hints. Since comparing text for every SQL may affect unrelated SQLs a lot, we can calculate a hash value and first check whether there are matching hash values (a minimal cache sketch follows this list).
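The cache lookup in the last step could look roughly like the sketch below. This is only a rough illustration under the assumption that a digest of the normalized SQL text is available (for example, computed as in the earlier sketch); the type names, fields, and loading logic are invented for illustration and do not correspond to TiDB's real bind-info code.

```go
package main

import (
	"fmt"
	"sync"
)

// bindRecord pairs a normalized SQL text with the hinted SQL it should be
// rewritten to. The names are illustrative, not TiDB's real structures.
type bindRecord struct {
	normalizedSQL string
	hintedSQL     string
}

// bindCache indexes bindings by a digest of the normalized SQL. A background
// goroutine would periodically reload it from the system table via load;
// readers only take the read lock, so lookups stay cheap.
type bindCache struct {
	mu       sync.RWMutex
	byDigest map[string][]bindRecord
}

func (c *bindCache) load(records map[string][]bindRecord) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.byDigest = records
}

// match checks the digest first, so statements without any binding cost one
// map lookup; the text comparison guards against hash collisions.
func (c *bindCache) match(digest, normalizedSQL string) (bindRecord, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	for _, r := range c.byDigest[digest] {
		if r.normalizedSQL == normalizedSQL {
			return r, true
		}
	}
	return bindRecord{}, false
}

func main() {
	c := &bindCache{}
	c.load(map[string][]bindRecord{
		"d1": {{
			normalizedSQL: "select * from t where a = ?",
			hintedSQL:     "select /*+ use_index(t, idx_a) */ * from t where a = ?",
		}},
	})
	if r, ok := c.match("d1", "select * from t where a = ?"); ok {
		fmt.Println(r.hintedSQL)
	}
}
```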
## Open issues (if applicable)
| docs/design/2018-12-11-sql-plan-management.md | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.00017393376037944108,
0.00016859894094523042,
0.00016372036770917475,
0.0001685816387180239,
0.0000038667603803332895
] |
{
"id": 11,
"code_window": [
"\t// case, the returned action of TxnStatus should be LockNotExistDoNothing, and lock on k3 could be resolved.\n",
"\ttxn3 := s.begin(c)\n",
"\ttxn3.SetOption(kv.Pessimistic, true)\n",
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}\n",
"\terr = txn3.LockKeys(ctx, lockCtx, k3)\n",
"\tc.Assert(err, IsNil)\n",
"\tstatus, err = resolver.GetTxnStatusFromLock(bo, lockKey3, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)\n",
"\tc.Assert(err, IsNil)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tikv.LockNoWait}\n"
],
"file_path": "store/tikv/tests/2pc_test.go",
"type": "replace",
"edit_start_line_idx": 984
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"encoding/hex"
"math/rand"
"strings"
"sync/atomic"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
tidbkv "github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/logutil"
"github.com/pingcap/tidb/store/tikv/metrics"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
)
type actionPessimisticLock struct {
*kv.LockCtx
}
type actionPessimisticRollback struct{}
var (
_ twoPhaseCommitAction = actionPessimisticLock{}
_ twoPhaseCommitAction = actionPessimisticRollback{}
)
func (actionPessimisticLock) String() string {
return "pessimistic_lock"
}
func (actionPessimisticLock) tiKVTxnRegionsNumHistogram() prometheus.Observer {
return metrics.TxnRegionsNumHistogramPessimisticLock
}
func (actionPessimisticRollback) String() string {
return "pessimistic_rollback"
}
func (actionPessimisticRollback) tiKVTxnRegionsNumHistogram() prometheus.Observer {
return metrics.TxnRegionsNumHistogramPessimisticRollback
}
func (action actionPessimisticLock) handleSingleBatch(c *twoPhaseCommitter, bo *Backoffer, batch batchMutations) error {
m := batch.mutations
mutations := make([]*pb.Mutation, m.Len())
for i := 0; i < m.Len(); i++ {
mut := &pb.Mutation{
Op: pb.Op_PessimisticLock,
Key: m.GetKey(i),
}
if c.txn.us.HasPresumeKeyNotExists(m.GetKey(i)) || (c.doingAmend && m.GetOp(i) == pb.Op_Insert) {
mut.Assertion = pb.Assertion_NotExist
}
mutations[i] = mut
}
elapsed := uint64(time.Since(c.txn.startTime) / time.Millisecond)
ttl := elapsed + atomic.LoadUint64(&ManagedLockTTL)
failpoint.Inject("shortPessimisticLockTTL", func() {
ttl = 1
keys := make([]string, 0, len(mutations))
for _, m := range mutations {
keys = append(keys, hex.EncodeToString(m.Key))
}
logutil.BgLogger().Info("[failpoint] injected lock ttl = 1 on pessimistic lock",
zap.Uint64("txnStartTS", c.startTS), zap.Strings("keys", keys))
})
req := tikvrpc.NewRequest(tikvrpc.CmdPessimisticLock, &pb.PessimisticLockRequest{
Mutations: mutations,
PrimaryLock: c.primary(),
StartVersion: c.startTS,
ForUpdateTs: c.forUpdateTS,
LockTtl: ttl,
IsFirstLock: c.isFirstLock,
WaitTimeout: action.LockWaitTime,
ReturnValues: action.ReturnValues,
MinCommitTs: c.forUpdateTS + 1,
}, pb.Context{Priority: c.priority, SyncLog: c.syncLog})
lockWaitStartTime := action.WaitStartTime
for {
// if lockWaitTime set, refine the request `WaitTimeout` field based on timeout limit
if action.LockWaitTime > 0 {
timeLeft := action.LockWaitTime - (time.Since(lockWaitStartTime)).Milliseconds()
if timeLeft <= 0 {
req.PessimisticLock().WaitTimeout = tidbkv.LockNoWait
} else {
req.PessimisticLock().WaitTimeout = timeLeft
}
}
failpoint.Inject("PessimisticLockErrWriteConflict", func() error {
time.Sleep(300 * time.Millisecond)
return &kv.ErrWriteConflict{WriteConflict: nil}
})
startTime := time.Now()
resp, err := c.store.SendReq(bo, req, batch.region, ReadTimeoutShort)
if action.LockCtx.Stats != nil {
atomic.AddInt64(&action.LockCtx.Stats.LockRPCTime, int64(time.Since(startTime)))
atomic.AddInt64(&action.LockCtx.Stats.LockRPCCount, 1)
}
if err != nil {
return errors.Trace(err)
}
regionErr, err := resp.GetRegionError()
if err != nil {
return errors.Trace(err)
}
if regionErr != nil {
err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String()))
if err != nil {
return errors.Trace(err)
}
err = c.pessimisticLockMutations(bo, action.LockCtx, batch.mutations)
return errors.Trace(err)
}
if resp.Resp == nil {
return errors.Trace(kv.ErrBodyMissing)
}
lockResp := resp.Resp.(*pb.PessimisticLockResponse)
keyErrs := lockResp.GetErrors()
if len(keyErrs) == 0 {
if action.ReturnValues {
action.ValuesLock.Lock()
for i, mutation := range mutations {
action.Values[string(mutation.Key)] = kv.ReturnedValue{Value: lockResp.Values[i]}
}
action.ValuesLock.Unlock()
}
return nil
}
var locks []*Lock
for _, keyErr := range keyErrs {
// Check already exists error
if alreadyExist := keyErr.GetAlreadyExist(); alreadyExist != nil {
e := &kv.ErrKeyExist{AlreadyExist: alreadyExist}
return c.extractKeyExistsErr(e)
}
if deadlock := keyErr.Deadlock; deadlock != nil {
return &kv.ErrDeadlock{Deadlock: deadlock}
}
// Extract lock from key error
lock, err1 := extractLockFromKeyErr(keyErr)
if err1 != nil {
return errors.Trace(err1)
}
locks = append(locks, lock)
}
// Because we already waited on tikv, no need to Backoff here.
		// TiKV by default will wait 3s (also the maximum wait value) when a lock error occurs
startTime = time.Now()
msBeforeTxnExpired, _, err := c.store.lockResolver.ResolveLocks(bo, 0, locks)
if err != nil {
return errors.Trace(err)
}
if action.LockCtx.Stats != nil {
atomic.AddInt64(&action.LockCtx.Stats.ResolveLockTime, int64(time.Since(startTime)))
}
		// If msBeforeTxnExpired is not zero, it means there are still locks blocking us from acquiring
		// the pessimistic lock. We should return an acquire-failed error when no-wait is set, or a timeout error if necessary.
if msBeforeTxnExpired > 0 {
if action.LockWaitTime == tidbkv.LockNoWait {
return kv.ErrLockAcquireFailAndNoWaitSet
} else if action.LockWaitTime == tidbkv.LockAlwaysWait {
// do nothing but keep wait
} else {
// the lockWaitTime is set, we should return wait timeout if we are still blocked by a lock
if time.Since(lockWaitStartTime).Milliseconds() >= action.LockWaitTime {
return errors.Trace(kv.ErrLockWaitTimeout)
}
}
if action.LockCtx.PessimisticLockWaited != nil {
atomic.StoreInt32(action.LockCtx.PessimisticLockWaited, 1)
}
}
// Handle the killed flag when waiting for the pessimistic lock.
// When a txn runs into LockKeys() and backoff here, it has no chance to call
// executor.Next() and check the killed flag.
if action.Killed != nil {
// Do not reset the killed flag here!
			// actionPessimisticLock runs on each region in parallel, so we have to consider that
// the error may be dropped.
if atomic.LoadUint32(action.Killed) == 1 {
return errors.Trace(kv.ErrQueryInterrupted)
}
}
}
}
func (actionPessimisticRollback) handleSingleBatch(c *twoPhaseCommitter, bo *Backoffer, batch batchMutations) error {
req := tikvrpc.NewRequest(tikvrpc.CmdPessimisticRollback, &pb.PessimisticRollbackRequest{
StartVersion: c.startTS,
ForUpdateTs: c.forUpdateTS,
Keys: batch.mutations.GetKeys(),
})
resp, err := c.store.SendReq(bo, req, batch.region, ReadTimeoutShort)
if err != nil {
return errors.Trace(err)
}
regionErr, err := resp.GetRegionError()
if err != nil {
return errors.Trace(err)
}
if regionErr != nil {
err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String()))
if err != nil {
return errors.Trace(err)
}
err = c.pessimisticRollbackMutations(bo, batch.mutations)
return errors.Trace(err)
}
return nil
}
func (c *twoPhaseCommitter) pessimisticLockMutations(bo *Backoffer, lockCtx *kv.LockCtx, mutations CommitterMutations) error {
if c.sessionID > 0 {
failpoint.Inject("beforePessimisticLock", func(val failpoint.Value) {
// Pass multiple instructions in one string, delimited by commas, to trigger multiple behaviors, like
// `return("delay,fail")`. Then they will be executed sequentially at once.
if v, ok := val.(string); ok {
for _, action := range strings.Split(v, ",") {
if action == "delay" {
duration := time.Duration(rand.Int63n(int64(time.Second) * 5))
logutil.Logger(bo.ctx).Info("[failpoint] injected delay at pessimistic lock",
zap.Uint64("txnStartTS", c.startTS), zap.Duration("duration", duration))
time.Sleep(duration)
} else if action == "fail" {
logutil.Logger(bo.ctx).Info("[failpoint] injected failure at pessimistic lock",
zap.Uint64("txnStartTS", c.startTS))
failpoint.Return(errors.New("injected failure at pessimistic lock"))
}
}
}
})
}
return c.doActionOnMutations(bo, actionPessimisticLock{lockCtx}, mutations)
}
func (c *twoPhaseCommitter) pessimisticRollbackMutations(bo *Backoffer, mutations CommitterMutations) error {
return c.doActionOnMutations(bo, actionPessimisticRollback{}, mutations)
}
| store/tikv/pessimistic.go | 1 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.9982340335845947,
0.06943465769290924,
0.00016459802282042801,
0.0016376792918890715,
0.23564420640468597
] |
{
"id": 11,
"code_window": [
"\t// case, the returned action of TxnStatus should be LockNotExistDoNothing, and lock on k3 could be resolved.\n",
"\ttxn3 := s.begin(c)\n",
"\ttxn3.SetOption(kv.Pessimistic, true)\n",
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}\n",
"\terr = txn3.LockKeys(ctx, lockCtx, k3)\n",
"\tc.Assert(err, IsNil)\n",
"\tstatus, err = resolver.GetTxnStatusFromLock(bo, lockKey3, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)\n",
"\tc.Assert(err, IsNil)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tikv.LockNoWait}\n"
],
"file_path": "store/tikv/tests/2pc_test.go",
"type": "replace",
"edit_start_line_idx": 984
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"context"
"github.com/cznic/mathutil"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/util/chunk"
)
// ExplainExec represents an explain executor.
type ExplainExec struct {
baseExecutor
explain *core.Explain
analyzeExec Executor
executed bool
rows [][]string
cursor int
}
// Open implements the Executor Open interface.
func (e *ExplainExec) Open(ctx context.Context) error {
if e.analyzeExec != nil {
return e.analyzeExec.Open(ctx)
}
return nil
}
// Close implements the Executor Close interface.
func (e *ExplainExec) Close() error {
e.rows = nil
return nil
}
// Next implements the Executor Next interface.
func (e *ExplainExec) Next(ctx context.Context, req *chunk.Chunk) error {
if e.rows == nil {
var err error
e.rows, err = e.generateExplainInfo(ctx)
if err != nil {
return err
}
}
req.GrowAndReset(e.maxChunkSize)
if e.cursor >= len(e.rows) {
return nil
}
numCurRows := mathutil.Min(req.Capacity(), len(e.rows)-e.cursor)
for i := e.cursor; i < e.cursor+numCurRows; i++ {
for j := range e.rows[i] {
req.AppendString(j, e.rows[i][j])
}
}
e.cursor += numCurRows
return nil
}
func (e *ExplainExec) executeAnalyzeExec(ctx context.Context) (err error) {
if e.analyzeExec != nil && !e.executed {
defer func() {
err1 := e.analyzeExec.Close()
if err1 != nil {
if err != nil {
err = errors.New(err.Error() + ", " + err1.Error())
} else {
err = err1
}
}
}()
e.executed = true
chk := newFirstChunk(e.analyzeExec)
for {
err = Next(ctx, e.analyzeExec, chk)
if err != nil || chk.NumRows() == 0 {
break
}
}
}
return err
}
func (e *ExplainExec) generateExplainInfo(ctx context.Context) (rows [][]string, err error) {
if err = e.executeAnalyzeExec(ctx); err != nil {
return nil, err
}
if err = e.explain.RenderResult(); err != nil {
return nil, err
}
return e.explain.Rows, nil
}
// getAnalyzeExecToExecutedNoDelay gets the analyze DML executor to execute in the handleNoDelay function.
// For an explain analyze insert/update/delete statement, the analyze executor should be executed in the
// handleNoDelay function and the transaction committed there if needed.
// Otherwise, in an autocommit transaction, the table record changes made by the analyze executor
// (insert/update/delete...) will not be committed.
func (e *ExplainExec) getAnalyzeExecToExecutedNoDelay() Executor {
if e.analyzeExec != nil && !e.executed && e.analyzeExec.Schema().Len() == 0 {
e.executed = true
return e.analyzeExec
}
return nil
}
| executor/explain.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.00017584161832928658,
0.00017070345347747207,
0.0001616804802324623,
0.00017080141697078943,
0.000003592185748857446
] |
{
"id": 11,
"code_window": [
"\t// case, the returned action of TxnStatus should be LockNotExistDoNothing, and lock on k3 could be resolved.\n",
"\ttxn3 := s.begin(c)\n",
"\ttxn3.SetOption(kv.Pessimistic, true)\n",
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}\n",
"\terr = txn3.LockKeys(ctx, lockCtx, k3)\n",
"\tc.Assert(err, IsNil)\n",
"\tstatus, err = resolver.GetTxnStatusFromLock(bo, lockKey3, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)\n",
"\tc.Assert(err, IsNil)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tikv.LockNoWait}\n"
],
"file_path": "store/tikv/tests/2pc_test.go",
"type": "replace",
"edit_start_line_idx": 984
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tablecodec
import (
"testing"
"github.com/pingcap/tidb/kv"
)
func BenchmarkEncodeRowKeyWithHandle(b *testing.B) {
for i := 0; i < b.N; i++ {
EncodeRowKeyWithHandle(100, kv.IntHandle(100))
}
}
func BenchmarkEncodeEndKey(b *testing.B) {
for i := 0; i < b.N; i++ {
EncodeRowKeyWithHandle(100, kv.IntHandle(100))
EncodeRowKeyWithHandle(100, kv.IntHandle(101))
}
}
// BenchmarkEncodeRowKeyWithPrefixNex tests the performance of encoding row key with prefixNext
// PrefixNext() is slower than using EncodeRowKeyWithHandle.
// BenchmarkEncodeEndKey-4 20000000 97.2 ns/op
// BenchmarkEncodeRowKeyWithPrefixNex-4 10000000 121 ns/op
func BenchmarkEncodeRowKeyWithPrefixNex(b *testing.B) {
for i := 0; i < b.N; i++ {
sk := EncodeRowKeyWithHandle(100, kv.IntHandle(100))
sk.PrefixNext()
}
}
func BenchmarkDecodeRowKey(b *testing.B) {
rowKey := EncodeRowKeyWithHandle(100, kv.IntHandle(100))
for i := 0; i < b.N; i++ {
_, err := DecodeRowKey(rowKey)
if err != nil {
b.Fatal(err)
}
}
}
| tablecodec/bench_test.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.00017673215188551694,
0.00017094776558224112,
0.00016529492859262973,
0.00017053060582838953,
0.000004236709628457902
] |
{
"id": 11,
"code_window": [
"\t// case, the returned action of TxnStatus should be LockNotExistDoNothing, and lock on k3 could be resolved.\n",
"\ttxn3 := s.begin(c)\n",
"\ttxn3.SetOption(kv.Pessimistic, true)\n",
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}\n",
"\terr = txn3.LockKeys(ctx, lockCtx, k3)\n",
"\tc.Assert(err, IsNil)\n",
"\tstatus, err = resolver.GetTxnStatusFromLock(bo, lockKey3, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)\n",
"\tc.Assert(err, IsNil)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tikv.LockNoWait}\n"
],
"file_path": "store/tikv/tests/2pc_test.go",
"type": "replace",
"edit_start_line_idx": 984
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"net/http"
"time"
"github.com/gorilla/mux"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/gcutil"
)
// StatsHandler is the handler for dumping statistics.
type StatsHandler struct {
do *domain.Domain
}
func (s *Server) newStatsHandler() *StatsHandler {
store, ok := s.driver.(*TiDBDriver)
if !ok {
panic("Illegal driver")
}
do, err := session.GetDomain(store.store)
if err != nil {
panic("Failed to get domain")
}
return &StatsHandler{do}
}
func (sh StatsHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
w.Header().Set("Content-Type", "application/json")
params := mux.Vars(req)
is := sh.do.InfoSchema()
h := sh.do.StatsHandle()
tbl, err := is.TableByName(model.NewCIStr(params[pDBName]), model.NewCIStr(params[pTableName]))
if err != nil {
writeError(w, err)
} else {
js, err := h.DumpStatsToJSON(params[pDBName], tbl.Meta(), nil)
if err != nil {
writeError(w, err)
} else {
writeData(w, js)
}
}
}
// StatsHistoryHandler is the handler for dumping statistics at a specified snapshot time.
type StatsHistoryHandler struct {
do *domain.Domain
}
func (s *Server) newStatsHistoryHandler() *StatsHistoryHandler {
store, ok := s.driver.(*TiDBDriver)
if !ok {
panic("Illegal driver")
}
do, err := session.GetDomain(store.store)
if err != nil {
panic("Failed to get domain")
}
return &StatsHistoryHandler{do}
}
func (sh StatsHistoryHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
w.Header().Set("Content-Type", "application/json")
params := mux.Vars(req)
se, err := session.CreateSession(sh.do.Store())
if err != nil {
writeError(w, err)
return
}
se.GetSessionVars().StmtCtx.TimeZone = time.Local
t, err := types.ParseTime(se.GetSessionVars().StmtCtx, params[pSnapshot], mysql.TypeTimestamp, 6)
if err != nil {
writeError(w, err)
return
}
t1, err := t.GoTime(time.Local)
if err != nil {
writeError(w, err)
return
}
snapshot := oracle.GoTimeToTS(t1)
err = gcutil.ValidateSnapshot(se, snapshot)
if err != nil {
writeError(w, err)
return
}
is, err := sh.do.GetSnapshotInfoSchema(snapshot)
if err != nil {
writeError(w, err)
return
}
h := sh.do.StatsHandle()
tbl, err := is.TableByName(model.NewCIStr(params[pDBName]), model.NewCIStr(params[pTableName]))
if err != nil {
writeError(w, err)
return
}
js, err := h.DumpStatsToJSONBySnapshot(params[pDBName], tbl.Meta(), snapshot)
if err != nil {
writeError(w, err)
} else {
writeData(w, js)
}
}
| server/statistics_handler.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.00017627724446356297,
0.00016701752610970289,
0.00015980811440385878,
0.0001662819558987394,
0.000005443811005534371
] |
{
"id": 12,
"code_window": [
"\t// txn3 should locks k2 successfully using no wait\n",
"\ttxn3 := s.begin(c)\n",
"\ttxn3.SetOption(kv.Pessimistic, true)\n",
"\tlockCtx3 := &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}\n",
"\tc.Assert(failpoint.Enable(\"github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL\", \"return\"), IsNil)\n",
"\terr = txn3.LockKeys(context.Background(), lockCtx3, k2)\n",
"\tc.Assert(failpoint.Disable(\"github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL\"), IsNil)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlockCtx3 := &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tikv.LockNoWait}\n"
],
"file_path": "store/tikv/tests/2pc_test.go",
"type": "replace",
"edit_start_line_idx": 1020
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"bytes"
"fmt"
math2 "math"
"github.com/pingcap/errors"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/charset"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/opcode"
"github.com/pingcap/parser/terror"
ptypes "github.com/pingcap/parser/types"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/planner/property"
"github.com/pingcap/tidb/privilege"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/types"
driver "github.com/pingcap/tidb/types/parser_driver"
tidbutil "github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/math"
"github.com/pingcap/tidb/util/plancodec"
"github.com/pingcap/tidb/util/stringutil"
"github.com/pingcap/tipb/go-tipb"
"go.uber.org/zap"
)
// PointGetPlan is a fast plan for simple point get.
// When we detect that the statement has a unique equal access condition, this plan is used.
// This plan is much faster to build and to execute because it avoids the optimization and coprocessor cost.
type PointGetPlan struct {
basePlan
dbName string
schema *expression.Schema
TblInfo *model.TableInfo
IndexInfo *model.IndexInfo
PartitionInfo *model.PartitionDefinition
Handle kv.Handle
HandleParam *driver.ParamMarkerExpr
IndexValues []types.Datum
IndexValueParams []*driver.ParamMarkerExpr
IdxCols []*expression.Column
IdxColLens []int
AccessConditions []expression.Expression
ctx sessionctx.Context
UnsignedHandle bool
IsTableDual bool
Lock bool
outputNames []*types.FieldName
LockWaitTime int64
partitionColumnPos int
Columns []*model.ColumnInfo
cost float64
}
type nameValuePair struct {
colName string
value types.Datum
param *driver.ParamMarkerExpr
}
// Schema implements the Plan interface.
func (p *PointGetPlan) Schema() *expression.Schema {
return p.schema
}
// Cost implements PhysicalPlan interface
func (p *PointGetPlan) Cost() float64 {
return p.cost
}
// SetCost implements PhysicalPlan interface
func (p *PointGetPlan) SetCost(cost float64) {
p.cost = cost
}
// attach2Task makes the current physical plan the parent of the task's physicalPlan and updates the cost of the
// current task. If the child's task is a cop task, some operator may close this task and return a new rootTask.
func (p *PointGetPlan) attach2Task(...task) task {
return nil
}
// ToPB converts physical plan to tipb executor.
func (p *PointGetPlan) ToPB(ctx sessionctx.Context, _ kv.StoreType) (*tipb.Executor, error) {
return nil, nil
}
// Clone implements PhysicalPlan interface.
func (p *PointGetPlan) Clone() (PhysicalPlan, error) {
return nil, errors.Errorf("%T doesn't support cloning.", p)
}
// ExplainInfo implements Plan interface.
func (p *PointGetPlan) ExplainInfo() string {
accessObject, operatorInfo := p.AccessObject(false), p.OperatorInfo(false)
if len(operatorInfo) == 0 {
return accessObject
}
return accessObject + ", " + operatorInfo
}
// ExplainNormalizedInfo implements Plan interface.
func (p *PointGetPlan) ExplainNormalizedInfo() string {
accessObject, operatorInfo := p.AccessObject(true), p.OperatorInfo(true)
if len(operatorInfo) == 0 {
return accessObject
}
return accessObject + ", " + operatorInfo
}
// AccessObject implements dataAccesser interface.
func (p *PointGetPlan) AccessObject(normalized bool) string {
buffer := bytes.NewBufferString("")
tblName := p.TblInfo.Name.O
fmt.Fprintf(buffer, "table:%s", tblName)
if p.PartitionInfo != nil {
if normalized {
fmt.Fprintf(buffer, ", partition:?")
} else {
fmt.Fprintf(buffer, ", partition:%s", p.PartitionInfo.Name.L)
}
}
if p.IndexInfo != nil {
if p.IndexInfo.Primary && p.TblInfo.IsCommonHandle {
buffer.WriteString(", clustered index:" + p.IndexInfo.Name.O + "(")
} else {
buffer.WriteString(", index:" + p.IndexInfo.Name.O + "(")
}
for i, idxCol := range p.IndexInfo.Columns {
if tblCol := p.TblInfo.Columns[idxCol.Offset]; tblCol.Hidden {
buffer.WriteString(tblCol.GeneratedExprString)
} else {
buffer.WriteString(idxCol.Name.O)
}
if i+1 < len(p.IndexInfo.Columns) {
buffer.WriteString(", ")
}
}
buffer.WriteString(")")
}
return buffer.String()
}
// OperatorInfo implements dataAccesser interface.
func (p *PointGetPlan) OperatorInfo(normalized bool) string {
buffer := bytes.NewBufferString("")
if p.Handle != nil {
if normalized {
fmt.Fprintf(buffer, "handle:?, ")
} else {
if p.UnsignedHandle {
fmt.Fprintf(buffer, "handle:%d, ", uint64(p.Handle.IntValue()))
} else {
fmt.Fprintf(buffer, "handle:%s, ", p.Handle)
}
}
}
if p.Lock {
fmt.Fprintf(buffer, "lock, ")
}
if buffer.Len() >= 2 {
buffer.Truncate(buffer.Len() - 2)
}
return buffer.String()
}
// ExtractCorrelatedCols implements PhysicalPlan interface.
func (p *PointGetPlan) ExtractCorrelatedCols() []*expression.CorrelatedColumn {
return nil
}
// GetChildReqProps gets the required property by child index.
func (p *PointGetPlan) GetChildReqProps(idx int) *property.PhysicalProperty {
return nil
}
// StatsCount will return the RowCount of property.StatsInfo for this plan.
func (p *PointGetPlan) StatsCount() float64 {
return 1
}
// statsInfo will return the RowCount of property.StatsInfo for this plan.
func (p *PointGetPlan) statsInfo() *property.StatsInfo {
if p.stats == nil {
p.stats = &property.StatsInfo{}
}
p.stats.RowCount = 1
return p.stats
}
// Children gets all the children.
func (p *PointGetPlan) Children() []PhysicalPlan {
return nil
}
// SetChildren sets the children for the plan.
func (p *PointGetPlan) SetChildren(...PhysicalPlan) {}
// SetChild sets a specific child for the plan.
func (p *PointGetPlan) SetChild(i int, child PhysicalPlan) {}
// ResolveIndices resolves the indices for columns. After doing this, the columns can evaluate the rows by their indices.
func (p *PointGetPlan) ResolveIndices() error {
return resolveIndicesForVirtualColumn(p.schema.Columns, p.schema)
}
// OutputNames returns the outputting names of each column.
func (p *PointGetPlan) OutputNames() types.NameSlice {
return p.outputNames
}
// SetOutputNames sets the outputting name by the given slice.
func (p *PointGetPlan) SetOutputNames(names types.NameSlice) {
p.outputNames = names
}
// GetCost returns cost of the PointGetPlan.
func (p *PointGetPlan) GetCost(cols []*expression.Column) float64 {
sessVars := p.ctx.GetSessionVars()
var rowSize float64
cost := 0.0
if p.IndexInfo == nil {
rowSize = p.stats.HistColl.GetTableAvgRowSize(p.ctx, cols, kv.TiKV, true)
} else {
rowSize = p.stats.HistColl.GetIndexAvgRowSize(p.ctx, cols, p.IndexInfo.Unique)
}
cost += rowSize * sessVars.NetworkFactor
cost += sessVars.SeekFactor
cost /= float64(sessVars.DistSQLScanConcurrency())
return cost
}
// BatchPointGetPlan represents a physical plan which contains a bunch of
// keys reference the same table and use the same `unique key`
type BatchPointGetPlan struct {
baseSchemaProducer
ctx sessionctx.Context
dbName string
TblInfo *model.TableInfo
IndexInfo *model.IndexInfo
Handles []kv.Handle
HandleParams []*driver.ParamMarkerExpr
IndexValues [][]types.Datum
IndexValueParams [][]*driver.ParamMarkerExpr
AccessConditions []expression.Expression
IdxCols []*expression.Column
IdxColLens []int
PartitionColPos int
KeepOrder bool
Desc bool
Lock bool
LockWaitTime int64
Columns []*model.ColumnInfo
cost float64
// SinglePart indicates whether this BatchPointGetPlan is just for a single partition, instead of the whole partition table.
	// If the BatchPointGetPlan is built in the fast path, this value is false; if the plan is generated in physical optimization for a partition,
	// this value would be true. This value would decide the behavior of BatchPointGetExec, i.e., whether to compute the table ID of the partition
// on the fly.
SinglePart bool
// PartTblID is the table ID for the specific table partition.
PartTblID int64
}
// Cost implements PhysicalPlan interface
func (p *BatchPointGetPlan) Cost() float64 {
return p.cost
}
// SetCost implements PhysicalPlan interface
func (p *BatchPointGetPlan) SetCost(cost float64) {
p.cost = cost
}
// Clone implements PhysicalPlan interface.
func (p *BatchPointGetPlan) Clone() (PhysicalPlan, error) {
return nil, errors.Errorf("%T doesn't support cloning", p)
}
// ExtractCorrelatedCols implements PhysicalPlan interface.
func (p *BatchPointGetPlan) ExtractCorrelatedCols() []*expression.CorrelatedColumn {
return nil
}
// attach2Task makes the current physical plan the parent of the task's physicalPlan and updates the cost of the
// current task. If the child's task is a cop task, some operator may close this task and return a new rootTask.
func (p *BatchPointGetPlan) attach2Task(...task) task {
return nil
}
// ToPB converts physical plan to tipb executor.
func (p *BatchPointGetPlan) ToPB(ctx sessionctx.Context, _ kv.StoreType) (*tipb.Executor, error) {
return nil, nil
}
// ExplainInfo implements Plan interface.
func (p *BatchPointGetPlan) ExplainInfo() string {
return p.AccessObject(false) + ", " + p.OperatorInfo(false)
}
// ExplainNormalizedInfo implements Plan interface.
func (p *BatchPointGetPlan) ExplainNormalizedInfo() string {
return p.AccessObject(true) + ", " + p.OperatorInfo(true)
}
// AccessObject implements physicalScan interface.
func (p *BatchPointGetPlan) AccessObject(_ bool) string {
buffer := bytes.NewBufferString("")
tblName := p.TblInfo.Name.O
fmt.Fprintf(buffer, "table:%s", tblName)
if p.IndexInfo != nil {
if p.IndexInfo.Primary && p.TblInfo.IsCommonHandle {
buffer.WriteString(", clustered index:" + p.IndexInfo.Name.O + "(")
} else {
buffer.WriteString(", index:" + p.IndexInfo.Name.O + "(")
}
for i, idxCol := range p.IndexInfo.Columns {
if tblCol := p.TblInfo.Columns[idxCol.Offset]; tblCol.Hidden {
buffer.WriteString(tblCol.GeneratedExprString)
} else {
buffer.WriteString(idxCol.Name.O)
}
if i+1 < len(p.IndexInfo.Columns) {
buffer.WriteString(", ")
}
}
buffer.WriteString(")")
}
return buffer.String()
}
// OperatorInfo implements dataAccesser interface.
func (p *BatchPointGetPlan) OperatorInfo(normalized bool) string {
buffer := bytes.NewBufferString("")
if p.IndexInfo == nil {
if normalized {
fmt.Fprintf(buffer, "handle:?, ")
} else {
fmt.Fprintf(buffer, "handle:%v, ", p.Handles)
}
}
fmt.Fprintf(buffer, "keep order:%v, ", p.KeepOrder)
fmt.Fprintf(buffer, "desc:%v, ", p.Desc)
if p.Lock {
fmt.Fprintf(buffer, "lock, ")
}
if buffer.Len() >= 2 {
buffer.Truncate(buffer.Len() - 2)
}
return buffer.String()
}
// GetChildReqProps gets the required property by child index.
func (p *BatchPointGetPlan) GetChildReqProps(idx int) *property.PhysicalProperty {
return nil
}
// StatsCount will return the RowCount of property.StatsInfo for this plan.
func (p *BatchPointGetPlan) StatsCount() float64 {
return p.statsInfo().RowCount
}
// statsInfo will return the RowCount of property.StatsInfo for this plan.
func (p *BatchPointGetPlan) statsInfo() *property.StatsInfo {
return p.stats
}
// Children gets all the children.
func (p *BatchPointGetPlan) Children() []PhysicalPlan {
return nil
}
// SetChildren sets the children for the plan.
func (p *BatchPointGetPlan) SetChildren(...PhysicalPlan) {}
// SetChild sets a specific child for the plan.
func (p *BatchPointGetPlan) SetChild(i int, child PhysicalPlan) {}
// ResolveIndices resolves the indices for columns. After doing this, the columns can evaluate the rows by their indices.
func (p *BatchPointGetPlan) ResolveIndices() error {
return resolveIndicesForVirtualColumn(p.schema.Columns, p.schema)
}
// OutputNames returns the outputting names of each column.
func (p *BatchPointGetPlan) OutputNames() types.NameSlice {
return p.names
}
// SetOutputNames sets the outputting name by the given slice.
func (p *BatchPointGetPlan) SetOutputNames(names types.NameSlice) {
p.names = names
}
// GetCost returns the cost of the BatchPointGetPlan.
func (p *BatchPointGetPlan) GetCost(cols []*expression.Column) float64 {
sessVars := p.ctx.GetSessionVars()
var rowSize, rowCount float64
cost := 0.0
if p.IndexInfo == nil {
rowCount = float64(len(p.Handles))
rowSize = p.stats.HistColl.GetTableAvgRowSize(p.ctx, cols, kv.TiKV, true)
} else {
rowCount = float64(len(p.IndexValues))
rowSize = p.stats.HistColl.GetIndexAvgRowSize(p.ctx, cols, p.IndexInfo.Unique)
}
cost += rowCount * rowSize * sessVars.NetworkFactor
cost += rowCount * sessVars.SeekFactor
cost /= float64(sessVars.DistSQLScanConcurrency())
return cost
}
// PointPlanKey is used to get point plan that is pre-built for multi-statement query.
const PointPlanKey = stringutil.StringerStr("pointPlanKey")
// PointPlanVal is used to store point plan that is pre-built for multi-statement query.
// Save the plan in a struct so even if the point plan is nil, we don't need to try again.
type PointPlanVal struct {
Plan Plan
}
// TryFastPlan tries to use the PointGetPlan for the query.
func TryFastPlan(ctx sessionctx.Context, node ast.Node) (p Plan) {
ctx.GetSessionVars().PlanID = 0
ctx.GetSessionVars().PlanColumnID = 0
switch x := node.(type) {
case *ast.SelectStmt:
defer func() {
if ctx.GetSessionVars().SelectLimit != math2.MaxUint64 && p != nil {
ctx.GetSessionVars().StmtCtx.AppendWarning(errors.New("sql_select_limit is set, so point get plan is not activated"))
p = nil
}
}()
// Try to convert the `SELECT a, b, c FROM t WHERE (a, b, c) in ((1, 2, 4), (1, 3, 5))` to
		// `PhysicalUnionAll` whose children are `PointGet` if there exists a unique key (a, b, c) in table `t`
if fp := tryWhereIn2BatchPointGet(ctx, x); fp != nil {
if checkFastPlanPrivilege(ctx, fp.dbName, fp.TblInfo.Name.L, mysql.SelectPriv) != nil {
return
}
if tidbutil.IsMemDB(fp.dbName) {
return nil
}
fp.Lock, fp.LockWaitTime = getLockWaitTime(ctx, x.LockInfo)
p = fp
return
}
if fp := tryPointGetPlan(ctx, x, isForUpdateReadSelectLock(x.LockInfo)); fp != nil {
if checkFastPlanPrivilege(ctx, fp.dbName, fp.TblInfo.Name.L, mysql.SelectPriv) != nil {
return nil
}
if tidbutil.IsMemDB(fp.dbName) {
return nil
}
if fp.IsTableDual {
tableDual := PhysicalTableDual{}
tableDual.names = fp.outputNames
tableDual.SetSchema(fp.Schema())
p = tableDual.Init(ctx, &property.StatsInfo{}, 0)
return
}
fp.Lock, fp.LockWaitTime = getLockWaitTime(ctx, x.LockInfo)
p = fp
return
}
case *ast.UpdateStmt:
return tryUpdatePointPlan(ctx, x)
case *ast.DeleteStmt:
return tryDeletePointPlan(ctx, x)
}
return nil
}
// IsSelectForUpdateLockType checks if the select lock type is for update type.
func IsSelectForUpdateLockType(lockType ast.SelectLockType) bool {
if lockType == ast.SelectLockForUpdate ||
lockType == ast.SelectLockForShare ||
lockType == ast.SelectLockForUpdateNoWait ||
lockType == ast.SelectLockForUpdateWaitN {
return true
}
return false
}
func getLockWaitTime(ctx sessionctx.Context, lockInfo *ast.SelectLockInfo) (lock bool, waitTime int64) {
if lockInfo != nil {
if IsSelectForUpdateLockType(lockInfo.LockType) {
// Locking of rows for update using SELECT FOR UPDATE only applies when autocommit
// is disabled (either by beginning transaction with START TRANSACTION or by setting
			// autocommit to 0). If autocommit is enabled, the rows matching the specification are not locked.
// See https://dev.mysql.com/doc/refman/5.7/en/innodb-locking-reads.html
sessVars := ctx.GetSessionVars()
if !sessVars.IsAutocommit() || sessVars.InTxn() {
lock = true
waitTime = sessVars.LockWaitTimeout
if lockInfo.LockType == ast.SelectLockForUpdateWaitN {
waitTime = int64(lockInfo.WaitSec * 1000)
} else if lockInfo.LockType == ast.SelectLockForUpdateNoWait {
waitTime = kv.LockNoWait
}
}
}
}
return
}
func newBatchPointGetPlan(
ctx sessionctx.Context, patternInExpr *ast.PatternInExpr,
handleCol *model.ColumnInfo, tbl *model.TableInfo, schema *expression.Schema,
names []*types.FieldName, whereColNames []string, indexHints []*ast.IndexHint,
) *BatchPointGetPlan {
statsInfo := &property.StatsInfo{RowCount: float64(len(patternInExpr.List))}
var partitionColName *ast.ColumnName
if tbl.GetPartitionInfo() != nil {
partitionColName = getHashPartitionColumnName(ctx, tbl)
if partitionColName == nil {
return nil
}
}
if handleCol != nil {
var handles = make([]kv.Handle, len(patternInExpr.List))
var handleParams = make([]*driver.ParamMarkerExpr, len(patternInExpr.List))
for i, item := range patternInExpr.List {
// SELECT * FROM t WHERE (key) in ((1), (2))
if p, ok := item.(*ast.ParenthesesExpr); ok {
item = p.Expr
}
var d types.Datum
var param *driver.ParamMarkerExpr
switch x := item.(type) {
case *driver.ValueExpr:
d = x.Datum
case *driver.ParamMarkerExpr:
d = x.Datum
param = x
default:
return nil
}
if d.IsNull() {
return nil
}
if !checkCanConvertInPointGet(handleCol, d) {
return nil
}
intDatum, err := d.ConvertTo(ctx.GetSessionVars().StmtCtx, &handleCol.FieldType)
if err != nil {
return nil
}
// The converted result must be same as original datum
cmp, err := intDatum.CompareDatum(ctx.GetSessionVars().StmtCtx, &d)
if err != nil || cmp != 0 {
return nil
}
handles[i] = kv.IntHandle(intDatum.GetInt64())
handleParams[i] = param
}
return BatchPointGetPlan{
TblInfo: tbl,
Handles: handles,
HandleParams: handleParams,
}.Init(ctx, statsInfo, schema, names, 0)
}
// The columns in where clause should be covered by unique index
var matchIdxInfo *model.IndexInfo
permutations := make([]int, len(whereColNames))
colInfos := make([]*model.ColumnInfo, len(whereColNames))
for i, innerCol := range whereColNames {
for _, col := range tbl.Columns {
if col.Name.L == innerCol {
colInfos[i] = col
}
}
}
for _, idxInfo := range tbl.Indices {
if !idxInfo.Unique || idxInfo.State != model.StatePublic || idxInfo.Invisible ||
!indexIsAvailableByHints(idxInfo, indexHints) {
continue
}
if len(idxInfo.Columns) != len(whereColNames) || idxInfo.HasPrefixIndex() {
continue
}
// TODO: not sure is there any function to reuse
matched := true
for whereColIndex, innerCol := range whereColNames {
var found bool
for i, col := range idxInfo.Columns {
if innerCol == col.Name.L {
permutations[whereColIndex] = i
found = true
break
}
}
if !found {
matched = false
break
}
}
if matched {
matchIdxInfo = idxInfo
break
}
}
if matchIdxInfo == nil {
return nil
}
indexValues := make([][]types.Datum, len(patternInExpr.List))
indexValueParams := make([][]*driver.ParamMarkerExpr, len(patternInExpr.List))
for i, item := range patternInExpr.List {
// SELECT * FROM t WHERE (key) in ((1), (2))
if p, ok := item.(*ast.ParenthesesExpr); ok {
item = p.Expr
}
var values []types.Datum
var valuesParams []*driver.ParamMarkerExpr
switch x := item.(type) {
case *ast.RowExpr:
// The `len(values) == len(valuesParams)` should be satisfied in this mode
if len(x.Values) != len(whereColNames) {
return nil
}
values = make([]types.Datum, len(x.Values))
valuesParams = make([]*driver.ParamMarkerExpr, len(x.Values))
for index, inner := range x.Values {
permIndex := permutations[index]
switch innerX := inner.(type) {
case *driver.ValueExpr:
if !checkCanConvertInPointGet(colInfos[index], innerX.Datum) {
return nil
}
values[permIndex] = innerX.Datum
case *driver.ParamMarkerExpr:
if !checkCanConvertInPointGet(colInfos[index], innerX.Datum) {
return nil
}
values[permIndex] = innerX.Datum
valuesParams[permIndex] = innerX
default:
return nil
}
}
case *driver.ValueExpr:
// if any item is `ValueExpr` type, `Expr` should contain only one column,
// otherwise column count doesn't match and no plan can be built.
if len(whereColNames) != 1 {
return nil
}
if !checkCanConvertInPointGet(colInfos[0], x.Datum) {
return nil
}
values = []types.Datum{x.Datum}
case *driver.ParamMarkerExpr:
if len(whereColNames) != 1 {
return nil
}
if !checkCanConvertInPointGet(colInfos[0], x.Datum) {
return nil
}
values = []types.Datum{x.Datum}
valuesParams = []*driver.ParamMarkerExpr{x}
default:
return nil
}
indexValues[i] = values
indexValueParams[i] = valuesParams
}
return BatchPointGetPlan{
TblInfo: tbl,
IndexInfo: matchIdxInfo,
IndexValues: indexValues,
IndexValueParams: indexValueParams,
PartitionColPos: getPartitionColumnPos(matchIdxInfo, partitionColName),
}.Init(ctx, statsInfo, schema, names, 0)
}
func tryWhereIn2BatchPointGet(ctx sessionctx.Context, selStmt *ast.SelectStmt) *BatchPointGetPlan {
if selStmt.OrderBy != nil || selStmt.GroupBy != nil ||
selStmt.Limit != nil || selStmt.Having != nil ||
len(selStmt.WindowSpecs) > 0 {
return nil
}
in, ok := selStmt.Where.(*ast.PatternInExpr)
if !ok || in.Not || len(in.List) < 1 {
return nil
}
tblName, tblAlias := getSingleTableNameAndAlias(selStmt.From)
if tblName == nil {
return nil
}
tbl := tblName.TableInfo
if tbl == nil {
return nil
}
// Skip the optimization with partition selection.
if len(tblName.PartitionNames) > 0 {
return nil
}
for _, col := range tbl.Columns {
if col.IsGenerated() || col.State != model.StatePublic {
return nil
}
}
schema, names := buildSchemaFromFields(tblName.Schema, tbl, tblAlias, selStmt.Fields.Fields)
if schema == nil {
return nil
}
var (
handleCol *model.ColumnInfo
whereColNames []string
)
// SELECT * FROM t WHERE (key) in ((1), (2))
colExpr := in.Expr
if p, ok := colExpr.(*ast.ParenthesesExpr); ok {
colExpr = p.Expr
}
switch colName := colExpr.(type) {
case *ast.ColumnNameExpr:
if name := colName.Name.Table.L; name != "" && name != tblAlias.L {
return nil
}
// Try use handle
if tbl.PKIsHandle {
for _, col := range tbl.Columns {
if mysql.HasPriKeyFlag(col.Flag) && col.Name.L == colName.Name.Name.L {
handleCol = col
whereColNames = append(whereColNames, col.Name.L)
break
}
}
}
if handleCol == nil {
// Downgrade to use unique index
whereColNames = append(whereColNames, colName.Name.Name.L)
}
case *ast.RowExpr:
for _, col := range colName.Values {
c, ok := col.(*ast.ColumnNameExpr)
if !ok {
return nil
}
if name := c.Name.Table.L; name != "" && name != tblAlias.L {
return nil
}
whereColNames = append(whereColNames, c.Name.Name.L)
}
default:
return nil
}
p := newBatchPointGetPlan(ctx, in, handleCol, tbl, schema, names, whereColNames, tblName.IndexHints)
if p == nil {
return nil
}
p.dbName = tblName.Schema.L
if p.dbName == "" {
p.dbName = ctx.GetSessionVars().CurrentDB
}
return p
}
// tryPointGetPlan determines if the SelectStmt can use a PointGetPlan.
// Returns nil if not applicable.
// To use the PointGetPlan the following rules must be satisfied:
// 1. For the limit clause, the count should be at least 1 and the offset must be 0.
// 2. It must be a single-table select.
// 3. All the columns must be public and not generated.
// 4. The condition is an access path whose range is a unique key.
func tryPointGetPlan(ctx sessionctx.Context, selStmt *ast.SelectStmt, check bool) *PointGetPlan {
if selStmt.Having != nil {
return nil
} else if selStmt.Limit != nil {
count, offset, err := extractLimitCountOffset(ctx, selStmt.Limit)
if err != nil || count == 0 || offset > 0 {
return nil
}
}
tblName, tblAlias := getSingleTableNameAndAlias(selStmt.From)
if tblName == nil {
return nil
}
tbl := tblName.TableInfo
if tbl == nil {
return nil
}
pi := tbl.GetPartitionInfo()
if pi != nil && pi.Type != model.PartitionTypeHash {
return nil
}
for _, col := range tbl.Columns {
// Do not handle generated columns.
if col.IsGenerated() {
return nil
}
// Only handle tables that all columns are public.
if col.State != model.StatePublic {
return nil
}
}
schema, names := buildSchemaFromFields(tblName.Schema, tbl, tblAlias, selStmt.Fields.Fields)
if schema == nil {
return nil
}
dbName := tblName.Schema.L
if dbName == "" {
dbName = ctx.GetSessionVars().CurrentDB
}
pairs := make([]nameValuePair, 0, 4)
pairs, isTableDual := getNameValuePairs(ctx.GetSessionVars().StmtCtx, tbl, tblAlias, pairs, selStmt.Where)
if pairs == nil && !isTableDual {
return nil
}
var partitionInfo *model.PartitionDefinition
var pos int
if pi != nil {
partitionInfo, pos = getPartitionInfo(ctx, tbl, pairs)
if partitionInfo == nil {
return nil
}
// Take partition selection into consideration.
if len(tblName.PartitionNames) > 0 {
if !partitionNameInSet(partitionInfo.Name, tblName.PartitionNames) {
p := newPointGetPlan(ctx, tblName.Schema.O, schema, tbl, names)
p.IsTableDual = true
return p
}
}
}
handlePair, fieldType := findPKHandle(tbl, pairs)
if handlePair.value.Kind() != types.KindNull && len(pairs) == 1 && indexIsAvailableByHints(nil, tblName.IndexHints) {
if isTableDual {
p := newPointGetPlan(ctx, tblName.Schema.O, schema, tbl, names)
p.IsTableDual = true
return p
}
p := newPointGetPlan(ctx, dbName, schema, tbl, names)
p.Handle = kv.IntHandle(handlePair.value.GetInt64())
p.UnsignedHandle = mysql.HasUnsignedFlag(fieldType.Flag)
p.HandleParam = handlePair.param
p.PartitionInfo = partitionInfo
return p
} else if handlePair.value.Kind() != types.KindNull {
return nil
}
check = check && ctx.GetSessionVars().ConnectionID > 0
var latestIndexes map[int64]*model.IndexInfo
var err error
for _, idxInfo := range tbl.Indices {
if !idxInfo.Unique || idxInfo.State != model.StatePublic || idxInfo.Invisible ||
!indexIsAvailableByHints(idxInfo, tblName.IndexHints) {
continue
}
if isTableDual {
if check && latestIndexes == nil {
latestIndexes, check, err = getLatestIndexInfo(ctx, tbl.ID, 0)
if err != nil {
logutil.BgLogger().Warn("get information schema failed", zap.Error(err))
return nil
}
}
if check {
if latestIndex, ok := latestIndexes[idxInfo.ID]; !ok || latestIndex.State != model.StatePublic {
continue
}
}
p := newPointGetPlan(ctx, tblName.Schema.O, schema, tbl, names)
p.IsTableDual = true
return p
}
idxValues, idxValueParams := getIndexValues(idxInfo, pairs)
if idxValues == nil {
continue
}
if check && latestIndexes == nil {
latestIndexes, check, err = getLatestIndexInfo(ctx, tbl.ID, 0)
if err != nil {
logutil.BgLogger().Warn("get information schema failed", zap.Error(err))
return nil
}
}
if check {
if latestIndex, ok := latestIndexes[idxInfo.ID]; !ok || latestIndex.State != model.StatePublic {
continue
}
}
p := newPointGetPlan(ctx, dbName, schema, tbl, names)
p.IndexInfo = idxInfo
p.IndexValues = idxValues
p.IndexValueParams = idxValueParams
p.PartitionInfo = partitionInfo
if p.PartitionInfo != nil {
p.partitionColumnPos = findPartitionIdx(idxInfo, pos, pairs)
}
return p
}
return nil
}
// indexIsAvailableByHints checks whether this index is filtered by these specified index hints.
// idxInfo is PK if it's nil
func indexIsAvailableByHints(idxInfo *model.IndexInfo, idxHints []*ast.IndexHint) bool {
if len(idxHints) == 0 {
return true
}
match := func(name model.CIStr) bool {
if idxInfo == nil {
return name.L == "primary"
}
return idxInfo.Name.L == name.L
}
// NOTICE: it's supposed that ignore hints and use/force hints will not be applied together since the effect of
// the former will be eliminated by the latter.
isIgnore := false
for _, hint := range idxHints {
if hint.HintScope != ast.HintForScan {
continue
}
if hint.HintType == ast.HintIgnore && hint.IndexNames != nil {
isIgnore = true
for _, name := range hint.IndexNames {
if match(name) {
return false
}
}
}
if (hint.HintType == ast.HintForce || hint.HintType == ast.HintUse) && hint.IndexNames != nil {
for _, name := range hint.IndexNames {
if match(name) {
return true
}
}
}
}
return isIgnore
}
func partitionNameInSet(name model.CIStr, pnames []model.CIStr) bool {
for _, pname := range pnames {
// Case insensitive, create table partition p0, query using P0 is OK.
if name.L == pname.L {
return true
}
}
return false
}
func newPointGetPlan(ctx sessionctx.Context, dbName string, schema *expression.Schema, tbl *model.TableInfo, names []*types.FieldName) *PointGetPlan {
p := &PointGetPlan{
basePlan: newBasePlan(ctx, plancodec.TypePointGet, 0),
dbName: dbName,
schema: schema,
TblInfo: tbl,
outputNames: names,
LockWaitTime: ctx.GetSessionVars().LockWaitTimeout,
}
ctx.GetSessionVars().StmtCtx.Tables = []stmtctx.TableEntry{{DB: dbName, Table: tbl.Name.L}}
return p
}
func checkFastPlanPrivilege(ctx sessionctx.Context, dbName, tableName string, checkTypes ...mysql.PrivilegeType) error {
pm := privilege.GetPrivilegeManager(ctx)
var visitInfos []visitInfo
for _, checkType := range checkTypes {
if pm != nil && !pm.RequestVerification(ctx.GetSessionVars().ActiveRoles, dbName, tableName, "", checkType) {
return errors.New("privilege check fail")
}
// This visitInfo is only for table lock check, so we do not need column field,
// just fill it empty string.
visitInfos = append(visitInfos, visitInfo{
privilege: checkType,
db: dbName,
table: tableName,
column: "",
err: nil,
})
}
infoSchema := infoschema.GetInfoSchema(ctx)
return CheckTableLock(ctx, infoSchema, visitInfos)
}
func buildSchemaFromFields(
dbName model.CIStr,
tbl *model.TableInfo,
tblName model.CIStr,
fields []*ast.SelectField,
) (
*expression.Schema,
[]*types.FieldName,
) {
columns := make([]*expression.Column, 0, len(tbl.Columns)+1)
names := make([]*types.FieldName, 0, len(tbl.Columns)+1)
if len(fields) > 0 {
for _, field := range fields {
if field.WildCard != nil {
if field.WildCard.Table.L != "" && field.WildCard.Table.L != tblName.L {
return nil, nil
}
for _, col := range tbl.Columns {
names = append(names, &types.FieldName{
DBName: dbName,
OrigTblName: tbl.Name,
TblName: tblName,
ColName: col.Name,
})
columns = append(columns, colInfoToColumn(col, len(columns)))
}
continue
}
colNameExpr, ok := field.Expr.(*ast.ColumnNameExpr)
if !ok {
return nil, nil
}
if colNameExpr.Name.Table.L != "" && colNameExpr.Name.Table.L != tblName.L {
return nil, nil
}
col := findCol(tbl, colNameExpr.Name)
if col == nil {
return nil, nil
}
asName := colNameExpr.Name.Name
if field.AsName.L != "" {
asName = field.AsName
}
names = append(names, &types.FieldName{
DBName: dbName,
OrigTblName: tbl.Name,
TblName: tblName,
OrigColName: col.Name,
ColName: asName,
})
columns = append(columns, colInfoToColumn(col, len(columns)))
}
return expression.NewSchema(columns...), names
}
// fields len is 0 for update and delete.
for _, col := range tbl.Columns {
names = append(names, &types.FieldName{
DBName: dbName,
OrigTblName: tbl.Name,
TblName: tblName,
ColName: col.Name,
})
column := colInfoToColumn(col, len(columns))
columns = append(columns, column)
}
schema := expression.NewSchema(columns...)
return schema, names
}
// getSingleTableNameAndAlias returns the AST node of the queried table name and the alias string.
// `tblName` is `nil` if there are multiple tables in the query.
// `tblAlias` will be the real table name if there is no table alias in the query.
func getSingleTableNameAndAlias(tableRefs *ast.TableRefsClause) (tblName *ast.TableName, tblAlias model.CIStr) {
if tableRefs == nil || tableRefs.TableRefs == nil || tableRefs.TableRefs.Right != nil {
return nil, tblAlias
}
tblSrc, ok := tableRefs.TableRefs.Left.(*ast.TableSource)
if !ok {
return nil, tblAlias
}
tblName, ok = tblSrc.Source.(*ast.TableName)
if !ok {
return nil, tblAlias
}
tblAlias = tblSrc.AsName
if tblSrc.AsName.L == "" {
tblAlias = tblName.Name
}
return tblName, tblAlias
}
// getNameValuePairs extracts `column = constant/paramMarker` conditions from expr as name value pairs.
func getNameValuePairs(stmtCtx *stmtctx.StatementContext, tbl *model.TableInfo, tblName model.CIStr, nvPairs []nameValuePair, expr ast.ExprNode) (
pairs []nameValuePair, isTableDual bool) {
binOp, ok := expr.(*ast.BinaryOperationExpr)
if !ok {
return nil, false
}
if binOp.Op == opcode.LogicAnd {
nvPairs, isTableDual = getNameValuePairs(stmtCtx, tbl, tblName, nvPairs, binOp.L)
if nvPairs == nil || isTableDual {
return nil, isTableDual
}
nvPairs, isTableDual = getNameValuePairs(stmtCtx, tbl, tblName, nvPairs, binOp.R)
if nvPairs == nil || isTableDual {
return nil, isTableDual
}
return nvPairs, isTableDual
} else if binOp.Op == opcode.EQ {
var d types.Datum
var colName *ast.ColumnNameExpr
var param *driver.ParamMarkerExpr
var ok bool
if colName, ok = binOp.L.(*ast.ColumnNameExpr); ok {
switch x := binOp.R.(type) {
case *driver.ValueExpr:
d = x.Datum
case *driver.ParamMarkerExpr:
d = x.Datum
param = x
}
} else if colName, ok = binOp.R.(*ast.ColumnNameExpr); ok {
switch x := binOp.L.(type) {
case *driver.ValueExpr:
d = x.Datum
case *driver.ParamMarkerExpr:
d = x.Datum
param = x
}
} else {
return nil, false
}
if d.IsNull() {
return nil, false
}
// Views' columns have no FieldType.
if tbl.IsView() {
return nil, false
}
if colName.Name.Table.L != "" && colName.Name.Table.L != tblName.L {
return nil, false
}
col := model.FindColumnInfo(tbl.Cols(), colName.Name.Name.L)
		if col == nil || // Handle the case where the column is _tidb_rowid.
			(col.Tp == mysql.TypeString && col.Collate == charset.CollationBin) { // Binary-collated strings need no `\0` padding here.
return append(nvPairs, nameValuePair{colName: colName.Name.Name.L, value: d, param: param}), false
}
if !checkCanConvertInPointGet(col, d) {
return nil, false
}
dVal, err := d.ConvertTo(stmtCtx, &col.FieldType)
if err != nil {
if terror.ErrorEqual(types.ErrOverflow, err) {
return append(nvPairs, nameValuePair{colName: colName.Name.Name.L, value: d, param: param}), true
}
			// Some conversions to int return a truncation error, but the value can still be used by the point get plan.
if !terror.ErrorEqual(types.ErrTruncatedWrongVal, err) {
return nil, false
}
}
		// The converted result must be the same as the original datum.
		// Compare them based on dVal's type.
cmp, err := dVal.CompareDatum(stmtCtx, &d)
if err != nil {
return nil, false
} else if cmp != 0 {
return append(nvPairs, nameValuePair{colName: colName.Name.Name.L, value: dVal, param: param}), true
}
return append(nvPairs, nameValuePair{colName: colName.Name.Name.L, value: dVal, param: param}), false
}
return nil, false
}
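// checkCanConvertInPointGet reports whether datum d can be converted to col's type for a
// point get: a string column with a numeric constant, or a bit column with a string
// constant, is rejected.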
func checkCanConvertInPointGet(col *model.ColumnInfo, d types.Datum) bool {
kind := d.Kind()
switch col.FieldType.EvalType() {
case ptypes.ETString:
switch kind {
case types.KindInt64, types.KindUint64,
types.KindFloat32, types.KindFloat64, types.KindMysqlDecimal:
// column type is String and constant type is numeric
return false
}
}
switch col.FieldType.Tp {
case mysql.TypeBit:
switch kind {
case types.KindString:
// column type is Bit and constant type is string
return false
}
}
return true
}
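// findPKHandle returns the pair matching the integer primary key column, or the _tidb_rowid
// pair when the primary key is not the handle, together with the handle's field type.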
func findPKHandle(tblInfo *model.TableInfo, pairs []nameValuePair) (handlePair nameValuePair, fieldType *types.FieldType) {
if !tblInfo.PKIsHandle {
rowIDIdx := findInPairs("_tidb_rowid", pairs)
if rowIDIdx != -1 {
return pairs[rowIDIdx], types.NewFieldType(mysql.TypeLonglong)
}
return handlePair, nil
}
for _, col := range tblInfo.Columns {
if mysql.HasPriKeyFlag(col.Flag) {
i := findInPairs(col.Name.L, pairs)
if i == -1 {
return handlePair, nil
}
return pairs[i], &col.FieldType
}
}
return handlePair, nil
}
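// getIndexValues collects the datum values and parameter markers covering every column of
// idxInfo from pairs. It returns (nil, nil) unless all index columns are matched and the
// index has no prefix column.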
func getIndexValues(idxInfo *model.IndexInfo, pairs []nameValuePair) ([]types.Datum, []*driver.ParamMarkerExpr) {
idxValues := make([]types.Datum, 0, 4)
idxValueParams := make([]*driver.ParamMarkerExpr, 0, 4)
if len(idxInfo.Columns) != len(pairs) {
return nil, nil
}
if idxInfo.HasPrefixIndex() {
return nil, nil
}
for _, idxCol := range idxInfo.Columns {
i := findInPairs(idxCol.Name.L, pairs)
if i == -1 {
return nil, nil
}
idxValues = append(idxValues, pairs[i].value)
idxValueParams = append(idxValueParams, pairs[i].param)
}
if len(idxValues) > 0 {
return idxValues, idxValueParams
}
return nil, nil
}
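// findInPairs returns the position of colName in pairs, or -1 if it is absent.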
func findInPairs(colName string, pairs []nameValuePair) int {
for i, pair := range pairs {
if pair.colName == colName {
return i
}
}
return -1
}
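// tryUpdatePointPlan tries to convert an UPDATE into a plan based on a point get or batch
// point get, e.g. UPDATE t SET b = 1 WHERE a = 1 when a is the primary key or a unique key.
// It returns nil when the fast path does not apply.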
func tryUpdatePointPlan(ctx sessionctx.Context, updateStmt *ast.UpdateStmt) Plan {
selStmt := &ast.SelectStmt{
Fields: &ast.FieldList{},
From: updateStmt.TableRefs,
Where: updateStmt.Where,
OrderBy: updateStmt.Order,
Limit: updateStmt.Limit,
}
pointGet := tryPointGetPlan(ctx, selStmt, true)
if pointGet != nil {
if pointGet.IsTableDual {
return PhysicalTableDual{
names: pointGet.outputNames,
}.Init(ctx, &property.StatsInfo{}, 0)
}
if ctx.GetSessionVars().TxnCtx.IsPessimistic {
pointGet.Lock, pointGet.LockWaitTime = getLockWaitTime(ctx, &ast.SelectLockInfo{LockType: ast.SelectLockForUpdate})
}
return buildPointUpdatePlan(ctx, pointGet, pointGet.dbName, pointGet.TblInfo, updateStmt)
}
batchPointGet := tryWhereIn2BatchPointGet(ctx, selStmt)
if batchPointGet != nil {
if ctx.GetSessionVars().TxnCtx.IsPessimistic {
batchPointGet.Lock, batchPointGet.LockWaitTime = getLockWaitTime(ctx, &ast.SelectLockInfo{LockType: ast.SelectLockForUpdate})
}
return buildPointUpdatePlan(ctx, batchPointGet, batchPointGet.dbName, batchPointGet.TblInfo, updateStmt)
}
return nil
}
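// buildPointUpdatePlan wraps the point get (or batch point get) plan in an Update plan,
// after checking privileges and building the ordered assignment list.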
func buildPointUpdatePlan(ctx sessionctx.Context, pointPlan PhysicalPlan, dbName string, tbl *model.TableInfo, updateStmt *ast.UpdateStmt) Plan {
if checkFastPlanPrivilege(ctx, dbName, tbl.Name.L, mysql.SelectPriv, mysql.UpdatePriv) != nil {
return nil
}
orderedList, allAssignmentsAreConstant := buildOrderedList(ctx, pointPlan, updateStmt.List)
if orderedList == nil {
return nil
}
handleCols := buildHandleCols(ctx, tbl, pointPlan.Schema())
updatePlan := Update{
SelectPlan: pointPlan,
OrderedList: orderedList,
TblColPosInfos: TblColPosInfoSlice{
TblColPosInfo{
TblID: tbl.ID,
Start: 0,
End: pointPlan.Schema().Len(),
HandleCols: handleCols,
},
},
AllAssignmentsAreConstant: allAssignmentsAreConstant,
VirtualAssignmentsOffset: len(orderedList),
}.Init(ctx)
updatePlan.names = pointPlan.OutputNames()
is := infoschema.GetInfoSchema(ctx)
t, _ := is.TableByID(tbl.ID)
updatePlan.tblID2Table = map[int64]table.Table{
tbl.ID: t,
}
if tbl.GetPartitionInfo() != nil {
pt := t.(table.PartitionedTable)
var updateTableList []*ast.TableName
updateTableList = extractTableList(updateStmt.TableRefs.TableRefs, updateTableList, true)
updatePlan.PartitionedTable = make([]table.PartitionedTable, 0, len(updateTableList))
for _, updateTable := range updateTableList {
if len(updateTable.PartitionNames) > 0 {
pids := make(map[int64]struct{}, len(updateTable.PartitionNames))
for _, name := range updateTable.PartitionNames {
pid, err := tables.FindPartitionByName(tbl, name.L)
if err != nil {
return updatePlan
}
pids[pid] = struct{}{}
}
pt = tables.NewPartitionTableithGivenSets(pt, pids)
}
updatePlan.PartitionedTable = append(updatePlan.PartitionedTable, pt)
}
}
return updatePlan
}
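// buildOrderedList resolves the UPDATE assignment list into expression.Assignments bound to
// the plan's schema, and reports whether every assigned value is a constant.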
func buildOrderedList(ctx sessionctx.Context, plan Plan, list []*ast.Assignment,
) (orderedList []*expression.Assignment, allAssignmentsAreConstant bool) {
orderedList = make([]*expression.Assignment, 0, len(list))
allAssignmentsAreConstant = true
for _, assign := range list {
idx, err := expression.FindFieldName(plan.OutputNames(), assign.Column)
if idx == -1 || err != nil {
return nil, true
}
col := plan.Schema().Columns[idx]
newAssign := &expression.Assignment{
Col: col,
ColName: plan.OutputNames()[idx].ColName,
}
expr, err := expression.RewriteSimpleExprWithNames(ctx, assign.Expr, plan.Schema(), plan.OutputNames())
if err != nil {
return nil, true
}
expr = expression.BuildCastFunction(ctx, expr, col.GetType())
if allAssignmentsAreConstant {
_, isConst := expr.(*expression.Constant)
allAssignmentsAreConstant = isConst
}
newAssign.Expr, err = expr.ResolveIndices(plan.Schema())
if err != nil {
return nil, true
}
orderedList = append(orderedList, newAssign)
}
return orderedList, allAssignmentsAreConstant
}
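// tryDeletePointPlan tries to convert a single-table DELETE into a plan based on a point
// get or batch point get. It returns nil when the fast path does not apply.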
func tryDeletePointPlan(ctx sessionctx.Context, delStmt *ast.DeleteStmt) Plan {
if delStmt.IsMultiTable {
return nil
}
selStmt := &ast.SelectStmt{
Fields: &ast.FieldList{},
From: delStmt.TableRefs,
Where: delStmt.Where,
OrderBy: delStmt.Order,
Limit: delStmt.Limit,
}
if pointGet := tryPointGetPlan(ctx, selStmt, true); pointGet != nil {
if pointGet.IsTableDual {
return PhysicalTableDual{
names: pointGet.outputNames,
}.Init(ctx, &property.StatsInfo{}, 0)
}
if ctx.GetSessionVars().TxnCtx.IsPessimistic {
pointGet.Lock, pointGet.LockWaitTime = getLockWaitTime(ctx, &ast.SelectLockInfo{LockType: ast.SelectLockForUpdate})
}
return buildPointDeletePlan(ctx, pointGet, pointGet.dbName, pointGet.TblInfo)
}
if batchPointGet := tryWhereIn2BatchPointGet(ctx, selStmt); batchPointGet != nil {
if ctx.GetSessionVars().TxnCtx.IsPessimistic {
batchPointGet.Lock, batchPointGet.LockWaitTime = getLockWaitTime(ctx, &ast.SelectLockInfo{LockType: ast.SelectLockForUpdate})
}
return buildPointDeletePlan(ctx, batchPointGet, batchPointGet.dbName, batchPointGet.TblInfo)
}
return nil
}
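// buildPointDeletePlan wraps the point get (or batch point get) plan in a Delete plan after
// checking the SELECT and DELETE privileges.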
func buildPointDeletePlan(ctx sessionctx.Context, pointPlan PhysicalPlan, dbName string, tbl *model.TableInfo) Plan {
if checkFastPlanPrivilege(ctx, dbName, tbl.Name.L, mysql.SelectPriv, mysql.DeletePriv) != nil {
return nil
}
handleCols := buildHandleCols(ctx, tbl, pointPlan.Schema())
delPlan := Delete{
SelectPlan: pointPlan,
TblColPosInfos: TblColPosInfoSlice{
TblColPosInfo{
TblID: tbl.ID,
Start: 0,
End: pointPlan.Schema().Len(),
HandleCols: handleCols,
},
},
}.Init(ctx)
return delPlan
}
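// findCol returns the column of tbl whose name matches colName, or nil if there is none.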
func findCol(tbl *model.TableInfo, colName *ast.ColumnName) *model.ColumnInfo {
for _, col := range tbl.Columns {
if col.Name.L == colName.Name.L {
return col
}
}
return nil
}
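// colInfoToColumn converts a ColumnInfo into an expression.Column placed at position idx of the schema.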
func colInfoToColumn(col *model.ColumnInfo, idx int) *expression.Column {
return &expression.Column{
RetType: col.FieldType.Clone(),
ID: col.ID,
UniqueID: int64(col.Offset),
Index: idx,
OrigName: col.Name.L,
}
}
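// buildHandleCols constructs the handle columns for the table: the integer primary key
// column, the clustered index (common handle) columns, or an extra _tidb_rowid column
// appended to the schema.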
func buildHandleCols(ctx sessionctx.Context, tbl *model.TableInfo, schema *expression.Schema) HandleCols {
	// len(fields) is 0 for UPDATE and DELETE statements.
if tbl.PKIsHandle {
for i, col := range tbl.Columns {
if mysql.HasPriKeyFlag(col.Flag) {
return &IntHandleCols{col: schema.Columns[i]}
}
}
}
if tbl.IsCommonHandle {
pkIdx := tables.FindPrimaryIndex(tbl)
return NewCommonHandleCols(ctx.GetSessionVars().StmtCtx, tbl, pkIdx, schema.Columns)
}
handleCol := colInfoToColumn(model.NewExtraHandleColInfo(), schema.Len())
schema.Append(handleCol)
return &IntHandleCols{col: handleCol}
}
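// getPartitionInfo locates the hash partition selected by the equality condition on the
// partition column and returns its definition together with the position of that condition
// in pairs.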
func getPartitionInfo(ctx sessionctx.Context, tbl *model.TableInfo, pairs []nameValuePair) (*model.PartitionDefinition, int) {
partitionColName := getHashPartitionColumnName(ctx, tbl)
if partitionColName == nil {
return nil, 0
}
pi := tbl.Partition
for i, pair := range pairs {
if partitionColName.Name.L == pair.colName {
val := pair.value.GetInt64()
pos := math.Abs(val % int64(pi.Num))
return &pi.Definitions[pos], i
}
}
return nil, 0
}
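// findPartitionIdx returns the position of the partition column within idxInfo, or 0 when
// it is not found.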
func findPartitionIdx(idxInfo *model.IndexInfo, pos int, pairs []nameValuePair) int {
for i, idxCol := range idxInfo.Columns {
if idxCol.Name.L == pairs[pos].colName {
return i
}
}
return 0
}
// getPartitionColumnPos gets the partition column's position in the index.
func getPartitionColumnPos(idx *model.IndexInfo, partitionColName *ast.ColumnName) int {
if partitionColName == nil {
return 0
}
for i, idxCol := range idx.Columns {
if partitionColName.Name.L == idxCol.Name.L {
return i
}
}
panic("unique index must include all partition columns")
}
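// getHashPartitionColumnName returns the partition column name when the table is hash
// partitioned directly on a single column, and nil otherwise.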
func getHashPartitionColumnName(ctx sessionctx.Context, tbl *model.TableInfo) *ast.ColumnName {
pi := tbl.GetPartitionInfo()
if pi == nil {
return nil
}
if pi.Type != model.PartitionTypeHash {
return nil
}
is := infoschema.GetInfoSchema(ctx)
table, ok := is.TableByID(tbl.ID)
if !ok {
return nil
}
	// PartitionExpr doesn't need columns and names for hash partitioning.
partitionExpr, err := table.(partitionTable).PartitionExpr()
if err != nil {
return nil
}
expr := partitionExpr.OrigExpr
col, ok := expr.(*ast.ColumnNameExpr)
if !ok {
return nil
}
return col.Name
}
| planner/core/point_get_plan.go | 1 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.00546275544911623,
0.0002928764733951539,
0.00016036925080697984,
0.00017033253971021622,
0.0006944581400603056
] |
{
"id": 12,
"code_window": [
"\t// txn3 should locks k2 successfully using no wait\n",
"\ttxn3 := s.begin(c)\n",
"\ttxn3.SetOption(kv.Pessimistic, true)\n",
"\tlockCtx3 := &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}\n",
"\tc.Assert(failpoint.Enable(\"github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL\", \"return\"), IsNil)\n",
"\terr = txn3.LockKeys(context.Background(), lockCtx3, k2)\n",
"\tc.Assert(failpoint.Disable(\"github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL\"), IsNil)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlockCtx3 := &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tikv.LockNoWait}\n"
],
"file_path": "store/tikv/tests/2pc_test.go",
"type": "replace",
"edit_start_line_idx": 1020
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package json
import (
"bytes"
"encoding/binary"
"encoding/hex"
"fmt"
"math"
"sort"
"unicode/utf8"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/util/hack"
"github.com/pingcap/tidb/util/stringutil"
)
// Type returns type of BinaryJSON as string.
func (bj BinaryJSON) Type() string {
switch bj.TypeCode {
case TypeCodeObject:
return "OBJECT"
case TypeCodeArray:
return "ARRAY"
case TypeCodeLiteral:
switch bj.Value[0] {
case LiteralNil:
return "NULL"
default:
return "BOOLEAN"
}
case TypeCodeInt64:
return "INTEGER"
case TypeCodeUint64:
return "UNSIGNED INTEGER"
case TypeCodeFloat64:
return "DOUBLE"
case TypeCodeString:
return "STRING"
default:
msg := fmt.Sprintf(unknownTypeCodeErrorMsg, bj.TypeCode)
panic(msg)
}
}
// Unquote is for JSON_UNQUOTE.
func (bj BinaryJSON) Unquote() (string, error) {
switch bj.TypeCode {
case TypeCodeString:
str := string(hack.String(bj.GetString()))
return UnquoteString(str)
default:
return bj.String(), nil
}
}
// UnquoteString remove quotes in a string,
// including the quotes at the head and tail of string.
func UnquoteString(str string) (string, error) {
strLen := len(str)
if strLen < 2 {
return str, nil
}
head, tail := str[0], str[strLen-1]
if head == '"' && tail == '"' {
// Remove prefix and suffix '"' before unquoting
return unquoteString(str[1 : strLen-1])
}
// if value is not double quoted, do nothing
return str, nil
}
// unquoteString recognizes the escape sequences shown in:
// https://dev.mysql.com/doc/refman/5.7/en/json-modification-functions.html#json-unquote-character-escape-sequences
func unquoteString(s string) (string, error) {
ret := new(bytes.Buffer)
for i := 0; i < len(s); i++ {
if s[i] == '\\' {
i++
if i == len(s) {
return "", errors.New("Missing a closing quotation mark in string")
}
switch s[i] {
case '"':
ret.WriteByte('"')
case 'b':
ret.WriteByte('\b')
case 'f':
ret.WriteByte('\f')
case 'n':
ret.WriteByte('\n')
case 'r':
ret.WriteByte('\r')
case 't':
ret.WriteByte('\t')
case '\\':
ret.WriteByte('\\')
case 'u':
if i+4 > len(s) {
return "", errors.Errorf("Invalid unicode: %s", s[i+1:])
}
char, size, err := decodeEscapedUnicode(hack.Slice(s[i+1 : i+5]))
if err != nil {
return "", errors.Trace(err)
}
ret.Write(char[0:size])
i += 4
default:
// For all other escape sequences, backslash is ignored.
ret.WriteByte(s[i])
}
} else {
ret.WriteByte(s[i])
}
}
return ret.String(), nil
}
// decodeEscapedUnicode decodes unicode into utf8 bytes specified in RFC 3629.
// According RFC 3629, the max length of utf8 characters is 4 bytes.
// And MySQL use 4 bytes to represent the unicode which must be in [0, 65536).
func decodeEscapedUnicode(s []byte) (char [4]byte, size int, err error) {
size, err = hex.Decode(char[0:2], s)
if err != nil || size != 2 {
// The unicode must can be represented in 2 bytes.
return char, 0, errors.Trace(err)
}
unicode := binary.BigEndian.Uint16(char[0:2])
size = utf8.RuneLen(rune(unicode))
utf8.EncodeRune(char[0:size], rune(unicode))
return
}
// quoteString escapes interior quote and other characters for JSON_QUOTE
// https://dev.mysql.com/doc/refman/5.7/en/json-creation-functions.html#function_json-quote
// TODO: add JSON_QUOTE builtin
func quoteString(s string) string {
var escapeByteMap = map[byte]string{
'\\': "\\\\",
'"': "\\\"",
'\b': "\\b",
'\f': "\\f",
'\n': "\\n",
'\r': "\\r",
'\t': "\\t",
}
ret := new(bytes.Buffer)
ret.WriteByte('"')
start := 0
hasEscaped := false
for i := 0; i < len(s); {
if b := s[i]; b < utf8.RuneSelf {
escaped, ok := escapeByteMap[b]
if ok {
if start < i {
ret.WriteString(s[start:i])
}
hasEscaped = true
ret.WriteString(escaped)
i++
start = i
} else {
i++
}
} else {
c, size := utf8.DecodeRune([]byte(s[i:]))
if c == utf8.RuneError && size == 1 { // refer to codes of `binary.marshalStringTo`
if start < i {
ret.WriteString(s[start:i])
}
hasEscaped = true
ret.WriteString(`\ufffd`)
i += size
start = i
continue
}
i += size
}
}
if start < len(s) {
ret.WriteString(s[start:])
}
if hasEscaped {
ret.WriteByte('"')
return ret.String()
}
return ret.String()[1:]
}
// Extract receives several path expressions as arguments, matches them in bj, and returns:
// ret: target JSON matched any path expressions. maybe autowrapped as an array.
// found: true if any path expressions matched.
func (bj BinaryJSON) Extract(pathExprList []PathExpression) (ret BinaryJSON, found bool) {
buf := make([]BinaryJSON, 0, 1)
for _, pathExpr := range pathExprList {
buf = bj.extractTo(buf, pathExpr)
}
if len(buf) == 0 {
found = false
} else if len(pathExprList) == 1 && len(buf) == 1 {
// If pathExpr contains asterisks, len(elemList) won't be 1
// even if len(pathExprList) equals to 1.
found = true
ret = buf[0]
} else {
found = true
ret = buildBinaryArray(buf)
}
return
}
func (bj BinaryJSON) extractTo(buf []BinaryJSON, pathExpr PathExpression) []BinaryJSON {
if len(pathExpr.legs) == 0 {
return append(buf, bj)
}
currentLeg, subPathExpr := pathExpr.popOneLeg()
if currentLeg.typ == pathLegIndex {
if bj.TypeCode != TypeCodeArray {
if currentLeg.arrayIndex <= 0 && currentLeg.arrayIndex != arrayIndexAsterisk {
buf = bj.extractTo(buf, subPathExpr)
}
return buf
}
elemCount := bj.GetElemCount()
if currentLeg.arrayIndex == arrayIndexAsterisk {
for i := 0; i < elemCount; i++ {
buf = bj.arrayGetElem(i).extractTo(buf, subPathExpr)
}
} else if currentLeg.arrayIndex < elemCount {
buf = bj.arrayGetElem(currentLeg.arrayIndex).extractTo(buf, subPathExpr)
}
} else if currentLeg.typ == pathLegKey && bj.TypeCode == TypeCodeObject {
elemCount := bj.GetElemCount()
if currentLeg.dotKey == "*" {
for i := 0; i < elemCount; i++ {
buf = bj.objectGetVal(i).extractTo(buf, subPathExpr)
}
} else {
child, ok := bj.objectSearchKey(hack.Slice(currentLeg.dotKey))
if ok {
buf = child.extractTo(buf, subPathExpr)
}
}
} else if currentLeg.typ == pathLegDoubleAsterisk {
buf = bj.extractTo(buf, subPathExpr)
if bj.TypeCode == TypeCodeArray {
elemCount := bj.GetElemCount()
for i := 0; i < elemCount; i++ {
buf = bj.arrayGetElem(i).extractTo(buf, pathExpr)
}
} else if bj.TypeCode == TypeCodeObject {
elemCount := bj.GetElemCount()
for i := 0; i < elemCount; i++ {
buf = bj.objectGetVal(i).extractTo(buf, pathExpr)
}
}
}
return buf
}
func (bj BinaryJSON) objectSearchKey(key []byte) (BinaryJSON, bool) {
elemCount := bj.GetElemCount()
idx := sort.Search(elemCount, func(i int) bool {
return bytes.Compare(bj.objectGetKey(i), key) >= 0
})
if idx < elemCount && bytes.Equal(bj.objectGetKey(idx), key) {
return bj.objectGetVal(idx), true
}
return BinaryJSON{}, false
}
func buildBinaryArray(elems []BinaryJSON) BinaryJSON {
totalSize := headerSize + len(elems)*valEntrySize
for _, elem := range elems {
if elem.TypeCode != TypeCodeLiteral {
totalSize += len(elem.Value)
}
}
buf := make([]byte, headerSize+len(elems)*valEntrySize, totalSize)
endian.PutUint32(buf, uint32(len(elems)))
endian.PutUint32(buf[dataSizeOff:], uint32(totalSize))
buf = buildBinaryElements(buf, headerSize, elems)
return BinaryJSON{TypeCode: TypeCodeArray, Value: buf}
}
func buildBinaryElements(buf []byte, entryStart int, elems []BinaryJSON) []byte {
for i, elem := range elems {
buf[entryStart+i*valEntrySize] = elem.TypeCode
if elem.TypeCode == TypeCodeLiteral {
buf[entryStart+i*valEntrySize+valTypeSize] = elem.Value[0]
} else {
endian.PutUint32(buf[entryStart+i*valEntrySize+valTypeSize:], uint32(len(buf)))
buf = append(buf, elem.Value...)
}
}
return buf
}
func buildBinaryObject(keys [][]byte, elems []BinaryJSON) (BinaryJSON, error) {
totalSize := headerSize + len(elems)*(keyEntrySize+valEntrySize)
for i, elem := range elems {
if elem.TypeCode != TypeCodeLiteral {
totalSize += len(elem.Value)
}
totalSize += len(keys[i])
}
buf := make([]byte, headerSize+len(elems)*(keyEntrySize+valEntrySize), totalSize)
endian.PutUint32(buf, uint32(len(elems)))
endian.PutUint32(buf[dataSizeOff:], uint32(totalSize))
for i, key := range keys {
if len(key) > math.MaxUint16 {
return BinaryJSON{}, ErrJSONObjectKeyTooLong
}
endian.PutUint32(buf[headerSize+i*keyEntrySize:], uint32(len(buf)))
endian.PutUint16(buf[headerSize+i*keyEntrySize+keyLenOff:], uint16(len(key)))
buf = append(buf, key...)
}
entryStart := headerSize + len(elems)*keyEntrySize
buf = buildBinaryElements(buf, entryStart, elems)
return BinaryJSON{TypeCode: TypeCodeObject, Value: buf}, nil
}
// Modify modifies a JSON object by insert, replace or set.
// All path expressions cannot contain * or ** wildcard.
// If any error occurs, the input won't be changed.
func (bj BinaryJSON) Modify(pathExprList []PathExpression, values []BinaryJSON, mt ModifyType) (retj BinaryJSON, err error) {
if len(pathExprList) != len(values) {
// TODO: should return 1582(42000)
return retj, errors.New("Incorrect parameter count")
}
for _, pathExpr := range pathExprList {
if pathExpr.flags.containsAnyAsterisk() {
// TODO: should return 3149(42000)
return retj, errors.New("Invalid path expression")
}
}
for i := 0; i < len(pathExprList); i++ {
pathExpr, value := pathExprList[i], values[i]
modifier := &binaryModifier{bj: bj}
switch mt {
case ModifyInsert:
bj = modifier.insert(pathExpr, value)
case ModifyReplace:
bj = modifier.replace(pathExpr, value)
case ModifySet:
bj = modifier.set(pathExpr, value)
}
if modifier.err != nil {
return BinaryJSON{}, modifier.err
}
}
return bj, nil
}
// ArrayInsert insert a BinaryJSON into the given array cell.
// All path expressions cannot contain * or ** wildcard.
// If any error occurs, the input won't be changed.
func (bj BinaryJSON) ArrayInsert(pathExpr PathExpression, value BinaryJSON) (res BinaryJSON, err error) {
// Check the path is a index
if len(pathExpr.legs) < 1 {
return bj, ErrInvalidJSONPathArrayCell
}
parentPath, lastLeg := pathExpr.popOneLastLeg()
if lastLeg.typ != pathLegIndex {
return bj, ErrInvalidJSONPathArrayCell
}
// Find the target array
obj, exists := bj.Extract([]PathExpression{parentPath})
if !exists || obj.TypeCode != TypeCodeArray {
return bj, nil
}
idx := lastLeg.arrayIndex
count := obj.GetElemCount()
if idx >= count {
idx = count
}
// Insert into the array
newArray := make([]BinaryJSON, 0, count+1)
for i := 0; i < idx; i++ {
elem := obj.arrayGetElem(i)
newArray = append(newArray, elem)
}
newArray = append(newArray, value)
for i := idx; i < count; i++ {
elem := obj.arrayGetElem(i)
newArray = append(newArray, elem)
}
obj = buildBinaryArray(newArray)
bj, err = bj.Modify([]PathExpression{parentPath}, []BinaryJSON{obj}, ModifySet)
if err != nil {
return bj, err
}
return bj, nil
}
// Remove removes the elements indicated by pathExprList from JSON.
func (bj BinaryJSON) Remove(pathExprList []PathExpression) (BinaryJSON, error) {
for _, pathExpr := range pathExprList {
if len(pathExpr.legs) == 0 {
// TODO: should return 3153(42000)
return bj, errors.New("Invalid path expression")
}
if pathExpr.flags.containsAnyAsterisk() {
// TODO: should return 3149(42000)
return bj, errors.New("Invalid path expression")
}
modifer := &binaryModifier{bj: bj}
bj = modifer.remove(pathExpr)
if modifer.err != nil {
return BinaryJSON{}, modifer.err
}
}
return bj, nil
}
type binaryModifier struct {
bj BinaryJSON
modifyPtr *byte
modifyValue BinaryJSON
err error
}
func (bm *binaryModifier) set(path PathExpression, newBj BinaryJSON) BinaryJSON {
result := make([]BinaryJSON, 0, 1)
result = bm.bj.extractTo(result, path)
if len(result) > 0 {
bm.modifyPtr = &result[0].Value[0]
bm.modifyValue = newBj
return bm.rebuild()
}
bm.doInsert(path, newBj)
if bm.err != nil {
return BinaryJSON{}
}
return bm.rebuild()
}
func (bm *binaryModifier) replace(path PathExpression, newBj BinaryJSON) BinaryJSON {
result := make([]BinaryJSON, 0, 1)
result = bm.bj.extractTo(result, path)
if len(result) == 0 {
return bm.bj
}
bm.modifyPtr = &result[0].Value[0]
bm.modifyValue = newBj
return bm.rebuild()
}
func (bm *binaryModifier) insert(path PathExpression, newBj BinaryJSON) BinaryJSON {
result := make([]BinaryJSON, 0, 1)
result = bm.bj.extractTo(result, path)
if len(result) > 0 {
return bm.bj
}
bm.doInsert(path, newBj)
if bm.err != nil {
return BinaryJSON{}
}
return bm.rebuild()
}
// doInsert inserts the newBj to its parent, and builds the new parent.
func (bm *binaryModifier) doInsert(path PathExpression, newBj BinaryJSON) {
parentPath, lastLeg := path.popOneLastLeg()
result := make([]BinaryJSON, 0, 1)
result = bm.bj.extractTo(result, parentPath)
if len(result) == 0 {
return
}
parentBj := result[0]
if lastLeg.typ == pathLegIndex {
bm.modifyPtr = &parentBj.Value[0]
if parentBj.TypeCode != TypeCodeArray {
bm.modifyValue = buildBinaryArray([]BinaryJSON{parentBj, newBj})
return
}
elemCount := parentBj.GetElemCount()
elems := make([]BinaryJSON, 0, elemCount+1)
for i := 0; i < elemCount; i++ {
elems = append(elems, parentBj.arrayGetElem(i))
}
elems = append(elems, newBj)
bm.modifyValue = buildBinaryArray(elems)
return
}
if parentBj.TypeCode != TypeCodeObject {
return
}
bm.modifyPtr = &parentBj.Value[0]
elemCount := parentBj.GetElemCount()
insertKey := hack.Slice(lastLeg.dotKey)
insertIdx := sort.Search(elemCount, func(i int) bool {
return bytes.Compare(parentBj.objectGetKey(i), insertKey) >= 0
})
keys := make([][]byte, 0, elemCount+1)
elems := make([]BinaryJSON, 0, elemCount+1)
for i := 0; i < elemCount; i++ {
if i == insertIdx {
keys = append(keys, insertKey)
elems = append(elems, newBj)
}
keys = append(keys, parentBj.objectGetKey(i))
elems = append(elems, parentBj.objectGetVal(i))
}
if insertIdx == elemCount {
keys = append(keys, insertKey)
elems = append(elems, newBj)
}
bm.modifyValue, bm.err = buildBinaryObject(keys, elems)
return
}
func (bm *binaryModifier) remove(path PathExpression) BinaryJSON {
result := make([]BinaryJSON, 0, 1)
result = bm.bj.extractTo(result, path)
if len(result) == 0 {
return bm.bj
}
bm.doRemove(path)
if bm.err != nil {
return BinaryJSON{}
}
return bm.rebuild()
}
func (bm *binaryModifier) doRemove(path PathExpression) {
parentPath, lastLeg := path.popOneLastLeg()
result := make([]BinaryJSON, 0, 1)
result = bm.bj.extractTo(result, parentPath)
if len(result) == 0 {
return
}
parentBj := result[0]
if lastLeg.typ == pathLegIndex {
if parentBj.TypeCode != TypeCodeArray {
return
}
bm.modifyPtr = &parentBj.Value[0]
elemCount := parentBj.GetElemCount()
elems := make([]BinaryJSON, 0, elemCount-1)
for i := 0; i < elemCount; i++ {
if i != lastLeg.arrayIndex {
elems = append(elems, parentBj.arrayGetElem(i))
}
}
bm.modifyValue = buildBinaryArray(elems)
return
}
if parentBj.TypeCode != TypeCodeObject {
return
}
bm.modifyPtr = &parentBj.Value[0]
elemCount := parentBj.GetElemCount()
removeKey := hack.Slice(lastLeg.dotKey)
keys := make([][]byte, 0, elemCount+1)
elems := make([]BinaryJSON, 0, elemCount+1)
for i := 0; i < elemCount; i++ {
key := parentBj.objectGetKey(i)
if !bytes.Equal(key, removeKey) {
keys = append(keys, parentBj.objectGetKey(i))
elems = append(elems, parentBj.objectGetVal(i))
}
}
bm.modifyValue, bm.err = buildBinaryObject(keys, elems)
return
}
// rebuild merges the old and the modified JSON into a new BinaryJSON
func (bm *binaryModifier) rebuild() BinaryJSON {
buf := make([]byte, 0, len(bm.bj.Value)+len(bm.modifyValue.Value))
value, tpCode := bm.rebuildTo(buf)
return BinaryJSON{TypeCode: tpCode, Value: value}
}
func (bm *binaryModifier) rebuildTo(buf []byte) ([]byte, TypeCode) {
if bm.modifyPtr == &bm.bj.Value[0] {
bm.modifyPtr = nil
return append(buf, bm.modifyValue.Value...), bm.modifyValue.TypeCode
} else if bm.modifyPtr == nil {
return append(buf, bm.bj.Value...), bm.bj.TypeCode
}
bj := bm.bj
switch bj.TypeCode {
case TypeCodeLiteral, TypeCodeInt64, TypeCodeUint64, TypeCodeFloat64, TypeCodeString:
return append(buf, bj.Value...), bj.TypeCode
}
docOff := len(buf)
elemCount := bj.GetElemCount()
var valEntryStart int
if bj.TypeCode == TypeCodeArray {
copySize := headerSize + elemCount*valEntrySize
valEntryStart = headerSize
buf = append(buf, bj.Value[:copySize]...)
} else {
copySize := headerSize + elemCount*(keyEntrySize+valEntrySize)
valEntryStart = headerSize + elemCount*keyEntrySize
buf = append(buf, bj.Value[:copySize]...)
if elemCount > 0 {
firstKeyOff := int(endian.Uint32(bj.Value[headerSize:]))
lastKeyOff := int(endian.Uint32(bj.Value[headerSize+(elemCount-1)*keyEntrySize:]))
lastKeyLen := int(endian.Uint16(bj.Value[headerSize+(elemCount-1)*keyEntrySize+keyLenOff:]))
buf = append(buf, bj.Value[firstKeyOff:lastKeyOff+lastKeyLen]...)
}
}
for i := 0; i < elemCount; i++ {
valEntryOff := valEntryStart + i*valEntrySize
elem := bj.valEntryGet(valEntryOff)
bm.bj = elem
var tpCode TypeCode
valOff := len(buf) - docOff
buf, tpCode = bm.rebuildTo(buf)
buf[docOff+valEntryOff] = tpCode
if tpCode == TypeCodeLiteral {
lastIdx := len(buf) - 1
endian.PutUint32(buf[docOff+valEntryOff+valTypeSize:], uint32(buf[lastIdx]))
buf = buf[:lastIdx]
} else {
endian.PutUint32(buf[docOff+valEntryOff+valTypeSize:], uint32(valOff))
}
}
endian.PutUint32(buf[docOff+dataSizeOff:], uint32(len(buf)-docOff))
return buf, bj.TypeCode
}
// floatEpsilon is the acceptable error quantity when comparing two float numbers.
const floatEpsilon = 1.e-8
// compareFloat64 returns an integer comparing the float64 x to y,
// allowing precision loss.
func compareFloat64PrecisionLoss(x, y float64) int {
if x-y < floatEpsilon && y-x < floatEpsilon {
return 0
} else if x-y < 0 {
return -1
}
return 1
}
func compareInt64(x int64, y int64) int {
if x < y {
return -1
} else if x == y {
return 0
}
return 1
}
func compareFloat64(x float64, y float64) int {
if x < y {
return -1
} else if x == y {
return 0
}
return 1
}
func compareUint64(x uint64, y uint64) int {
if x < y {
return -1
} else if x == y {
return 0
}
return 1
}
func compareInt64Uint64(x int64, y uint64) int {
if x < 0 {
return -1
}
return compareUint64(uint64(x), y)
}
func compareFloat64Int64(x float64, y int64) int {
return compareFloat64PrecisionLoss(x, float64(y))
}
func compareFloat64Uint64(x float64, y uint64) int {
return compareFloat64PrecisionLoss(x, float64(y))
}
// CompareBinary compares two binary json objects. Returns -1 if left < right,
// 0 if left == right, else returns 1.
func CompareBinary(left, right BinaryJSON) int {
precedence1 := jsonTypePrecedences[left.Type()]
precedence2 := jsonTypePrecedences[right.Type()]
var cmp int
if precedence1 == precedence2 {
if precedence1 == jsonTypePrecedences["NULL"] {
// for JSON null.
cmp = 0
}
switch left.TypeCode {
case TypeCodeLiteral:
// false is less than true.
cmp = int(right.Value[0]) - int(left.Value[0])
case TypeCodeInt64:
switch right.TypeCode {
case TypeCodeInt64:
cmp = compareInt64(left.GetInt64(), right.GetInt64())
case TypeCodeUint64:
cmp = compareInt64Uint64(left.GetInt64(), right.GetUint64())
case TypeCodeFloat64:
cmp = -compareFloat64Int64(right.GetFloat64(), left.GetInt64())
}
case TypeCodeUint64:
switch right.TypeCode {
case TypeCodeInt64:
cmp = -compareInt64Uint64(right.GetInt64(), left.GetUint64())
case TypeCodeUint64:
cmp = compareUint64(left.GetUint64(), right.GetUint64())
case TypeCodeFloat64:
cmp = -compareFloat64Uint64(right.GetFloat64(), left.GetUint64())
}
case TypeCodeFloat64:
switch right.TypeCode {
case TypeCodeInt64:
cmp = compareFloat64Int64(left.GetFloat64(), right.GetInt64())
case TypeCodeUint64:
cmp = compareFloat64Uint64(left.GetFloat64(), right.GetUint64())
case TypeCodeFloat64:
cmp = compareFloat64(left.GetFloat64(), right.GetFloat64())
}
case TypeCodeString:
cmp = bytes.Compare(left.GetString(), right.GetString())
case TypeCodeArray:
leftCount := left.GetElemCount()
rightCount := right.GetElemCount()
for i := 0; i < leftCount && i < rightCount; i++ {
elem1 := left.arrayGetElem(i)
elem2 := right.arrayGetElem(i)
cmp = CompareBinary(elem1, elem2)
if cmp != 0 {
return cmp
}
}
cmp = leftCount - rightCount
case TypeCodeObject:
// reference:
// https://github.com/mysql/mysql-server/blob/ee4455a33b10f1b1886044322e4893f587b319ed/sql/json_dom.cc#L2561
leftCount, rightCount := left.GetElemCount(), right.GetElemCount()
cmp := compareInt64(int64(leftCount), int64(rightCount))
if cmp != 0 {
return cmp
}
for i := 0; i < leftCount; i++ {
leftKey, rightKey := left.objectGetKey(i), right.objectGetKey(i)
cmp = bytes.Compare(leftKey, rightKey)
if cmp != 0 {
return cmp
}
cmp = CompareBinary(left.objectGetVal(i), right.objectGetVal(i))
if cmp != 0 {
return cmp
}
}
}
} else {
cmp = precedence1 - precedence2
}
return cmp
}
// MergeBinary merges multiple BinaryJSON into one according the following rules:
// 1) adjacent arrays are merged to a single array;
// 2) adjacent object are merged to a single object;
// 3) a scalar value is autowrapped as an array before merge;
// 4) an adjacent array and object are merged by autowrapping the object as an array.
func MergeBinary(bjs []BinaryJSON) BinaryJSON {
var remain = bjs
var objects []BinaryJSON
var results []BinaryJSON
for len(remain) > 0 {
if remain[0].TypeCode != TypeCodeObject {
results = append(results, remain[0])
remain = remain[1:]
} else {
objects, remain = getAdjacentObjects(remain)
results = append(results, mergeBinaryObject(objects))
}
}
if len(results) == 1 {
return results[0]
}
return mergeBinaryArray(results)
}
func getAdjacentObjects(bjs []BinaryJSON) (objects, remain []BinaryJSON) {
for i := 0; i < len(bjs); i++ {
if bjs[i].TypeCode != TypeCodeObject {
return bjs[:i], bjs[i:]
}
}
return bjs, nil
}
func mergeBinaryArray(elems []BinaryJSON) BinaryJSON {
buf := make([]BinaryJSON, 0, len(elems))
for i := 0; i < len(elems); i++ {
elem := elems[i]
if elem.TypeCode != TypeCodeArray {
buf = append(buf, elem)
} else {
childCount := elem.GetElemCount()
for j := 0; j < childCount; j++ {
buf = append(buf, elem.arrayGetElem(j))
}
}
}
return buildBinaryArray(buf)
}
func mergeBinaryObject(objects []BinaryJSON) BinaryJSON {
keyValMap := make(map[string]BinaryJSON)
keys := make([][]byte, 0, len(keyValMap))
for _, obj := range objects {
elemCount := obj.GetElemCount()
for i := 0; i < elemCount; i++ {
key := obj.objectGetKey(i)
val := obj.objectGetVal(i)
if old, ok := keyValMap[string(key)]; ok {
keyValMap[string(key)] = MergeBinary([]BinaryJSON{old, val})
} else {
keyValMap[string(key)] = val
keys = append(keys, key)
}
}
}
sort.Slice(keys, func(i, j int) bool {
return bytes.Compare(keys[i], keys[j]) < 0
})
values := make([]BinaryJSON, len(keys))
for i, key := range keys {
values[i] = keyValMap[string(key)]
}
binaryObject, err := buildBinaryObject(keys, values)
if err != nil {
panic("mergeBinaryObject should never panic, please contact the TiDB team for help")
}
return binaryObject
}
// PeekBytesAsJSON trys to peek some bytes from b, until
// we can deserialize a JSON from those bytes.
func PeekBytesAsJSON(b []byte) (n int, err error) {
if len(b) <= 0 {
err = errors.New("Cant peek from empty bytes")
return
}
switch c := b[0]; c {
case TypeCodeObject, TypeCodeArray:
if len(b) >= valTypeSize+headerSize {
size := endian.Uint32(b[valTypeSize+dataSizeOff:])
n = valTypeSize + int(size)
return
}
case TypeCodeString:
strLen, lenLen := binary.Uvarint(b[valTypeSize:])
return valTypeSize + int(strLen) + lenLen, nil
case TypeCodeInt64, TypeCodeUint64, TypeCodeFloat64:
n = valTypeSize + 8
return
case TypeCodeLiteral:
n = valTypeSize + 1
return
}
err = errors.New("Invalid JSON bytes")
return
}
// ContainsBinary check whether JSON document contains specific target according the following rules:
// 1) object contains a target object if and only if every key is contained in source object and the value associated with the target key is contained in the value associated with the source key;
// 2) array contains a target nonarray if and only if the target is contained in some element of the array;
// 3) array contains a target array if and only if every element is contained in some element of the array;
// 4) scalar contains a target scalar if and only if they are comparable and are equal;
func ContainsBinary(obj, target BinaryJSON) bool {
switch obj.TypeCode {
case TypeCodeObject:
if target.TypeCode == TypeCodeObject {
len := target.GetElemCount()
for i := 0; i < len; i++ {
key := target.objectGetKey(i)
val := target.objectGetVal(i)
if exp, exists := obj.objectSearchKey(key); !exists || !ContainsBinary(exp, val) {
return false
}
}
return true
}
return false
case TypeCodeArray:
if target.TypeCode == TypeCodeArray {
len := target.GetElemCount()
for i := 0; i < len; i++ {
if !ContainsBinary(obj, target.arrayGetElem(i)) {
return false
}
}
return true
}
len := obj.GetElemCount()
for i := 0; i < len; i++ {
if ContainsBinary(obj.arrayGetElem(i), target) {
return true
}
}
return false
default:
return CompareBinary(obj, target) == 0
}
}
// GetElemDepth for JSON_DEPTH
// Returns the maximum depth of a JSON document
// rules referenced by MySQL JSON_DEPTH function
// [https://dev.mysql.com/doc/refman/5.7/en/json-attribute-functions.html#function_json-depth]
// 1) An empty array, empty object, or scalar value has depth 1.
// 2) A nonempty array containing only elements of depth 1 or nonempty object containing only member values of depth 1 has depth 2.
// 3) Otherwise, a JSON document has depth greater than 2.
// e.g. depth of '{}', '[]', 'true': 1
// e.g. depth of '[10, 20]', '[[], {}]': 2
// e.g. depth of '[10, {"a": 20}]': 3
func (bj BinaryJSON) GetElemDepth() int {
switch bj.TypeCode {
case TypeCodeObject:
len := bj.GetElemCount()
maxDepth := 0
for i := 0; i < len; i++ {
obj := bj.objectGetVal(i)
depth := obj.GetElemDepth()
if depth > maxDepth {
maxDepth = depth
}
}
return maxDepth + 1
case TypeCodeArray:
len := bj.GetElemCount()
maxDepth := 0
for i := 0; i < len; i++ {
obj := bj.arrayGetElem(i)
depth := obj.GetElemDepth()
if depth > maxDepth {
maxDepth = depth
}
}
return maxDepth + 1
default:
return 1
}
}
// Search for JSON_Search
// rules referenced by MySQL JSON_SEARCH function
// [https://dev.mysql.com/doc/refman/5.7/en/json-search-functions.html#function_json-search]
func (bj BinaryJSON) Search(containType string, search string, escape byte, pathExpres []PathExpression) (res BinaryJSON, isNull bool, err error) {
if containType != ContainsPathOne && containType != ContainsPathAll {
return res, true, ErrInvalidJSONPath
}
patChars, patTypes := stringutil.CompilePattern(search, escape)
result := make([]interface{}, 0)
walkFn := func(fullpath PathExpression, bj BinaryJSON) (stop bool, err error) {
if bj.TypeCode == TypeCodeString && stringutil.DoMatch(string(bj.GetString()), patChars, patTypes) {
result = append(result, fullpath.String())
if containType == ContainsPathOne {
return true, nil
}
}
return false, nil
}
if len(pathExpres) != 0 {
err := bj.Walk(walkFn, pathExpres...)
if err != nil {
return res, true, err
}
} else {
err := bj.Walk(walkFn)
if err != nil {
return res, true, err
}
}
switch len(result) {
case 0:
return res, true, nil
case 1:
return CreateBinary(result[0]), false, nil
default:
return CreateBinary(result), false, nil
}
}
// extractCallbackFn: the type of CALLBACK function for extractToCallback
type extractCallbackFn func(fullpath PathExpression, bj BinaryJSON) (stop bool, err error)
// extractToCallback: callback alternative of extractTo
// would be more effective when walk through the whole JSON is unnecessary
// NOTICE: path [0] & [*] for JSON object other than array is INVALID, which is different from extractTo.
func (bj BinaryJSON) extractToCallback(pathExpr PathExpression, callbackFn extractCallbackFn, fullpath PathExpression) (stop bool, err error) {
if len(pathExpr.legs) == 0 {
return callbackFn(fullpath, bj)
}
currentLeg, subPathExpr := pathExpr.popOneLeg()
if currentLeg.typ == pathLegIndex && bj.TypeCode == TypeCodeArray {
elemCount := bj.GetElemCount()
if currentLeg.arrayIndex == arrayIndexAsterisk {
for i := 0; i < elemCount; i++ {
// buf = bj.arrayGetElem(i).extractTo(buf, subPathExpr)
path := fullpath.pushBackOneIndexLeg(i)
stop, err = bj.arrayGetElem(i).extractToCallback(subPathExpr, callbackFn, path)
if stop || err != nil {
return
}
}
} else if currentLeg.arrayIndex < elemCount {
// buf = bj.arrayGetElem(currentLeg.arrayIndex).extractTo(buf, subPathExpr)
path := fullpath.pushBackOneIndexLeg(currentLeg.arrayIndex)
stop, err = bj.arrayGetElem(currentLeg.arrayIndex).extractToCallback(subPathExpr, callbackFn, path)
if stop || err != nil {
return
}
}
} else if currentLeg.typ == pathLegKey && bj.TypeCode == TypeCodeObject {
elemCount := bj.GetElemCount()
if currentLeg.dotKey == "*" {
for i := 0; i < elemCount; i++ {
// buf = bj.objectGetVal(i).extractTo(buf, subPathExpr)
path := fullpath.pushBackOneKeyLeg(string(bj.objectGetKey(i)))
stop, err = bj.objectGetVal(i).extractToCallback(subPathExpr, callbackFn, path)
if stop || err != nil {
return
}
}
} else {
child, ok := bj.objectSearchKey(hack.Slice(currentLeg.dotKey))
if ok {
// buf = child.extractTo(buf, subPathExpr)
path := fullpath.pushBackOneKeyLeg(currentLeg.dotKey)
stop, err = child.extractToCallback(subPathExpr, callbackFn, path)
if stop || err != nil {
return
}
}
}
} else if currentLeg.typ == pathLegDoubleAsterisk {
// buf = bj.extractTo(buf, subPathExpr)
stop, err = bj.extractToCallback(subPathExpr, callbackFn, fullpath)
if stop || err != nil {
return
}
if bj.TypeCode == TypeCodeArray {
elemCount := bj.GetElemCount()
for i := 0; i < elemCount; i++ {
// buf = bj.arrayGetElem(i).extractTo(buf, pathExpr)
path := fullpath.pushBackOneIndexLeg(i)
stop, err = bj.arrayGetElem(i).extractToCallback(pathExpr, callbackFn, path)
if stop || err != nil {
return
}
}
} else if bj.TypeCode == TypeCodeObject {
elemCount := bj.GetElemCount()
for i := 0; i < elemCount; i++ {
// buf = bj.objectGetVal(i).extractTo(buf, pathExpr)
path := fullpath.pushBackOneKeyLeg(string(bj.objectGetKey(i)))
stop, err = bj.objectGetVal(i).extractToCallback(pathExpr, callbackFn, path)
if stop || err != nil {
return
}
}
}
}
return false, nil
}
// BinaryJSONWalkFunc is used as callback function for BinaryJSON.Walk
type BinaryJSONWalkFunc func(fullpath PathExpression, bj BinaryJSON) (stop bool, err error)
// Walk traverse BinaryJSON objects
func (bj BinaryJSON) Walk(walkFn BinaryJSONWalkFunc, pathExprList ...PathExpression) (err error) {
pathSet := make(map[string]bool)
var doWalk extractCallbackFn
doWalk = func(fullpath PathExpression, bj BinaryJSON) (stop bool, err error) {
pathStr := fullpath.String()
if _, ok := pathSet[pathStr]; ok {
return false, nil
}
stop, err = walkFn(fullpath, bj)
pathSet[pathStr] = true
if stop || err != nil {
return
}
if bj.TypeCode == TypeCodeArray {
elemCount := bj.GetElemCount()
for i := 0; i < elemCount; i++ {
path := fullpath.pushBackOneIndexLeg(i)
stop, err = doWalk(path, bj.arrayGetElem(i))
if stop || err != nil {
return
}
}
} else if bj.TypeCode == TypeCodeObject {
elemCount := bj.GetElemCount()
for i := 0; i < elemCount; i++ {
path := fullpath.pushBackOneKeyLeg(string(bj.objectGetKey(i)))
stop, err = doWalk(path, bj.objectGetVal(i))
if stop || err != nil {
return
}
}
}
return false, nil
}
fullpath := PathExpression{legs: make([]pathLeg, 0, 32), flags: pathExpressionFlag(0)}
if len(pathExprList) > 0 {
for _, pathExpr := range pathExprList {
var stop bool
stop, err = bj.extractToCallback(pathExpr, doWalk, fullpath)
if stop || err != nil {
return err
}
}
} else {
_, err = doWalk(fullpath, bj)
if err != nil {
return
}
}
return nil
}
| types/json/binary_functions.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.004021415952593088,
0.0002402742247795686,
0.00015933097165543586,
0.00017207738710567355,
0.0004257624677848071
] |
{
"id": 12,
"code_window": [
"\t// txn3 should locks k2 successfully using no wait\n",
"\ttxn3 := s.begin(c)\n",
"\ttxn3.SetOption(kv.Pessimistic, true)\n",
"\tlockCtx3 := &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}\n",
"\tc.Assert(failpoint.Enable(\"github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL\", \"return\"), IsNil)\n",
"\terr = txn3.LockKeys(context.Background(), lockCtx3, k2)\n",
"\tc.Assert(failpoint.Disable(\"github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL\"), IsNil)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlockCtx3 := &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tikv.LockNoWait}\n"
],
"file_path": "store/tikv/tests/2pc_test.go",
"type": "replace",
"edit_start_line_idx": 1020
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"context"
"fmt"
"hash"
"hash/fnv"
"runtime/trace"
"sync"
"sync/atomic"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/expression"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/ranger"
)
// numResChkHold indicates the number of resource chunks that an inner worker
// holds at the same time.
// It's used in 2 cases individually:
// 1. IndexMergeJoin
// 2. IndexNestedLoopHashJoin:
// It's used when IndexNestedLoopHashJoin.keepOuterOrder is true.
// Otherwise, there will be at most `concurrency` resource chunks throughout
// the execution of IndexNestedLoopHashJoin.
const numResChkHold = 4
// IndexNestedLoopHashJoin employs one outer worker and N inner workers to
// execute concurrently. The output order is not promised.
//
// The execution flow is very similar to IndexLookUpReader:
// 1. The outer worker reads N outer rows, builds a task and sends it to the
// inner worker channel.
// 2. The inner worker receives the tasks and does 3 things for every task:
// 1. builds hash table from the outer rows
// 2. builds key ranges from outer rows and fetches inner rows
// 3. probes the hash table and sends the join result to the main thread channel.
// Note: step 1 and step 2 runs concurrently.
// 3. The main thread receives the join results.
type IndexNestedLoopHashJoin struct {
IndexLookUpJoin
resultCh chan *indexHashJoinResult
joinChkResourceCh []chan *chunk.Chunk
// We build individual joiner for each inner worker when using chunk-based
// execution, to avoid the concurrency of joiner.chk and joiner.selected.
joiners []joiner
keepOuterOrder bool
curTask *indexHashJoinTask
// taskCh is only used when `keepOuterOrder` is true.
taskCh chan *indexHashJoinTask
stats *indexLookUpJoinRuntimeStats
}
type indexHashJoinOuterWorker struct {
outerWorker
innerCh chan *indexHashJoinTask
keepOuterOrder bool
// taskCh is only used when the outer order needs to be promised.
taskCh chan *indexHashJoinTask
}
type indexHashJoinInnerWorker struct {
innerWorker
matchedOuterPtrs []chunk.RowPtr
joiner joiner
joinChkResourceCh chan *chunk.Chunk
// resultCh is valid only when indexNestedLoopHashJoin do not need to keep
// order. Otherwise, it will be nil.
resultCh chan *indexHashJoinResult
taskCh <-chan *indexHashJoinTask
wg *sync.WaitGroup
joinKeyBuf []byte
outerRowStatus []outerRowStatusFlag
}
type indexHashJoinResult struct {
chk *chunk.Chunk
err error
src chan<- *chunk.Chunk
}
type indexHashJoinTask struct {
*lookUpJoinTask
outerRowStatus [][]outerRowStatusFlag
lookupMap baseHashTable
err error
keepOuterOrder bool
// resultCh is only used when the outer order needs to be promised.
resultCh chan *indexHashJoinResult
// matchedInnerRowPtrs is only valid when the outer order needs to be
// promised. Otherwise, it will be nil.
// len(matchedInnerRowPtrs) equals to
// lookUpJoinTask.outerResult.NumChunks(), and the elements of every
// matchedInnerRowPtrs[chkIdx][rowIdx] indicates the matched inner row ptrs
// of the corresponding outer row.
matchedInnerRowPtrs [][][]chunk.RowPtr
}
// Open implements the IndexNestedLoopHashJoin Executor interface.
func (e *IndexNestedLoopHashJoin) Open(ctx context.Context) error {
// Be careful, very dirty hack in this line!!!
// IndexLookUpJoin need to rebuild executor (the dataReaderBuilder) during
// executing. However `executor.Next()` is lazy evaluation when the RecordSet
// result is drained.
// Lazy evaluation means the saved session context may change during executor's
// building and its running.
// A specific sequence for example:
//
// e := buildExecutor() // txn at build time
// recordSet := runStmt(e)
// session.CommitTxn() // txn closed
// recordSet.Next()
// e.dataReaderBuilder.Build() // txn is used again, which is already closed
//
// The trick here is `getSnapshotTS` will cache snapshot ts in the dataReaderBuilder,
// so even txn is destroyed later, the dataReaderBuilder could still use the
// cached snapshot ts to construct DAG.
_, err := e.innerCtx.readerBuilder.getSnapshotTS()
if err != nil {
return err
}
err = e.children[0].Open(ctx)
if err != nil {
return err
}
e.memTracker = memory.NewTracker(e.id, -1)
e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker)
e.innerPtrBytes = make([][]byte, 0, 8)
if e.runtimeStats != nil {
e.stats = &indexLookUpJoinRuntimeStats{}
e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats)
}
e.startWorkers(ctx)
return nil
}
func (e *IndexNestedLoopHashJoin) startWorkers(ctx context.Context) {
concurrency := e.ctx.GetSessionVars().IndexLookupJoinConcurrency()
if e.stats != nil {
e.stats.concurrency = concurrency
}
workerCtx, cancelFunc := context.WithCancel(ctx)
e.cancelFunc = cancelFunc
innerCh := make(chan *indexHashJoinTask, concurrency)
if e.keepOuterOrder {
e.taskCh = make(chan *indexHashJoinTask, concurrency)
}
e.workerWg.Add(1)
ow := e.newOuterWorker(innerCh)
go util.WithRecovery(func() { ow.run(workerCtx) }, e.finishJoinWorkers)
if !e.keepOuterOrder {
e.resultCh = make(chan *indexHashJoinResult, concurrency)
} else {
// When `keepOuterOrder` is true, each task holds their own `resultCh`
// individually, thus we do not need a global resultCh.
e.resultCh = nil
}
e.joinChkResourceCh = make([]chan *chunk.Chunk, concurrency)
for i := 0; i < concurrency; i++ {
if !e.keepOuterOrder {
e.joinChkResourceCh[i] = make(chan *chunk.Chunk, 1)
e.joinChkResourceCh[i] <- newFirstChunk(e)
} else {
e.joinChkResourceCh[i] = make(chan *chunk.Chunk, numResChkHold)
for j := 0; j < numResChkHold; j++ {
e.joinChkResourceCh[i] <- newFirstChunk(e)
}
}
}
e.workerWg.Add(concurrency)
for i := 0; i < concurrency; i++ {
workerID := i
go util.WithRecovery(func() { e.newInnerWorker(innerCh, workerID).run(workerCtx, cancelFunc) }, e.finishJoinWorkers)
}
go e.wait4JoinWorkers()
}
func (e *IndexNestedLoopHashJoin) finishJoinWorkers(r interface{}) {
if r != nil {
err := errors.New(fmt.Sprintf("%v", r))
if !e.keepOuterOrder {
e.resultCh <- &indexHashJoinResult{err: err}
} else {
task := &indexHashJoinTask{err: err}
e.taskCh <- task
}
if e.cancelFunc != nil {
e.cancelFunc()
}
}
e.workerWg.Done()
}
func (e *IndexNestedLoopHashJoin) wait4JoinWorkers() {
e.workerWg.Wait()
if e.resultCh != nil {
close(e.resultCh)
}
if e.taskCh != nil {
close(e.taskCh)
}
}
// Next implements the IndexNestedLoopHashJoin Executor interface.
func (e *IndexNestedLoopHashJoin) Next(ctx context.Context, req *chunk.Chunk) error {
req.Reset()
if e.keepOuterOrder {
return e.runInOrder(ctx, req)
}
// unordered run
var (
result *indexHashJoinResult
ok bool
)
select {
case result, ok = <-e.resultCh:
if !ok {
return nil
}
if result.err != nil {
return result.err
}
case <-ctx.Done():
return ctx.Err()
}
req.SwapColumns(result.chk)
result.src <- result.chk
return nil
}
func (e *IndexNestedLoopHashJoin) runInOrder(ctx context.Context, req *chunk.Chunk) error {
var (
result *indexHashJoinResult
ok bool
)
for {
if e.isDryUpTasks(ctx) {
return nil
}
if e.curTask.err != nil {
return e.curTask.err
}
select {
case result, ok = <-e.curTask.resultCh:
if !ok {
e.curTask = nil
continue
}
if result.err != nil {
return result.err
}
case <-ctx.Done():
return ctx.Err()
}
req.SwapColumns(result.chk)
result.src <- result.chk
return nil
}
}
// isDryUpTasks indicates whether all the tasks have been processed.
func (e *IndexNestedLoopHashJoin) isDryUpTasks(ctx context.Context) bool {
if e.curTask != nil {
return false
}
var ok bool
select {
case e.curTask, ok = <-e.taskCh:
if !ok {
return true
}
case <-ctx.Done():
return true
}
return false
}
// Close implements the IndexNestedLoopHashJoin Executor interface.
func (e *IndexNestedLoopHashJoin) Close() error {
if e.cancelFunc != nil {
e.cancelFunc()
e.cancelFunc = nil
}
if e.resultCh != nil {
for range e.resultCh {
}
e.resultCh = nil
}
if e.taskCh != nil {
for range e.taskCh {
}
e.taskCh = nil
}
for i := range e.joinChkResourceCh {
close(e.joinChkResourceCh[i])
}
e.joinChkResourceCh = nil
return e.baseExecutor.Close()
}
func (ow *indexHashJoinOuterWorker) run(ctx context.Context) {
defer trace.StartRegion(ctx, "IndexHashJoinOuterWorker").End()
defer close(ow.innerCh)
for {
task, err := ow.buildTask(ctx)
failpoint.Inject("testIndexHashJoinOuterWorkerErr", func() {
err = errors.New("mockIndexHashJoinOuterWorkerErr")
})
if err != nil {
task = &indexHashJoinTask{err: err}
if ow.keepOuterOrder {
task.keepOuterOrder, task.resultCh = true, make(chan *indexHashJoinResult, 1)
ow.pushToChan(ctx, task, ow.taskCh)
}
ow.pushToChan(ctx, task, ow.innerCh)
return
}
if task == nil {
return
}
if finished := ow.pushToChan(ctx, task, ow.innerCh); finished {
return
}
if ow.keepOuterOrder {
failpoint.Inject("testIssue20779", func() {
panic("testIssue20779")
})
if finished := ow.pushToChan(ctx, task, ow.taskCh); finished {
return
}
}
}
}
func (ow *indexHashJoinOuterWorker) buildTask(ctx context.Context) (*indexHashJoinTask, error) {
task, err := ow.outerWorker.buildTask(ctx)
if task == nil || err != nil {
return nil, err
}
var (
resultCh chan *indexHashJoinResult
matchedInnerRowPtrs [][][]chunk.RowPtr
)
if ow.keepOuterOrder {
resultCh = make(chan *indexHashJoinResult, numResChkHold)
matchedInnerRowPtrs = make([][][]chunk.RowPtr, task.outerResult.NumChunks())
for i := range matchedInnerRowPtrs {
matchedInnerRowPtrs[i] = make([][]chunk.RowPtr, task.outerResult.GetChunk(i).NumRows())
}
}
numChks := task.outerResult.NumChunks()
outerRowStatus := make([][]outerRowStatusFlag, numChks)
for i := 0; i < numChks; i++ {
outerRowStatus[i] = make([]outerRowStatusFlag, task.outerResult.GetChunk(i).NumRows())
}
return &indexHashJoinTask{
lookUpJoinTask: task,
outerRowStatus: outerRowStatus,
keepOuterOrder: ow.keepOuterOrder,
resultCh: resultCh,
matchedInnerRowPtrs: matchedInnerRowPtrs,
}, nil
}
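// pushToChan sends task to dst and returns true if the context is canceled
// before the send finishes.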
func (ow *indexHashJoinOuterWorker) pushToChan(ctx context.Context, task *indexHashJoinTask, dst chan<- *indexHashJoinTask) bool {
select {
case <-ctx.Done():
return true
case dst <- task:
}
return false
}
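// newOuterWorker creates the outer worker that reads rows from the outer child
// and dispatches tasks to the inner workers through innerCh.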
func (e *IndexNestedLoopHashJoin) newOuterWorker(innerCh chan *indexHashJoinTask) *indexHashJoinOuterWorker {
ow := &indexHashJoinOuterWorker{
outerWorker: outerWorker{
outerCtx: e.outerCtx,
ctx: e.ctx,
executor: e.children[0],
batchSize: 32,
maxBatchSize: e.ctx.GetSessionVars().IndexJoinBatchSize,
parentMemTracker: e.memTracker,
lookup: &e.IndexLookUpJoin,
},
innerCh: innerCh,
keepOuterOrder: e.keepOuterOrder,
taskCh: e.taskCh,
}
return ow
}
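// newInnerWorker creates the workerID-th inner worker. Each worker holds its own
// copy of the index ranges and its own chunk resource channel to avoid data races
// between workers.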
func (e *IndexNestedLoopHashJoin) newInnerWorker(taskCh chan *indexHashJoinTask, workerID int) *indexHashJoinInnerWorker {
// Since multiple inner workers run concurrently, we should copy join's indexRanges for every worker to avoid data race.
copiedRanges := make([]*ranger.Range, 0, len(e.indexRanges))
for _, ran := range e.indexRanges {
copiedRanges = append(copiedRanges, ran.Clone())
}
var innerStats *innerWorkerRuntimeStats
if e.stats != nil {
innerStats = &e.stats.innerWorker
}
iw := &indexHashJoinInnerWorker{
innerWorker: innerWorker{
innerCtx: e.innerCtx,
outerCtx: e.outerCtx,
ctx: e.ctx,
executorChk: chunk.NewChunkWithCapacity(e.innerCtx.rowTypes, e.maxChunkSize),
indexRanges: copiedRanges,
keyOff2IdxOff: e.keyOff2IdxOff,
stats: innerStats,
},
taskCh: taskCh,
joiner: e.joiners[workerID],
joinChkResourceCh: e.joinChkResourceCh[workerID],
resultCh: e.resultCh,
matchedOuterPtrs: make([]chunk.RowPtr, 0, e.maxChunkSize),
joinKeyBuf: make([]byte, 1),
outerRowStatus: make([]outerRowStatusFlag, 0, e.maxChunkSize),
}
if e.lastColHelper != nil {
		// nextCwf.TmpConstant needs to be reset for every individual
		// inner worker to avoid data races when the inner workers are
		// running concurrently.
nextCwf := *e.lastColHelper
nextCwf.TmpConstant = make([]*expression.Constant, len(e.lastColHelper.TmpConstant))
for i := range e.lastColHelper.TmpConstant {
nextCwf.TmpConstant[i] = &expression.Constant{RetType: nextCwf.TargetCol.RetType}
}
iw.nextColCompareFilters = &nextCwf
}
return iw
}
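// run is the main loop of an inner worker: it keeps fetching tasks from taskCh
// and joining them until the channel is closed or the context is canceled.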
func (iw *indexHashJoinInnerWorker) run(ctx context.Context, cancelFunc context.CancelFunc) {
defer trace.StartRegion(ctx, "IndexHashJoinInnerWorker").End()
var task *indexHashJoinTask
joinResult, ok := iw.getNewJoinResult(ctx)
if !ok {
cancelFunc()
return
}
h, resultCh := fnv.New64(), iw.resultCh
for {
select {
case <-ctx.Done():
return
case task, ok = <-iw.taskCh:
}
if !ok {
break
}
// We need to init resultCh before the err is returned.
if task.keepOuterOrder {
resultCh = task.resultCh
}
if task.err != nil {
joinResult.err = task.err
break
}
err := iw.handleTask(ctx, task, joinResult, h, resultCh)
if err != nil {
joinResult.err = err
break
}
if task.keepOuterOrder {
// We need to get a new result holder here because the old
			// `joinResult` has been sent to the `resultCh` or to the
// `joinChkResourceCh`.
joinResult, ok = iw.getNewJoinResult(ctx)
if !ok {
cancelFunc()
return
}
}
}
failpoint.Inject("testIndexHashJoinInnerWorkerErr", func() {
joinResult.err = errors.New("mockIndexHashJoinInnerWorkerErr")
})
if joinResult.err != nil {
resultCh <- joinResult
return
}
	// When task.keepOuterOrder is true (resultCh != iw.resultCh), the last
	// joinResult will be checked when a task has been processed, thus we do
	// not need to check it here again.
if resultCh == iw.resultCh && joinResult.chk != nil && joinResult.chk.NumRows() > 0 {
select {
case resultCh <- joinResult:
case <-ctx.Done():
return
}
}
}
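// getNewJoinResult acquires a free result chunk from joinChkResourceCh. It returns
// false when the context is canceled or the resource channel is closed.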
func (iw *indexHashJoinInnerWorker) getNewJoinResult(ctx context.Context) (*indexHashJoinResult, bool) {
joinResult := &indexHashJoinResult{
src: iw.joinChkResourceCh,
}
ok := true
select {
case joinResult.chk, ok = <-iw.joinChkResourceCh:
case <-ctx.Done():
return nil, false
}
return joinResult, ok
}
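// buildHashTableForOuterResult hashes the join key of every valid outer row in the
// task and puts the row pointer into task.lookupMap. Rows filtered out by outerMatch
// or containing NULL join keys are skipped.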
func (iw *indexHashJoinInnerWorker) buildHashTableForOuterResult(ctx context.Context, task *indexHashJoinTask, h hash.Hash64) {
if iw.stats != nil {
start := time.Now()
defer func() {
atomic.AddInt64(&iw.stats.build, int64(time.Since(start)))
}()
}
buf, numChks := make([]byte, 1), task.outerResult.NumChunks()
task.lookupMap = newUnsafeHashTable(task.outerResult.Len())
for chkIdx := 0; chkIdx < numChks; chkIdx++ {
chk := task.outerResult.GetChunk(chkIdx)
numRows := chk.NumRows()
OUTER:
for rowIdx := 0; rowIdx < numRows; rowIdx++ {
if task.outerMatch != nil && !task.outerMatch[chkIdx][rowIdx] {
continue
}
row := chk.GetRow(rowIdx)
hashColIdx := iw.outerCtx.hashCols
for _, i := range hashColIdx {
if row.IsNull(i) {
continue OUTER
}
}
h.Reset()
err := codec.HashChunkRow(iw.ctx.GetSessionVars().StmtCtx, h, row, iw.outerCtx.rowTypes, hashColIdx, buf)
failpoint.Inject("testIndexHashJoinBuildErr", func() {
err = errors.New("mockIndexHashJoinBuildErr")
})
if err != nil {
// This panic will be recovered by the invoker.
panic(err.Error())
}
rowPtr := chunk.RowPtr{ChkIdx: uint32(chkIdx), RowIdx: uint32(rowIdx)}
task.lookupMap.Put(h.Sum64(), rowPtr)
}
}
}
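// fetchInnerResults constructs the lookup contents from the task's outer rows and
// then fetches the matching inner rows into the task.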
func (iw *indexHashJoinInnerWorker) fetchInnerResults(ctx context.Context, task *lookUpJoinTask) error {
lookUpContents, err := iw.constructLookupContent(task)
if err != nil {
return err
}
return iw.innerWorker.fetchInnerResults(ctx, task, lookUpContents)
}
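// handleHashJoinInnerWorkerPanic is the recovery handler for buildHashTableForOuterResult:
// it converts a recovered panic into an error on resultCh and marks the wait group as done.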
func (iw *indexHashJoinInnerWorker) handleHashJoinInnerWorkerPanic(r interface{}) {
if r != nil {
iw.resultCh <- &indexHashJoinResult{err: errors.Errorf("%v", r)}
}
iw.wg.Done()
}
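// handleTask builds the hash table for the task's outer rows in a separate goroutine,
// fetches the inner rows, and then joins them either in outer order or unordered
// depending on task.keepOuterOrder.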
func (iw *indexHashJoinInnerWorker) handleTask(ctx context.Context, task *indexHashJoinTask, joinResult *indexHashJoinResult, h hash.Hash64, resultCh chan *indexHashJoinResult) error {
var joinStartTime time.Time
if iw.stats != nil {
start := time.Now()
defer func() {
endTime := time.Now()
atomic.AddInt64(&iw.stats.totalTime, int64(endTime.Sub(start)))
atomic.AddInt64(&iw.stats.join, int64(endTime.Sub(joinStartTime)))
}()
}
iw.wg = &sync.WaitGroup{}
iw.wg.Add(1)
// TODO(XuHuaiyu): we may always use the smaller side to build the hashtable.
go util.WithRecovery(func() { iw.buildHashTableForOuterResult(ctx, task, h) }, iw.handleHashJoinInnerWorkerPanic)
err := iw.fetchInnerResults(ctx, task.lookUpJoinTask)
if err != nil {
return err
}
iw.wg.Wait()
joinStartTime = time.Now()
if !task.keepOuterOrder {
return iw.doJoinUnordered(ctx, task, joinResult, h, resultCh)
}
return iw.doJoinInOrder(ctx, task, joinResult, h, resultCh)
}
func (iw *indexHashJoinInnerWorker) doJoinUnordered(ctx context.Context, task *indexHashJoinTask, joinResult *indexHashJoinResult, h hash.Hash64, resultCh chan *indexHashJoinResult) error {
var ok bool
iter := chunk.NewIterator4List(task.innerResult)
for row := iter.Begin(); row != iter.End(); row = iter.Next() {
ok, joinResult = iw.joinMatchedInnerRow2Chunk(ctx, row, task, joinResult, h, iw.joinKeyBuf)
if !ok {
return errors.New("indexHashJoinInnerWorker.doJoinUnordered failed")
}
}
for chkIdx, outerRowStatus := range task.outerRowStatus {
chk := task.outerResult.GetChunk(chkIdx)
for rowIdx, val := range outerRowStatus {
if val == outerRowMatched {
continue
}
iw.joiner.onMissMatch(val == outerRowHasNull, chk.GetRow(rowIdx), joinResult.chk)
if joinResult.chk.IsFull() {
select {
case resultCh <- joinResult:
case <-ctx.Done():
return ctx.Err()
}
joinResult, ok = iw.getNewJoinResult(ctx)
if !ok {
return errors.New("indexHashJoinInnerWorker.doJoinUnordered failed")
}
}
}
}
return nil
}
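// getMatchedOuterRows probes task.lookupMap with the hash of innerRow's join key and
// returns the outer rows (and their pointers) whose keys are actually equal. For semi
// joins, outer rows that are already matched are skipped.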
func (iw *indexHashJoinInnerWorker) getMatchedOuterRows(innerRow chunk.Row, task *indexHashJoinTask, h hash.Hash64, buf []byte) (matchedRows []chunk.Row, matchedRowPtr []chunk.RowPtr, err error) {
h.Reset()
err = codec.HashChunkRow(iw.ctx.GetSessionVars().StmtCtx, h, innerRow, iw.rowTypes, iw.hashCols, buf)
if err != nil {
return nil, nil, err
}
iw.matchedOuterPtrs = task.lookupMap.Get(h.Sum64())
if len(iw.matchedOuterPtrs) == 0 {
return nil, nil, nil
}
joinType := JoinerType(iw.joiner)
isSemiJoin := joinType == plannercore.SemiJoin || joinType == plannercore.LeftOuterSemiJoin
matchedRows = make([]chunk.Row, 0, len(iw.matchedOuterPtrs))
matchedRowPtr = make([]chunk.RowPtr, 0, len(iw.matchedOuterPtrs))
for _, ptr := range iw.matchedOuterPtrs {
outerRow := task.outerResult.GetRow(ptr)
ok, err := codec.EqualChunkRow(iw.ctx.GetSessionVars().StmtCtx, innerRow, iw.rowTypes, iw.keyCols, outerRow, iw.outerCtx.rowTypes, iw.outerCtx.hashCols)
if err != nil {
return nil, nil, err
}
if !ok || (task.outerRowStatus[ptr.ChkIdx][ptr.RowIdx] == outerRowMatched && isSemiJoin) {
continue
}
matchedRows = append(matchedRows, outerRow)
matchedRowPtr = append(matchedRowPtr, chunk.RowPtr{ChkIdx: ptr.ChkIdx, RowIdx: ptr.RowIdx})
}
return matchedRows, matchedRowPtr, nil
}
func (iw *indexHashJoinInnerWorker) joinMatchedInnerRow2Chunk(ctx context.Context, innerRow chunk.Row, task *indexHashJoinTask,
joinResult *indexHashJoinResult, h hash.Hash64, buf []byte) (bool, *indexHashJoinResult) {
matchedOuterRows, matchedOuterRowPtr, err := iw.getMatchedOuterRows(innerRow, task, h, buf)
if err != nil {
joinResult.err = err
return false, joinResult
}
if len(matchedOuterRows) == 0 {
return true, joinResult
}
var (
ok bool
iter = chunk.NewIterator4Slice(matchedOuterRows)
cursor = 0
)
for iter.Begin(); iter.Current() != iter.End(); {
iw.outerRowStatus, err = iw.joiner.tryToMatchOuters(iter, innerRow, joinResult.chk, iw.outerRowStatus)
if err != nil {
joinResult.err = err
return false, joinResult
}
for _, status := range iw.outerRowStatus {
chkIdx, rowIdx := matchedOuterRowPtr[cursor].ChkIdx, matchedOuterRowPtr[cursor].RowIdx
if status == outerRowMatched || task.outerRowStatus[chkIdx][rowIdx] == outerRowUnmatched {
task.outerRowStatus[chkIdx][rowIdx] = status
}
cursor++
}
if joinResult.chk.IsFull() {
select {
case iw.resultCh <- joinResult:
case <-ctx.Done():
}
joinResult, ok = iw.getNewJoinResult(ctx)
if !ok {
return false, joinResult
}
}
}
return true, joinResult
}
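// collectMatchedInnerPtrs4OuterRows records the pointer of innerRow for every outer row
// it matches, so that doJoinInOrder can later emit the matches in the order of the outer rows.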
func (iw *indexHashJoinInnerWorker) collectMatchedInnerPtrs4OuterRows(ctx context.Context, innerRow chunk.Row, innerRowPtr chunk.RowPtr,
task *indexHashJoinTask, h hash.Hash64, buf []byte) error {
_, matchedOuterRowIdx, err := iw.getMatchedOuterRows(innerRow, task, h, buf)
if err != nil {
return err
}
for _, outerRowPtr := range matchedOuterRowIdx {
chkIdx, rowIdx := outerRowPtr.ChkIdx, outerRowPtr.RowIdx
task.matchedInnerRowPtrs[chkIdx][rowIdx] = append(task.matchedInnerRowPtrs[chkIdx][rowIdx], innerRowPtr)
}
return nil
}
// doJoinInOrder follows the following steps:
// 1. collect all the matched inner row ptrs for every outer row
// 2. do the join work
// 2.1 collect all the matched inner rows using the collected ptrs for every outer row
// 2.2 call tryToMatchInners for every outer row
// 2.3 call onMissMatch when no inner rows are matched
func (iw *indexHashJoinInnerWorker) doJoinInOrder(ctx context.Context, task *indexHashJoinTask, joinResult *indexHashJoinResult, h hash.Hash64, resultCh chan *indexHashJoinResult) (err error) {
defer func() {
if err == nil && joinResult.chk != nil {
if joinResult.chk.NumRows() > 0 {
select {
case resultCh <- joinResult:
case <-ctx.Done():
return
}
} else {
joinResult.src <- joinResult.chk
}
}
close(resultCh)
}()
for i, numChunks := 0, task.innerResult.NumChunks(); i < numChunks; i++ {
for j, chk := 0, task.innerResult.GetChunk(i); j < chk.NumRows(); j++ {
row := chk.GetRow(j)
ptr := chunk.RowPtr{ChkIdx: uint32(i), RowIdx: uint32(j)}
err = iw.collectMatchedInnerPtrs4OuterRows(ctx, row, ptr, task, h, iw.joinKeyBuf)
if err != nil {
return err
}
}
}
// TODO: matchedInnerRowPtrs and matchedInnerRows can be moved to inner worker.
matchedInnerRows := make([]chunk.Row, len(task.matchedInnerRowPtrs))
var hasMatched, hasNull, ok bool
for chkIdx, innerRowPtrs4Chk := range task.matchedInnerRowPtrs {
for outerRowIdx, innerRowPtrs := range innerRowPtrs4Chk {
matchedInnerRows, hasMatched, hasNull = matchedInnerRows[:0], false, false
outerRow := task.outerResult.GetChunk(chkIdx).GetRow(outerRowIdx)
for _, ptr := range innerRowPtrs {
matchedInnerRows = append(matchedInnerRows, task.innerResult.GetRow(ptr))
}
iter := chunk.NewIterator4Slice(matchedInnerRows)
for iter.Begin(); iter.Current() != iter.End(); {
matched, isNull, err := iw.joiner.tryToMatchInners(outerRow, iter, joinResult.chk)
if err != nil {
return err
}
hasMatched, hasNull = matched || hasMatched, isNull || hasNull
if joinResult.chk.IsFull() {
select {
case resultCh <- joinResult:
case <-ctx.Done():
return ctx.Err()
}
joinResult, ok = iw.getNewJoinResult(ctx)
if !ok {
return errors.New("indexHashJoinInnerWorker.doJoinInOrder failed")
}
}
}
if !hasMatched {
iw.joiner.onMissMatch(hasNull, outerRow, joinResult.chk)
}
}
}
return nil
}
| executor/index_lookup_hash_join.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0006778444512747228,
0.000191274841199629,
0.00015970284584909678,
0.00017035877681337297,
0.00007493905286537483
] |
{
"id": 12,
"code_window": [
"\t// txn3 should locks k2 successfully using no wait\n",
"\ttxn3 := s.begin(c)\n",
"\ttxn3.SetOption(kv.Pessimistic, true)\n",
"\tlockCtx3 := &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}\n",
"\tc.Assert(failpoint.Enable(\"github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL\", \"return\"), IsNil)\n",
"\terr = txn3.LockKeys(context.Background(), lockCtx3, k2)\n",
"\tc.Assert(failpoint.Disable(\"github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL\"), IsNil)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlockCtx3 := &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tikv.LockNoWait}\n"
],
"file_path": "store/tikv/tests/2pc_test.go",
"type": "replace",
"edit_start_line_idx": 1020
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor_test
import (
"context"
"fmt"
"strings"
"sync"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/parser/model"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/testkit"
)
var (
_ = Suite(&testChunkSizeControlSuite{})
)
type testSlowClient struct {
sync.RWMutex
tikv.Client
regionDelay map[uint64]time.Duration
}
func (c *testSlowClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
regionID := req.RegionId
delay := c.GetDelay(regionID)
if req.Type == tikvrpc.CmdCop && delay > 0 {
time.Sleep(delay)
}
return c.Client.SendRequest(ctx, addr, req, timeout)
}
func (c *testSlowClient) SetDelay(regionID uint64, dur time.Duration) {
c.Lock()
defer c.Unlock()
c.regionDelay[regionID] = dur
}
func (c *testSlowClient) GetDelay(regionID uint64) time.Duration {
c.RLock()
defer c.RUnlock()
return c.regionDelay[regionID]
}
// manipulateCluster splits this cluster's region by splitKeys and returns regionIDs after split
func manipulateCluster(cluster cluster.Cluster, splitKeys [][]byte) []uint64 {
if len(splitKeys) == 0 {
return nil
}
region, _ := cluster.GetRegionByKey(splitKeys[0])
for _, key := range splitKeys {
if r, _ := cluster.GetRegionByKey(key); r.Id != region.Id {
panic("all split keys should belong to the same region")
}
}
allRegionIDs := []uint64{region.Id}
for i, key := range splitKeys {
newRegionID, newPeerID := cluster.AllocID(), cluster.AllocID()
cluster.Split(allRegionIDs[i], newRegionID, key, []uint64{newPeerID}, newPeerID)
allRegionIDs = append(allRegionIDs, newRegionID)
}
return allRegionIDs
}
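// generateTableSplitKeyForInt encodes the given integer handles into row keys of table tid;
// the keys are used to split regions.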
func generateTableSplitKeyForInt(tid int64, splitNum []int) [][]byte {
results := make([][]byte, 0, len(splitNum))
for _, num := range splitNum {
results = append(results, tablecodec.EncodeRowKey(tid, codec.EncodeInt(nil, int64(num))))
}
return results
}
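// generateIndexSplitKeyForInt encodes the given integer values into keys of index idx in
// table tid; the keys are used to split regions.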
func generateIndexSplitKeyForInt(tid, idx int64, splitNum []int) [][]byte {
results := make([][]byte, 0, len(splitNum))
for _, num := range splitNum {
d := new(types.Datum)
d.SetInt64(int64(num))
b, err := codec.EncodeKey(nil, nil, *d)
if err != nil {
panic(err)
}
results = append(results, tablecodec.EncodeIndexSeekKey(tid, idx, b))
}
return results
}
type testChunkSizeControlKit struct {
store kv.Storage
dom *domain.Domain
tk *testkit.TestKit
client *testSlowClient
cluster cluster.Cluster
}
type testChunkSizeControlSuite struct {
m map[string]*testChunkSizeControlKit
}
func (s *testChunkSizeControlSuite) SetUpSuite(c *C) {
c.Skip("not stable because coprocessor may result in goroutine leak")
tableSQLs := map[string]string{}
tableSQLs["Limit&TableScan"] = "create table t (a int, primary key (a))"
tableSQLs["Limit&IndexScan"] = "create table t (a int, index idx_a(a))"
s.m = make(map[string]*testChunkSizeControlKit)
for name, sql := range tableSQLs {
// BootstrapSession is not thread-safe, so we have to prepare all resources in SetUp.
kit := new(testChunkSizeControlKit)
s.m[name] = kit
kit.client = &testSlowClient{regionDelay: make(map[uint64]time.Duration)}
var err error
kit.store, err = mockstore.NewMockStore(
mockstore.WithClusterInspector(func(c cluster.Cluster) {
mockstore.BootstrapWithSingleStore(c)
kit.cluster = c
}),
mockstore.WithClientHijacker(func(c tikv.Client) tikv.Client {
kit.client.Client = c
return kit.client
}),
)
c.Assert(err, IsNil)
// init domain
kit.dom, err = session.BootstrapSession(kit.store)
c.Assert(err, IsNil)
// create the test table
kit.tk = testkit.NewTestKitWithInit(c, kit.store)
kit.tk.MustExec(sql)
}
}
func (s *testChunkSizeControlSuite) getKit(name string) (
kv.Storage, *domain.Domain, *testkit.TestKit, *testSlowClient, cluster.Cluster) {
x := s.m[name]
return x.store, x.dom, x.tk, x.client, x.cluster
}
func (s *testChunkSizeControlSuite) TestLimitAndTableScan(c *C) {
_, dom, tk, client, cluster := s.getKit("Limit&TableScan")
defer client.Close()
tbl, err := dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
tid := tbl.Meta().ID
// construct two regions split by 100
splitKeys := generateTableSplitKeyForInt(tid, []int{100})
regionIDs := manipulateCluster(cluster, splitKeys)
noDelayThreshold := time.Millisecond * 100
delayDuration := time.Second
delayThreshold := delayDuration * 9 / 10
tk.MustExec("insert into t values (1)") // insert one record into region1, and set a delay duration
client.SetDelay(regionIDs[0], delayDuration)
results := tk.MustQuery("explain analyze select * from t where t.a > 0 and t.a < 200 limit 1")
cost := s.parseTimeCost(c, results.Rows()[0])
c.Assert(cost, Not(Less), delayThreshold) // have to wait for region1
tk.MustExec("insert into t values (101)") // insert one record into region2
results = tk.MustQuery("explain analyze select * from t where t.a > 0 and t.a < 200 limit 1")
cost = s.parseTimeCost(c, results.Rows()[0])
c.Assert(cost, Less, noDelayThreshold) // region2 return quickly
results = tk.MustQuery("explain analyze select * from t where t.a > 0 and t.a < 200 limit 2")
cost = s.parseTimeCost(c, results.Rows()[0])
c.Assert(cost, Not(Less), delayThreshold) // have to wait
}
func (s *testChunkSizeControlSuite) TestLimitAndIndexScan(c *C) {
_, dom, tk, client, cluster := s.getKit("Limit&IndexScan")
defer client.Close()
tbl, err := dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
c.Assert(err, IsNil)
tid := tbl.Meta().ID
idx := tbl.Meta().Indices[0].ID
// construct two regions split by 100
splitKeys := generateIndexSplitKeyForInt(tid, idx, []int{100})
regionIDs := manipulateCluster(cluster, splitKeys)
noDelayThreshold := time.Millisecond * 100
delayDuration := time.Second
delayThreshold := delayDuration * 9 / 10
tk.MustExec("insert into t values (1)") // insert one record into region1, and set a delay duration
client.SetDelay(regionIDs[0], delayDuration)
results := tk.MustQuery("explain analyze select * from t where t.a > 0 and t.a < 200 limit 1")
cost := s.parseTimeCost(c, results.Rows()[0])
c.Assert(cost, Not(Less), delayThreshold) // have to wait for region1
tk.MustExec("insert into t values (101)") // insert one record into region2
results = tk.MustQuery("explain analyze select * from t where t.a > 0 and t.a < 200 limit 1")
cost = s.parseTimeCost(c, results.Rows()[0])
c.Assert(cost, Less, noDelayThreshold) // region2 return quickly
results = tk.MustQuery("explain analyze select * from t where t.a > 0 and t.a < 200 limit 2")
cost = s.parseTimeCost(c, results.Rows()[0])
c.Assert(cost, Not(Less), delayThreshold) // have to wait
}
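// parseTimeCost extracts the `time:` field from one row of the `explain analyze` output
// and parses it as a duration.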
func (s *testChunkSizeControlSuite) parseTimeCost(c *C, line []interface{}) time.Duration {
lineStr := fmt.Sprintf("%v", line)
idx := strings.Index(lineStr, "time:")
c.Assert(idx, Not(Equals), -1)
lineStr = lineStr[idx+len("time:"):]
idx = strings.Index(lineStr, ",")
c.Assert(idx, Not(Equals), -1)
timeStr := lineStr[:idx]
d, err := time.ParseDuration(timeStr)
c.Assert(err, IsNil)
return d
}
| executor/chunk_size_control_test.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0008682208135724068,
0.0002675493306014687,
0.00016193490591831505,
0.00017065447173081338,
0.0002060737751889974
] |
{
"id": 13,
"code_window": [
"\tc.Assert(tablecodec.IsUntouchedIndexKValue(untouchedIndexKey, untouchedIndexValue), IsTrue)\n",
"\ttxn := s.begin(c)\n",
"\ttxn.SetOption(kv.KVFilter, drivertxn.TiDBKVFilter{})\n",
"\terr := txn.Set(untouchedIndexKey, untouchedIndexValue)\n",
"\tc.Assert(err, IsNil)\n",
"\tlockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}\n",
"\terr = txn.LockKeys(context.Background(), lockCtx, untouchedIndexKey, noValueIndexKey)\n",
"\tc.Assert(err, IsNil)\n",
"\tcommit, err := txn.NewCommitter(1)\n",
"\tc.Assert(err, IsNil)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tikv.LockNoWait}\n"
],
"file_path": "store/tikv/tests/2pc_test.go",
"type": "replace",
"edit_start_line_idx": 1038
} | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv_test
import (
"bytes"
"context"
"fmt"
"math"
"math/rand"
"strings"
"sync"
"sync/atomic"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
tidbkv "github.com/pingcap/tidb/kv"
drivertxn "github.com/pingcap/tidb/store/driver/txn"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/store/tikv/config"
"github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/mockstore/cluster"
"github.com/pingcap/tidb/store/tikv/mockstore/mocktikv"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/pingcap/tidb/tablecodec"
)
var (
txnCommitBatchSize = tikv.ConfigProbe{}.GetTxnCommitBatchSize()
bigTxnThreshold = tikv.ConfigProbe{}.GetBigTxnThreshold()
)
type testCommitterSuite struct {
OneByOneSuite
cluster cluster.Cluster
store tikv.StoreProbe
}
var _ = SerialSuites(&testCommitterSuite{})
func (s *testCommitterSuite) SetUpSuite(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // 3s
s.OneByOneSuite.SetUpSuite(c)
atomic.StoreUint64(&tikv.CommitMaxBackoff, 1000)
}
func (s *testCommitterSuite) SetUpTest(c *C) {
mvccStore, err := mocktikv.NewMVCCLevelDB("")
c.Assert(err, IsNil)
cluster := mocktikv.NewCluster(mvccStore)
mocktikv.BootstrapWithMultiRegions(cluster, []byte("a"), []byte("b"), []byte("c"))
s.cluster = cluster
client := mocktikv.NewRPCClient(cluster, mvccStore, nil)
pdCli := &tikv.CodecPDClient{Client: mocktikv.NewPDClient(cluster)}
spkv := tikv.NewMockSafePointKV()
store, err := tikv.NewKVStore("mocktikv-store", pdCli, spkv, client)
store.EnableTxnLocalLatches(1024000)
c.Assert(err, IsNil)
// TODO: make it possible
// store, err := mockstore.NewMockStore(
// mockstore.WithStoreType(mockstore.MockTiKV),
// mockstore.WithClusterInspector(func(c cluster.Cluster) {
// mockstore.BootstrapWithMultiRegions(c, []byte("a"), []byte("b"), []byte("c"))
// s.cluster = c
// }),
// mockstore.WithPDClientHijacker(func(c pd.Client) pd.Client {
// return &codecPDClient{c}
// }),
// mockstore.WithTxnLocalLatches(1024000),
// )
// c.Assert(err, IsNil)
s.store = tikv.StoreProbe{KVStore: store}
}
func (s *testCommitterSuite) TearDownSuite(c *C) {
atomic.StoreUint64(&tikv.CommitMaxBackoff, 20000)
s.store.Close()
s.OneByOneSuite.TearDownSuite(c)
}
func (s *testCommitterSuite) begin(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
return txn
}
func (s *testCommitterSuite) beginAsyncCommit(c *C) tikv.TxnProbe {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
txn.SetOption(kv.EnableAsyncCommit, true)
return txn
}
func (s *testCommitterSuite) checkValues(c *C, m map[string]string) {
txn := s.begin(c)
for k, v := range m {
val, err := txn.Get(context.TODO(), []byte(k))
c.Assert(err, IsNil)
c.Assert(string(val), Equals, v)
}
}
func (s *testCommitterSuite) mustCommit(c *C, m map[string]string) {
txn := s.begin(c)
for k, v := range m {
err := txn.Set([]byte(k), []byte(v))
c.Assert(err, IsNil)
}
err := txn.Commit(context.Background())
c.Assert(err, IsNil)
s.checkValues(c, m)
}
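// randKV generates a random key and value of the given lengths using a small alphabet.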
func randKV(keyLen, valLen int) (string, string) {
const letters = "abc"
k, v := make([]byte, keyLen), make([]byte, valLen)
for i := range k {
k[i] = letters[rand.Intn(len(letters))]
}
for i := range v {
v[i] = letters[rand.Intn(len(letters))]
}
return string(k), string(v)
}
func (s *testCommitterSuite) TestDeleteYourWritesTTL(c *C) {
conf := *config.GetGlobalConfig()
oldConf := conf
defer config.StoreGlobalConfig(&oldConf)
conf.TiKVClient.TTLRefreshedTxnSize = 0
config.StoreGlobalConfig(&conf)
{
txn := s.begin(c)
err := txn.GetMemBuffer().SetWithFlags([]byte("bb"), []byte{0}, kv.SetPresumeKeyNotExists)
c.Assert(err, IsNil)
err = txn.Set([]byte("ba"), []byte{1})
c.Assert(err, IsNil)
err = txn.Delete([]byte("bb"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
c.Check(committer.IsTTLRunning(), IsTrue)
}
{
txn := s.begin(c)
err := txn.GetMemBuffer().SetWithFlags([]byte("dd"), []byte{0}, kv.SetPresumeKeyNotExists)
c.Assert(err, IsNil)
err = txn.Set([]byte("de"), []byte{1})
c.Assert(err, IsNil)
err = txn.Delete([]byte("dd"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
c.Check(committer.IsTTLRunning(), IsTrue)
}
}
func (s *testCommitterSuite) TestCommitRollback(c *C) {
s.mustCommit(c, map[string]string{
"a": "a",
"b": "b",
"c": "c",
})
txn := s.begin(c)
txn.Set([]byte("a"), []byte("a1"))
txn.Set([]byte("b"), []byte("b1"))
txn.Set([]byte("c"), []byte("c1"))
s.mustCommit(c, map[string]string{
"c": "c2",
})
err := txn.Commit(context.Background())
c.Assert(err, NotNil)
s.checkValues(c, map[string]string{
"a": "a",
"b": "b",
"c": "c2",
})
}
func (s *testCommitterSuite) TestPrewriteRollback(c *C) {
s.mustCommit(c, map[string]string{
"a": "a0",
"b": "b0",
})
ctx := context.Background()
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
txn2 := s.begin(c)
v, err := txn2.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a0"))
err = committer.PrewriteAllMutations(ctx)
if err != nil {
// Retry.
txn1 = s.begin(c)
err = txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err = txn1.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
}
commitTS, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
committer.SetCommitTS(commitTS)
err = committer.CommitMutations(ctx)
c.Assert(err, IsNil)
txn3 := s.begin(c)
v, err = txn3.Get(context.TODO(), []byte("b"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("b1"))
}
func (s *testCommitterSuite) TestContextCancel(c *C) {
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
ctx, cancel := context.WithCancel(context.Background())
cancel() // cancel the context
err = committer.PrewriteAllMutations(ctx)
c.Assert(errors.Cause(err), Equals, context.Canceled)
}
func (s *testCommitterSuite) TestContextCancel2(c *C) {
txn := s.begin(c)
err := txn.Set([]byte("a"), []byte("a"))
c.Assert(err, IsNil)
err = txn.Set([]byte("b"), []byte("b"))
c.Assert(err, IsNil)
ctx, cancel := context.WithCancel(context.Background())
err = txn.Commit(ctx)
c.Assert(err, IsNil)
cancel()
// Secondary keys should not be canceled.
time.Sleep(time.Millisecond * 20)
c.Assert(s.isKeyLocked(c, []byte("b")), IsFalse)
}
func (s *testCommitterSuite) TestContextCancelRetryable(c *C) {
txn1, txn2, txn3 := s.begin(c), s.begin(c), s.begin(c)
// txn1 locks "b"
err := txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
// txn3 writes "c"
err = txn3.Set([]byte("c"), []byte("c3"))
c.Assert(err, IsNil)
err = txn3.Commit(context.Background())
c.Assert(err, IsNil)
// txn2 writes "a"(PK), "b", "c" on different regions.
// "c" will return a retryable error.
// "b" will get a Locked error first, then the context must be canceled after backoff for lock.
err = txn2.Set([]byte("a"), []byte("a2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("c"), []byte("c2"))
c.Assert(err, IsNil)
err = txn2.Commit(context.Background())
c.Assert(err, NotNil)
c.Assert(tidbkv.ErrWriteConflictInTiDB.Equal(err), IsTrue, Commentf("err: %s", err))
}
func (s *testCommitterSuite) TestContextCancelCausingUndetermined(c *C) {
// For a normal transaction, if RPC returns context.Canceled error while sending commit
// requests, the transaction should go to the undetermined state.
txn := s.begin(c)
err := txn.Set([]byte("a"), []byte("va"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
committer.PrewriteAllMutations(context.Background())
c.Assert(err, IsNil)
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/rpcContextCancelErr", `return(true)`), IsNil)
defer func() {
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/rpcContextCancelErr"), IsNil)
}()
err = committer.CommitMutations(context.Background())
c.Assert(committer.GetUndeterminedErr(), NotNil)
c.Assert(errors.Cause(err), Equals, context.Canceled)
}
func (s *testCommitterSuite) mustGetRegionID(c *C, key []byte) uint64 {
loc, err := s.store.GetRegionCache().LocateKey(tikv.NewBackofferWithVars(context.Background(), 500, nil), key)
c.Assert(err, IsNil)
return loc.Region.GetID()
}
func (s *testCommitterSuite) isKeyLocked(c *C, key []byte) bool {
ver, err := s.store.CurrentTimestamp(oracle.GlobalTxnScope)
c.Assert(err, IsNil)
bo := tikv.NewBackofferWithVars(context.Background(), 500, nil)
req := tikvrpc.NewRequest(tikvrpc.CmdGet, &kvrpcpb.GetRequest{
Key: key,
Version: ver,
})
loc, err := s.store.GetRegionCache().LocateKey(bo, key)
c.Assert(err, IsNil)
resp, err := s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
keyErr := (resp.Resp.(*kvrpcpb.GetResponse)).GetError()
return keyErr.GetLocked() != nil
}
func (s *testCommitterSuite) TestPrewriteCancel(c *C) {
// Setup region delays for key "b" and "c".
delays := map[uint64]time.Duration{
s.mustGetRegionID(c, []byte("b")): time.Millisecond * 10,
s.mustGetRegionID(c, []byte("c")): time.Millisecond * 20,
}
s.store.SetTiKVClient(&slowClient{
Client: s.store.GetTiKVClient(),
regionDelays: delays,
})
txn1, txn2 := s.begin(c), s.begin(c)
// txn2 writes "b"
err := txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
err = txn2.Commit(context.Background())
c.Assert(err, IsNil)
// txn1 writes "a"(PK), "b", "c" on different regions.
// "b" will return an error and cancel commit.
err = txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("c"), []byte("c1"))
c.Assert(err, IsNil)
err = txn1.Commit(context.Background())
c.Assert(err, NotNil)
// "c" should be cleaned up in reasonable time.
for i := 0; i < 50; i++ {
if !s.isKeyLocked(c, []byte("c")) {
return
}
time.Sleep(time.Millisecond * 10)
}
c.Fail()
}
// slowClient wraps rpcClient and makes some regions respond with delay.
type slowClient struct {
tikv.Client
regionDelays map[uint64]time.Duration
}
func (c *slowClient) SendReq(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
for id, delay := range c.regionDelays {
reqCtx := &req.Context
if reqCtx.GetRegionId() == id {
time.Sleep(delay)
}
}
return c.Client.SendRequest(ctx, addr, req, timeout)
}
func (s *testCommitterSuite) TestIllegalTso(c *C) {
txn := s.begin(c)
data := map[string]string{
"name": "aa",
"age": "12",
}
for k, v := range data {
err := txn.Set([]byte(k), []byte(v))
c.Assert(err, IsNil)
}
// make start ts bigger.
txn.SetStartTS(math.MaxUint64)
err := txn.Commit(context.Background())
c.Assert(err, NotNil)
errMsgMustContain(c, err, "invalid txnStartTS")
}
func errMsgMustContain(c *C, err error, msg string) {
c.Assert(strings.Contains(err.Error(), msg), IsTrue)
}
func (s *testCommitterSuite) TestCommitBeforePrewrite(c *C) {
txn := s.begin(c)
err := txn.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
ctx := context.Background()
committer.Cleanup(ctx)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, NotNil)
errMsgMustContain(c, err, "already rolled back")
}
func (s *testCommitterSuite) TestPrewritePrimaryKeyFailed(c *C) {
// commit (a,a1)
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Commit(context.Background())
c.Assert(err, IsNil)
// check a
txn := s.begin(c)
v, err := txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
// set txn2's startTs before txn1's
txn2 := s.begin(c)
txn2.SetStartTS(txn1.StartTS() - 1)
err = txn2.Set([]byte("a"), []byte("a2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
	// prewrite: primary a fails, b succeeds
err = txn2.Commit(context.Background())
c.Assert(err, NotNil)
// txn2 failed with a rollback for record a.
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
_, err = txn.Get(context.TODO(), []byte("b"))
errMsgMustContain(c, err, "key not exist")
	// clean again, it shouldn't fail when a rollback already exists.
ctx := context.Background()
committer, err := txn2.NewCommitter(0)
c.Assert(err, IsNil)
committer.Cleanup(ctx)
// check the data after rollback twice.
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
	// update data in a new txn, it should succeed.
err = txn.Set([]byte("a"), []byte("a3"))
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
// check value
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a3"))
}
func (s *testCommitterSuite) TestWrittenKeysOnConflict(c *C) {
	// This test checks that when there is a write conflict, the written keys are collected,
	// so we can use them to clean up the keys.
region, _ := s.cluster.GetRegionByKey([]byte("x"))
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(region.Id, newRegionID, []byte("y"), []uint64{newPeerID}, newPeerID)
var totalTime time.Duration
for i := 0; i < 10; i++ {
txn1 := s.begin(c)
txn2 := s.begin(c)
txn2.Set([]byte("x1"), []byte("1"))
committer2, err := txn2.NewCommitter(2)
c.Assert(err, IsNil)
err = committer2.Execute(context.Background())
c.Assert(err, IsNil)
txn1.Set([]byte("x1"), []byte("1"))
txn1.Set([]byte("y1"), []byte("2"))
committer1, err := txn1.NewCommitter(2)
c.Assert(err, IsNil)
err = committer1.Execute(context.Background())
c.Assert(err, NotNil)
committer1.WaitCleanup()
txn3 := s.begin(c)
start := time.Now()
txn3.Get(context.TODO(), []byte("y1"))
totalTime += time.Since(start)
txn3.Commit(context.Background())
}
c.Assert(totalTime, Less, time.Millisecond*200)
}
func (s *testCommitterSuite) TestPrewriteTxnSize(c *C) {
// Prepare two regions first: (, 100) and [100, )
region, _ := s.cluster.GetRegionByKey([]byte{50})
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(region.Id, newRegionID, []byte{100}, []uint64{newPeerID}, newPeerID)
txn := s.begin(c)
var val [1024]byte
for i := byte(50); i < 120; i++ {
err := txn.Set([]byte{i}, val[:])
c.Assert(err, IsNil)
}
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
ctx := context.Background()
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
// Check the written locks in the first region (50 keys)
for i := byte(50); i < 100; i++ {
lock := s.getLockInfo(c, []byte{i})
c.Assert(int(lock.TxnSize), Equals, 50)
}
// Check the written locks in the second region (20 keys)
for i := byte(100); i < 120; i++ {
lock := s.getLockInfo(c, []byte{i})
c.Assert(int(lock.TxnSize), Equals, 20)
}
}
func (s *testCommitterSuite) TestRejectCommitTS(c *C) {
txn := s.begin(c)
c.Assert(txn.Set([]byte("x"), []byte("v")), IsNil)
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, []byte("x"))
c.Assert(err, IsNil)
mutations := []*kvrpcpb.Mutation{
{
Op: committer.GetMutations().GetOp(0),
Key: committer.GetMutations().GetKey(0),
Value: committer.GetMutations().GetValue(0),
},
}
prewrite := &kvrpcpb.PrewriteRequest{
Mutations: mutations,
PrimaryLock: committer.GetPrimaryKey(),
StartVersion: committer.GetStartTS(),
LockTtl: committer.GetLockTTL(),
MinCommitTs: committer.GetStartTS() + 100, // Set minCommitTS
}
req := tikvrpc.NewRequest(tikvrpc.CmdPrewrite, prewrite)
_, err = s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
// Make commitTS less than minCommitTS.
committer.SetCommitTS(committer.GetStartTS() + 1)
	// Ensure that the new commit ts is greater than minCommitTS when retrying
time.Sleep(3 * time.Millisecond)
err = committer.CommitMutations(context.Background())
c.Assert(err, IsNil)
	// Use startTS+2 to read the data and get nothing.
	// Use math.MaxUint64 to read the data and succeed.
	// That means the final commitTS > startTS+2, so it is not the one we provided.
	// So we cover the retry commitTS logic.
txn1, err := s.store.BeginWithStartTS(oracle.GlobalTxnScope, committer.GetStartTS()+2)
c.Assert(err, IsNil)
_, err = txn1.Get(bo.GetCtx(), []byte("x"))
c.Assert(tidbkv.IsErrNotFound(err), IsTrue)
txn2, err := s.store.BeginWithStartTS(oracle.GlobalTxnScope, math.MaxUint64)
c.Assert(err, IsNil)
val, err := txn2.Get(bo.GetCtx(), []byte("x"))
c.Assert(err, IsNil)
c.Assert(bytes.Equal(val, []byte("v")), IsTrue)
}
func (s *testCommitterSuite) TestPessimisticPrewriteRequest(c *C) {
	// This test checks that the isPessimisticLock field is set in the request even when none of the keys is a pessimistic lock.
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
err := txn.Set([]byte("t1"), []byte("v1"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(0)
c.Assert(err, IsNil)
committer.SetForUpdateTS(100)
req := committer.BuildPrewriteRequest(1, 1, 1, committer.GetMutations().Slice(0, 1), 1)
c.Assert(len(req.Prewrite().IsPessimisticLock), Greater, 0)
c.Assert(req.Prewrite().ForUpdateTs, Equals, uint64(100))
}
func (s *testCommitterSuite) TestUnsetPrimaryKey(c *C) {
	// This test checks that the isPessimisticLock field is set in the request even when none of the keys is a pessimistic lock.
key := []byte("key")
txn := s.begin(c)
c.Assert(txn.Set(key, key), IsNil)
c.Assert(txn.Commit(context.Background()), IsNil)
txn = s.begin(c)
txn.SetOption(kv.Pessimistic, true)
_, _ = txn.GetUnionStore().Get(context.TODO(), key)
c.Assert(txn.GetMemBuffer().SetWithFlags(key, key, kv.SetPresumeKeyNotExists), IsNil)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, NotNil)
c.Assert(txn.Delete(key), IsNil)
key2 := []byte("key2")
c.Assert(txn.Set(key2, key2), IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
}
func (s *testCommitterSuite) TestPessimisticLockedKeysDedup(c *C) {
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
lockCtx := &kv.LockCtx{ForUpdateTS: 100, WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, []byte("abc"), []byte("def"))
c.Assert(err, IsNil)
lockCtx = &kv.LockCtx{ForUpdateTS: 100, WaitStartTime: time.Now()}
err = txn.LockKeys(context.Background(), lockCtx, []byte("abc"), []byte("def"))
c.Assert(err, IsNil)
c.Assert(txn.CollectLockedKeys(), HasLen, 2)
}
func (s *testCommitterSuite) TestPessimisticTTL(c *C) {
key := []byte("key")
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
time.Sleep(time.Millisecond * 100)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, IsNil)
time.Sleep(time.Millisecond * 100)
key2 := []byte("key2")
lockCtx = &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
err = txn.LockKeys(context.Background(), lockCtx, key2)
c.Assert(err, IsNil)
lockInfo := s.getLockInfo(c, key)
msBeforeLockExpired := s.store.GetOracle().UntilExpired(txn.StartTS(), lockInfo.LockTtl, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(msBeforeLockExpired, GreaterEqual, int64(100))
lr := s.store.NewLockResolver()
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
status, err := lr.GetTxnStatus(bo, txn.StartTS(), key2, 0, txn.StartTS(), true, false, nil)
c.Assert(err, IsNil)
c.Assert(status.TTL(), GreaterEqual, lockInfo.LockTtl)
	// Check that the primary lock TTL is automatically increased while the pessimistic txn is ongoing.
for i := 0; i < 50; i++ {
lockInfoNew := s.getLockInfo(c, key)
if lockInfoNew.LockTtl > lockInfo.LockTtl {
currentTS, err := s.store.GetOracle().GetTimestamp(bo.GetCtx(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
c.Assert(err, IsNil)
			// Check that the TTL is updated to a reasonable range.
expire := oracle.ExtractPhysical(txn.StartTS()) + int64(lockInfoNew.LockTtl)
now := oracle.ExtractPhysical(currentTS)
c.Assert(expire > now, IsTrue)
c.Assert(uint64(expire-now) <= atomic.LoadUint64(&tikv.ManagedLockTTL), IsTrue)
return
}
time.Sleep(100 * time.Millisecond)
}
c.Assert(false, IsTrue, Commentf("update pessimistic ttl fail"))
}
func (s *testCommitterSuite) TestPessimisticLockReturnValues(c *C) {
key := []byte("key")
key2 := []byte("key2")
txn := s.begin(c)
c.Assert(txn.Set(key, key), IsNil)
c.Assert(txn.Set(key2, key2), IsNil)
c.Assert(txn.Commit(context.Background()), IsNil)
txn = s.begin(c)
txn.SetOption(kv.Pessimistic, true)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
lockCtx.ReturnValues = true
lockCtx.Values = map[string]kv.ReturnedValue{}
c.Assert(txn.LockKeys(context.Background(), lockCtx, key, key2), IsNil)
c.Assert(lockCtx.Values, HasLen, 2)
c.Assert(lockCtx.Values[string(key)].Value, BytesEquals, key)
c.Assert(lockCtx.Values[string(key2)].Value, BytesEquals, key2)
}
// TestElapsedTTL tests that elapsed time is correct even if ts physical time is greater than local time.
func (s *testCommitterSuite) TestElapsedTTL(c *C) {
key := []byte("key")
txn := s.begin(c)
txn.SetStartTS(oracle.ComposeTS(oracle.GetPhysical(time.Now().Add(time.Second*10)), 1))
txn.SetOption(kv.Pessimistic, true)
time.Sleep(time.Millisecond * 100)
lockCtx := &kv.LockCtx{
ForUpdateTS: oracle.ComposeTS(oracle.ExtractPhysical(txn.StartTS())+100, 1),
WaitStartTime: time.Now(),
}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, IsNil)
lockInfo := s.getLockInfo(c, key)
c.Assert(lockInfo.LockTtl-atomic.LoadUint64(&tikv.ManagedLockTTL), GreaterEqual, uint64(100))
c.Assert(lockInfo.LockTtl-atomic.LoadUint64(&tikv.ManagedLockTTL), Less, uint64(150))
}
func (s *testCommitterSuite) TestDeleteYourWriteCauseGhostPrimary(c *C) {
s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
k1 := []byte("a") // insert but deleted key at first pos in txn1
k2 := []byte("b") // insert key at second pos in txn1
k3 := []byte("c") // insert key in txn1 and will be conflict read by txn2
// insert k1, k2, k3 and delete k1
txn1 := s.begin(c)
txn1.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
txn1.Get(context.Background(), k1)
txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
txn1.Set(k2, []byte{1})
txn1.Set(k3, []byte{2})
txn1.Delete(k1)
committer1, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
// setup test knob in txn's committer
ac, bk := make(chan struct{}), make(chan struct{})
committer1.SetPrimaryKeyBlocker(ac, bk)
txn1.SetCommitter(committer1)
var txn1Done sync.WaitGroup
txn1Done.Add(1)
go func() {
err1 := txn1.Commit(context.Background())
c.Assert(err1, IsNil)
txn1Done.Done()
}()
	// resume after the primary key has been committed
<-ac
// start txn2 to read k3(prewrite success and primary should be committed)
txn2 := s.begin(c)
txn2.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
v, err := txn2.Get(context.Background(), k3)
	c.Assert(err, IsNil) // should resolve the lock and read txn1's k3 result instead of rolling it back.
c.Assert(v[0], Equals, byte(2))
bk <- struct{}{}
txn1Done.Wait()
}
func (s *testCommitterSuite) TestDeleteAllYourWrites(c *C) {
s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
k1 := []byte("a")
k2 := []byte("b")
k3 := []byte("c")
// insert k1, k2, k3 and delete k1, k2, k3
txn1 := s.begin(c)
txn1.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
txn1.Delete(k1)
txn1.GetMemBuffer().SetWithFlags(k2, []byte{1}, kv.SetPresumeKeyNotExists)
txn1.Delete(k2)
txn1.GetMemBuffer().SetWithFlags(k3, []byte{2}, kv.SetPresumeKeyNotExists)
txn1.Delete(k3)
err1 := txn1.Commit(context.Background())
c.Assert(err1, IsNil)
}
func (s *testCommitterSuite) TestDeleteAllYourWritesWithSFU(c *C) {
s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
k1 := []byte("a")
k2 := []byte("b")
k3 := []byte("c")
	// insert and delete k1, then lock k2 and k3 with select-for-update
txn1 := s.begin(c)
txn1.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
txn1.Delete(k1)
err := txn1.LockKeys(context.Background(), &kv.LockCtx{}, k2, k3) // select * from t where x in (k2, k3) for update
c.Assert(err, IsNil)
committer1, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
// setup test knob in txn's committer
ac, bk := make(chan struct{}), make(chan struct{})
committer1.SetPrimaryKeyBlocker(ac, bk)
txn1.SetCommitter(committer1)
var txn1Done sync.WaitGroup
txn1Done.Add(1)
go func() {
err1 := txn1.Commit(context.Background())
c.Assert(err1, IsNil)
txn1Done.Done()
}()
	// resume after the primary key has been committed
<-ac
// start txn2 to read k3
txn2 := s.begin(c)
txn2.DelOption(kv.Pessimistic)
s.store.ClearTxnLatches()
err = txn2.Set(k3, []byte{33})
c.Assert(err, IsNil)
var meetLocks []*tikv.Lock
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
resolver.SetMeetLockCallback(func(locks []*tikv.Lock) {
meetLocks = append(meetLocks, locks...)
})
err = txn2.Commit(context.Background())
c.Assert(err, IsNil)
bk <- struct{}{}
txn1Done.Wait()
c.Assert(meetLocks[0].Primary[0], Equals, k2[0])
}
// TestAcquireFalseTimeoutLock tests acquiring a key which is a secondary key of another transaction.
// The lock's own TTL is expired but the primary key is still alive due to heartbeats.
func (s *testCommitterSuite) TestAcquireFalseTimeoutLock(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 1000) // 1s
defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default test value
// k1 is the primary lock of txn1
k1 := []byte("k1")
// k2 is a secondary lock of txn1 and a key txn2 wants to lock
k2 := []byte("k2")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// lock the primary key
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1)
c.Assert(err, IsNil)
// lock the secondary key
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err = txn1.LockKeys(context.Background(), lockCtx, k2)
c.Assert(err, IsNil)
// Heartbeats will increase the TTL of the primary key
// wait until secondary key exceeds its own TTL
time.Sleep(time.Duration(atomic.LoadUint64(&tikv.ManagedLockTTL)) * time.Millisecond)
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
// test no wait
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: tidbkv.LockNoWait, WaitStartTime: time.Now()}
err = txn2.LockKeys(context.Background(), lockCtx, k2)
// cannot acquire lock immediately thus error
c.Assert(err.Error(), Equals, kv.ErrLockAcquireFailAndNoWaitSet.Error())
// test for wait limited time (200ms)
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: 200, WaitStartTime: time.Now()}
err = txn2.LockKeys(context.Background(), lockCtx, k2)
// cannot acquire lock in time thus error
c.Assert(err.Error(), Equals, kv.ErrLockWaitTimeout.Error())
}
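// getLockInfo prewrites key with a fresh transaction and returns the Locked error carried
// in the response, which describes the lock currently held on the key.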
func (s *testCommitterSuite) getLockInfo(c *C, key []byte) *kvrpcpb.LockInfo {
txn := s.begin(c)
err := txn.Set(key, key)
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
loc, err := s.store.GetRegionCache().LocateKey(bo, key)
c.Assert(err, IsNil)
req := committer.BuildPrewriteRequest(loc.Region.GetID(), loc.Region.GetConfVer(), loc.Region.GetVer(), committer.GetMutations().Slice(0, 1), 1)
resp, err := s.store.SendReq(bo, req, loc.Region, 5000)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
keyErrs := (resp.Resp.(*kvrpcpb.PrewriteResponse)).Errors
c.Assert(keyErrs, HasLen, 1)
locked := keyErrs[0].Locked
c.Assert(locked, NotNil)
return locked
}
func (s *testCommitterSuite) TestPkNotFound(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 100) // 100ms
defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default value
ctx := context.Background()
// k1 is the primary lock of txn1.
k1 := []byte("k1")
// k2 is a secondary lock of txn1 and a key txn2 wants to lock.
k2 := []byte("k2")
k3 := []byte("k3")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// lock the primary key.
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(ctx, lockCtx, k1)
c.Assert(err, IsNil)
// lock the secondary key.
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err = txn1.LockKeys(ctx, lockCtx, k2, k3)
c.Assert(err, IsNil)
	// Stop the txn ttl manager and remove the primary key, as if the tidb server crashed and the primary key
	// lock does not actually exist, while the secondary lock operation succeeded.
txn1.GetCommitter().CloseTTLManager()
var status tikv.TxnStatus
bo := tikv.NewBackofferWithVars(ctx, 5000, nil)
lockKey2 := &tikv.Lock{
Key: k2,
Primary: k1,
TxnID: txn1.StartTS(),
TTL: 0, // let the primary lock k1 expire doing check.
TxnSize: txnCommitBatchSize,
LockType: kvrpcpb.Op_PessimisticLock,
LockForUpdateTS: txn1.StartTS(),
}
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
status, err = resolver.GetTxnStatusFromLock(bo, lockKey2, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
c.Assert(err, IsNil)
c.Assert(status.Action(), Equals, kvrpcpb.Action_TTLExpirePessimisticRollback)
// Txn2 tries to lock the secondary key k2, there should be no dead loop.
	// Since the resolving key k2 is a pessimistic lock, no rollback record should be written, and the later
	// lock on the other secondary key k3 should succeed if no fail point is enabled.
status, err = resolver.GetTxnStatusFromLock(bo, lockKey2, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
c.Assert(err, IsNil)
c.Assert(status.Action(), Equals, kvrpcpb.Action_LockNotExistDoNothing)
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now()}
err = txn2.LockKeys(ctx, lockCtx, k2)
c.Assert(err, IsNil)
// Pessimistic rollback using smaller forUpdateTS does not take effect.
lockKey3 := &tikv.Lock{
Key: k3,
Primary: k1,
TxnID: txn1.StartTS(),
TTL: tikv.ManagedLockTTL,
TxnSize: txnCommitBatchSize,
LockType: kvrpcpb.Op_PessimisticLock,
LockForUpdateTS: txn1.StartTS() - 1,
}
err = resolver.ResolvePessimisticLock(ctx, lockKey3)
c.Assert(err, IsNil)
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err = txn1.LockKeys(ctx, lockCtx, k3)
c.Assert(err, IsNil)
	// After the fail point is disabled, the rollbackIfNotExist flag will be set, and the resolve should succeed.
	// In this case, the returned action of TxnStatus should be LockNotExistDoNothing, and the lock on k3 could be resolved.
txn3 := s.begin(c)
txn3.SetOption(kv.Pessimistic, true)
lockCtx = &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
err = txn3.LockKeys(ctx, lockCtx, k3)
c.Assert(err, IsNil)
status, err = resolver.GetTxnStatusFromLock(bo, lockKey3, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
c.Assert(err, IsNil)
c.Assert(status.Action(), Equals, kvrpcpb.Action_LockNotExistDoNothing)
}
func (s *testCommitterSuite) TestPessimisticLockPrimary(c *C) {
// a is the primary lock of txn1
k1 := []byte("a")
// b is a secondary lock of txn1 and a key txn2 wants to lock, b is on another region
k2 := []byte("b")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// txn1 lock k1
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1)
c.Assert(err, IsNil)
	// txn2 wants to lock k1 and k2; k1(pk) is blocked by txn1. Since pessimisticLockKeys locks the primary
	// key first and then the secondary keys concurrently, k2 should not be locked by txn2.
doneCh := make(chan error)
go func() {
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
lockCtx2 := &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: 200}
waitErr := txn2.LockKeys(context.Background(), lockCtx2, k1, k2)
doneCh <- waitErr
}()
time.Sleep(50 * time.Millisecond)
	// txn3 should lock k2 successfully using no wait
txn3 := s.begin(c)
txn3.SetOption(kv.Pessimistic, true)
lockCtx3 := &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL", "return"), IsNil)
err = txn3.LockKeys(context.Background(), lockCtx3, k2)
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL"), IsNil)
c.Assert(err, IsNil)
waitErr := <-doneCh
c.Assert(kv.ErrLockWaitTimeout.Equal(waitErr), IsTrue)
}
func (s *testCommitterSuite) TestResolvePessimisticLock(c *C) {
untouchedIndexKey := []byte("t00000001_i000000001")
untouchedIndexValue := []byte{0, 0, 0, 0, 0, 0, 0, 1, 49}
noValueIndexKey := []byte("t00000001_i000000002")
c.Assert(tablecodec.IsUntouchedIndexKValue(untouchedIndexKey, untouchedIndexValue), IsTrue)
txn := s.begin(c)
txn.SetOption(kv.KVFilter, drivertxn.TiDBKVFilter{})
err := txn.Set(untouchedIndexKey, untouchedIndexValue)
c.Assert(err, IsNil)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
err = txn.LockKeys(context.Background(), lockCtx, untouchedIndexKey, noValueIndexKey)
c.Assert(err, IsNil)
commit, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
mutation := commit.MutationsOfKeys([][]byte{untouchedIndexKey, noValueIndexKey})
c.Assert(mutation.Len(), Equals, 2)
c.Assert(mutation.GetOp(0), Equals, pb.Op_Lock)
c.Assert(mutation.GetKey(0), BytesEquals, untouchedIndexKey)
c.Assert(mutation.GetValue(0), BytesEquals, untouchedIndexValue)
c.Assert(mutation.GetOp(1), Equals, pb.Op_Lock)
c.Assert(mutation.GetKey(1), BytesEquals, noValueIndexKey)
c.Assert(mutation.GetValue(1), BytesEquals, []byte{})
}
func (s *testCommitterSuite) TestCommitDeadLock(c *C) {
	// Split into two regions so that k1 and k2 fall in different regions.
s.cluster.SplitKeys([]byte("z"), []byte("a"), 2)
k1 := []byte("a_deadlock_k1")
k2 := []byte("y_deadlock_k2")
region1, _ := s.cluster.GetRegionByKey(k1)
region2, _ := s.cluster.GetRegionByKey(k2)
c.Assert(region1.Id != region2.Id, IsTrue)
txn1 := s.begin(c)
txn1.Set(k1, []byte("t1"))
txn1.Set(k2, []byte("t1"))
commit1, err := txn1.NewCommitter(1)
c.Assert(err, IsNil)
commit1.SetPrimaryKey(k1)
commit1.SetTxnSize(1000 * 1024 * 1024)
txn2 := s.begin(c)
txn2.Set(k1, []byte("t2"))
txn2.Set(k2, []byte("t2"))
commit2, err := txn2.NewCommitter(2)
c.Assert(err, IsNil)
commit2.SetPrimaryKey(k2)
commit2.SetTxnSize(1000 * 1024 * 1024)
s.cluster.ScheduleDelay(txn2.StartTS(), region1.Id, 5*time.Millisecond)
s.cluster.ScheduleDelay(txn1.StartTS(), region2.Id, 5*time.Millisecond)
	// Txn1 prewrites k1, k2 and txn2 prewrites k2, k1. The large-txn
	// protocol runs ttlManager and updates their TTLs, which can cause a dead lock.
ch := make(chan error, 2)
var wg sync.WaitGroup
wg.Add(1)
go func() {
ch <- commit2.Execute(context.Background())
wg.Done()
}()
ch <- commit1.Execute(context.Background())
wg.Wait()
close(ch)
res := 0
for e := range ch {
if e != nil {
res++
}
}
c.Assert(res, Equals, 1)
}
// TestPushPessimisticLock tests pushing forward the minCommitTS of pessimistic locks.
func (s *testCommitterSuite) TestPushPessimisticLock(c *C) {
// k1 is the primary key.
k1, k2 := []byte("a"), []byte("b")
ctx := context.Background()
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1, k2)
c.Assert(err, IsNil)
txn1.Set(k2, []byte("v2"))
committer := txn1.GetCommitter()
err = committer.InitKeysAndMutations()
c.Assert(err, IsNil)
// Strip the prewrite of the primary key.
committer.SetMutations(committer.GetMutations().Slice(1, 2))
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
	// The primary lock is a pessimistic lock and the secondary lock is an optimistic lock.
lock1 := s.getLockInfo(c, k1)
c.Assert(lock1.LockType, Equals, kvrpcpb.Op_PessimisticLock)
c.Assert(lock1.PrimaryLock, BytesEquals, k1)
lock2 := s.getLockInfo(c, k2)
c.Assert(lock2.LockType, Equals, kvrpcpb.Op_Put)
c.Assert(lock2.PrimaryLock, BytesEquals, k1)
txn2 := s.begin(c)
start := time.Now()
_, err = txn2.Get(ctx, k2)
elapsed := time.Since(start)
// The optimistic lock shouldn't block reads.
c.Assert(elapsed, Less, 500*time.Millisecond)
c.Assert(tidbkv.IsErrNotFound(err), IsTrue)
txn1.Rollback()
txn2.Rollback()
}
// TestResolveMixed tests mixed resolving of left-behind optimistic and pessimistic locks,
// using the clean-whole-region resolve path
func (s *testCommitterSuite) TestResolveMixed(c *C) {
atomic.StoreUint64(&tikv.ManagedLockTTL, 100) // 100ms
defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default value
ctx := context.Background()
// pk is the primary lock of txn1
pk := []byte("pk")
secondaryLockkeys := make([][]byte, 0, bigTxnThreshold)
for i := 0; i < bigTxnThreshold; i++ {
optimisticLock := []byte(fmt.Sprintf("optimisticLockKey%d", i))
secondaryLockkeys = append(secondaryLockkeys, optimisticLock)
}
pessimisticLockKey := []byte("pessimisticLockKey")
	// leave the optimistic and pessimistic locks behind with their primary lock not found
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// lock the primary key
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, pk)
c.Assert(err, IsNil)
// lock the optimistic keys
for i := 0; i < bigTxnThreshold; i++ {
txn1.Set(secondaryLockkeys[i], []byte(fmt.Sprintf("v%d", i)))
}
committer := txn1.GetCommitter()
err = committer.InitKeysAndMutations()
c.Assert(err, IsNil)
err = committer.PrewriteAllMutations(ctx)
c.Assert(err, IsNil)
// lock the pessimistic keys
err = txn1.LockKeys(context.Background(), lockCtx, pessimisticLockKey)
c.Assert(err, IsNil)
lock1 := s.getLockInfo(c, pessimisticLockKey)
c.Assert(lock1.LockType, Equals, kvrpcpb.Op_PessimisticLock)
c.Assert(lock1.PrimaryLock, BytesEquals, pk)
optimisticLockKey := secondaryLockkeys[0]
lock2 := s.getLockInfo(c, optimisticLockKey)
c.Assert(lock2.LockType, Equals, kvrpcpb.Op_Put)
c.Assert(lock2.PrimaryLock, BytesEquals, pk)
	// stop the txn TTL manager and remove the primary key, leaving the other keys behind
committer.CloseTTLManager()
muts := tikv.NewPlainMutations(1)
muts.Push(kvrpcpb.Op_Lock, pk, nil, true)
err = committer.PessimisticRollbackMutations(context.Background(), &muts)
c.Assert(err, IsNil)
	// try to resolve the left-behind optimistic locks using the clean-whole-region path
time.Sleep(time.Duration(atomic.LoadUint64(&tikv.ManagedLockTTL)) * time.Millisecond)
optimisticLockInfo := s.getLockInfo(c, optimisticLockKey)
lock := tikv.NewLock(optimisticLockInfo)
resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
err = resolver.ResolveLock(ctx, lock)
c.Assert(err, IsNil)
	// txn2 tries to lock the pessimisticLockKey; the lock should have been resolved by the clean-whole-region resolve
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}
err = txn2.LockKeys(context.Background(), lockCtx, pessimisticLockKey)
c.Assert(err, IsNil)
err = txn1.Rollback()
c.Assert(err, IsNil)
err = txn2.Rollback()
c.Assert(err, IsNil)
}
// TestPrewriteSecondaryKeys tests that when async commit is enabled, each prewrite message includes an
// accurate list of secondary keys.
func (s *testCommitterSuite) TestPrewriteSecondaryKeys(c *C) {
// Prepare two regions first: (, 100) and [100, )
region, _ := s.cluster.GetRegionByKey([]byte{50})
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(region.Id, newRegionID, []byte{100}, []uint64{newPeerID}, newPeerID)
txn := s.beginAsyncCommit(c)
var val [1024]byte
for i := byte(50); i < 120; i++ {
err := txn.Set([]byte{i}, val[:])
c.Assert(err, IsNil)
}
// Some duplicates.
for i := byte(50); i < 120; i += 10 {
err := txn.Set([]byte{i}, val[512:700])
c.Assert(err, IsNil)
}
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
mock := mockClient{inner: s.store.GetTiKVClient()}
s.store.SetTiKVClient(&mock)
ctx := context.Background()
// TODO remove this when minCommitTS is returned from mockStore prewrite response.
committer.SetMinCommitTS(committer.GetStartTS() + 10)
committer.SetNoFallBack()
err = committer.Execute(ctx)
c.Assert(err, IsNil)
c.Assert(mock.seenPrimaryReq > 0, IsTrue)
c.Assert(mock.seenSecondaryReq > 0, IsTrue)
}
func (s *testCommitterSuite) TestAsyncCommit(c *C) {
ctx := context.Background()
pk := []byte("tpk")
pkVal := []byte("pkVal")
k1 := []byte("tk1")
k1Val := []byte("k1Val")
txn1 := s.beginAsyncCommit(c)
err := txn1.Set(pk, pkVal)
c.Assert(err, IsNil)
err = txn1.Set(k1, k1Val)
c.Assert(err, IsNil)
committer, err := txn1.NewCommitter(0)
c.Assert(err, IsNil)
committer.SetSessionID(1)
committer.SetMinCommitTS(txn1.StartTS() + 10)
err = committer.Execute(ctx)
c.Assert(err, IsNil)
s.checkValues(c, map[string]string{
string(pk): string(pkVal),
string(k1): string(k1Val),
})
}
func updateGlobalConfig(f func(conf *config.Config)) {
g := config.GetGlobalConfig()
newConf := *g
f(&newConf)
config.StoreGlobalConfig(&newConf)
}
// restoreGlobalConfFunc returns a function that restores the global config to its current value.
func restoreGlobalConfFunc() (restore func()) {
g := config.GetGlobalConfig()
return func() {
config.StoreGlobalConfig(g)
}
}
func (s *testCommitterSuite) TestAsyncCommitCheck(c *C) {
defer restoreGlobalConfFunc()()
updateGlobalConfig(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.KeysLimit = 16
conf.TiKVClient.AsyncCommit.TotalKeySizeLimit = 64
})
txn := s.beginAsyncCommit(c)
buf := []byte{0, 0, 0, 0}
// Set 16 keys, each key is 4 bytes long. So the total size of keys is 64 bytes.
for i := 0; i < 16; i++ {
buf[0] = byte(i)
err := txn.Set(buf, []byte("v"))
c.Assert(err, IsNil)
}
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
c.Assert(committer.CheckAsyncCommit(), IsTrue)
updateGlobalConfig(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.KeysLimit = 15
})
c.Assert(committer.CheckAsyncCommit(), IsFalse)
updateGlobalConfig(func(conf *config.Config) {
conf.TiKVClient.AsyncCommit.KeysLimit = 20
conf.TiKVClient.AsyncCommit.TotalKeySizeLimit = 63
})
c.Assert(committer.CheckAsyncCommit(), IsFalse)
}
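// mockClient wraps the real TiKV client and inspects async-commit prewrite requests, recording whether a
// well-formed primary request and a well-formed secondary request have been observed.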
type mockClient struct {
inner tikv.Client
seenPrimaryReq uint32
seenSecondaryReq uint32
}
func (m *mockClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
// If we find a prewrite request, check if it satisfies our constraints.
if pr, ok := req.Req.(*kvrpcpb.PrewriteRequest); ok {
if pr.UseAsyncCommit {
if isPrimary(pr) {
// The primary key should not be included, nor should there be any duplicates. All keys should be present.
if !includesPrimary(pr) && allKeysNoDups(pr) {
atomic.StoreUint32(&m.seenPrimaryReq, 1)
}
} else {
				// The secondaries list should only be sent with the primary key's request
if len(pr.Secondaries) == 0 {
atomic.StoreUint32(&m.seenSecondaryReq, 1)
}
}
}
}
return m.inner.SendRequest(ctx, addr, req, timeout)
}
func (m *mockClient) Close() error {
return m.inner.Close()
}
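// isPrimary reports whether the prewrite request's mutations include the primary key.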
func isPrimary(req *kvrpcpb.PrewriteRequest) bool {
for _, m := range req.Mutations {
if bytes.Equal(req.PrimaryLock, m.Key) {
return true
}
}
return false
}
func includesPrimary(req *kvrpcpb.PrewriteRequest) bool {
for _, k := range req.Secondaries {
if bytes.Equal(req.PrimaryLock, k) {
return true
}
}
return false
}
func allKeysNoDups(req *kvrpcpb.PrewriteRequest) bool {
check := make(map[string]bool)
// Create the check map and check for duplicates.
for _, k := range req.Secondaries {
s := string(k)
if check[s] {
return false
}
check[s] = true
}
// Check every key is present.
for i := byte(50); i < 120; i++ {
k := []byte{i}
if !bytes.Equal(req.PrimaryLock, k) && !check[string(k)] {
return false
}
}
return true
}
| store/tikv/tests/2pc_test.go | 1 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.9964742064476013,
0.11461545526981354,
0.00016306850011460483,
0.0028647775761783123,
0.30311423540115356
] |
{
"id": 13,
"code_window": [
"\tc.Assert(tablecodec.IsUntouchedIndexKValue(untouchedIndexKey, untouchedIndexValue), IsTrue)\n",
"\ttxn := s.begin(c)\n",
"\ttxn.SetOption(kv.KVFilter, drivertxn.TiDBKVFilter{})\n",
"\terr := txn.Set(untouchedIndexKey, untouchedIndexValue)\n",
"\tc.Assert(err, IsNil)\n",
"\tlockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}\n",
"\terr = txn.LockKeys(context.Background(), lockCtx, untouchedIndexKey, noValueIndexKey)\n",
"\tc.Assert(err, IsNil)\n",
"\tcommit, err := txn.NewCommitter(1)\n",
"\tc.Assert(err, IsNil)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tikv.LockNoWait}\n"
],
"file_path": "store/tikv/tests/2pc_test.go",
"type": "replace",
"edit_start_line_idx": 1038
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package aggfuncs
import (
"unsafe"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/hack"
"github.com/pingcap/tidb/util/set"
)
const (
// DefPartialResult4SumFloat64Size is the size of partialResult4SumFloat64
DefPartialResult4SumFloat64Size = int64(unsafe.Sizeof(partialResult4SumFloat64{}))
// DefPartialResult4SumDecimalSize is the size of partialResult4SumDecimal
DefPartialResult4SumDecimalSize = int64(unsafe.Sizeof(partialResult4SumDecimal{}))
// DefPartialResult4SumDistinctFloat64Size is the size of partialResult4SumDistinctFloat64
DefPartialResult4SumDistinctFloat64Size = int64(unsafe.Sizeof(partialResult4SumDistinctFloat64{}))
// DefPartialResult4SumDistinctDecimalSize is the size of partialResult4SumDistinctDecimal
DefPartialResult4SumDistinctDecimalSize = int64(unsafe.Sizeof(partialResult4SumDistinctDecimal{}))
)
type partialResult4SumFloat64 struct {
val float64
notNullRowCount int64
}
type partialResult4SumDecimal struct {
val types.MyDecimal
notNullRowCount int64
}
type partialResult4SumDistinctFloat64 struct {
val float64
isNull bool
valSet set.Float64SetWithMemoryUsage
}
type partialResult4SumDistinctDecimal struct {
val types.MyDecimal
isNull bool
valSet set.StringSetWithMemoryUsage
}
type baseSumAggFunc struct {
baseAggFunc
}
type baseSum4Float64 struct {
baseSumAggFunc
}
func (e *baseSum4Float64) AllocPartialResult() (pr PartialResult, memDelta int64) {
p := new(partialResult4SumFloat64)
return PartialResult(p), DefPartialResult4SumFloat64Size
}
func (e *baseSum4Float64) ResetPartialResult(pr PartialResult) {
p := (*partialResult4SumFloat64)(pr)
p.val = 0
p.notNullRowCount = 0
}
func (e *baseSum4Float64) AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error {
p := (*partialResult4SumFloat64)(pr)
if p.notNullRowCount == 0 {
chk.AppendNull(e.ordinal)
return nil
}
chk.AppendFloat64(e.ordinal, p.val)
return nil
}
func (e *baseSum4Float64) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error) {
p := (*partialResult4SumFloat64)(pr)
for _, row := range rowsInGroup {
input, isNull, err := e.args[0].EvalReal(sctx, row)
if err != nil {
return 0, err
}
if isNull {
continue
}
p.val += input
p.notNullRowCount++
}
return 0, nil
}
func (e *baseSum4Float64) MergePartialResult(sctx sessionctx.Context, src, dst PartialResult) (memDelta int64, err error) {
p1, p2 := (*partialResult4SumFloat64)(src), (*partialResult4SumFloat64)(dst)
if p1.notNullRowCount == 0 {
return 0, nil
}
p2.val += p1.val
p2.notNullRowCount += p1.notNullRowCount
return 0, nil
}
type sum4Float64 struct {
baseSum4Float64
}
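// Slide updates the sliding-window partial result: rows [lastEnd, lastEnd+shiftEnd) enter the frame and are
// added to the sum, while rows [lastStart, lastStart+shiftStart) leave the frame and are subtracted.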
func (e *sum4Float64) Slide(sctx sessionctx.Context, rows []chunk.Row, lastStart, lastEnd uint64, shiftStart, shiftEnd uint64, pr PartialResult) error {
p := (*partialResult4SumFloat64)(pr)
for i := uint64(0); i < shiftEnd; i++ {
input, isNull, err := e.args[0].EvalReal(sctx, rows[lastEnd+i])
if err != nil {
return err
}
if isNull {
continue
}
p.val += input
p.notNullRowCount++
}
for i := uint64(0); i < shiftStart; i++ {
input, isNull, err := e.args[0].EvalReal(sctx, rows[lastStart+i])
if err != nil {
return err
}
if isNull {
continue
}
p.val -= input
p.notNullRowCount--
}
return nil
}
type sum4Float64HighPrecision struct {
baseSum4Float64
}
type sum4Decimal struct {
baseSumAggFunc
}
func (e *sum4Decimal) AllocPartialResult() (pr PartialResult, memDelta int64) {
p := new(partialResult4SumDecimal)
return PartialResult(p), DefPartialResult4SumDecimalSize
}
func (e *sum4Decimal) ResetPartialResult(pr PartialResult) {
p := (*partialResult4SumDecimal)(pr)
p.notNullRowCount = 0
}
func (e *sum4Decimal) AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error {
p := (*partialResult4SumDecimal)(pr)
if p.notNullRowCount == 0 {
chk.AppendNull(e.ordinal)
return nil
}
err := p.val.Round(&p.val, e.frac, types.ModeHalfEven)
if err != nil {
return err
}
chk.AppendMyDecimal(e.ordinal, &p.val)
return nil
}
func (e *sum4Decimal) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error) {
p := (*partialResult4SumDecimal)(pr)
for _, row := range rowsInGroup {
input, isNull, err := e.args[0].EvalDecimal(sctx, row)
if err != nil {
return 0, err
}
if isNull {
continue
}
if p.notNullRowCount == 0 {
p.val = *input
p.notNullRowCount = 1
continue
}
newSum := new(types.MyDecimal)
err = types.DecimalAdd(&p.val, input, newSum)
if err != nil {
return 0, err
}
p.val = *newSum
p.notNullRowCount++
}
return 0, nil
}
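// Slide updates the decimal sliding-window partial result: newly entering rows are added with DecimalAdd and
// leaving rows are subtracted with DecimalSub.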
func (e *sum4Decimal) Slide(sctx sessionctx.Context, rows []chunk.Row, lastStart, lastEnd uint64, shiftStart, shiftEnd uint64, pr PartialResult) error {
p := (*partialResult4SumDecimal)(pr)
for i := uint64(0); i < shiftEnd; i++ {
input, isNull, err := e.args[0].EvalDecimal(sctx, rows[lastEnd+i])
if err != nil {
return err
}
if isNull {
continue
}
if p.notNullRowCount == 0 {
p.val = *input
p.notNullRowCount = 1
continue
}
newSum := new(types.MyDecimal)
err = types.DecimalAdd(&p.val, input, newSum)
if err != nil {
return err
}
p.val = *newSum
p.notNullRowCount++
}
for i := uint64(0); i < shiftStart; i++ {
input, isNull, err := e.args[0].EvalDecimal(sctx, rows[lastStart+i])
if err != nil {
return err
}
if isNull {
continue
}
newSum := new(types.MyDecimal)
err = types.DecimalSub(&p.val, input, newSum)
if err != nil {
return err
}
p.val = *newSum
p.notNullRowCount--
}
return nil
}
func (e *sum4Decimal) MergePartialResult(sctx sessionctx.Context, src, dst PartialResult) (memDelta int64, err error) {
p1, p2 := (*partialResult4SumDecimal)(src), (*partialResult4SumDecimal)(dst)
if p1.notNullRowCount == 0 {
return 0, nil
}
newSum := new(types.MyDecimal)
err = types.DecimalAdd(&p1.val, &p2.val, newSum)
if err != nil {
return 0, err
}
p2.val = *newSum
p2.notNullRowCount += p1.notNullRowCount
return 0, nil
}
type sum4DistinctFloat64 struct {
baseSumAggFunc
}
func (e *sum4DistinctFloat64) AllocPartialResult() (pr PartialResult, memDelta int64) {
setSize := int64(0)
p := new(partialResult4SumDistinctFloat64)
p.isNull = true
p.valSet, setSize = set.NewFloat64SetWithMemoryUsage()
return PartialResult(p), DefPartialResult4SumDistinctFloat64Size + setSize
}
func (e *sum4DistinctFloat64) ResetPartialResult(pr PartialResult) {
p := (*partialResult4SumDistinctFloat64)(pr)
p.isNull = true
p.valSet, _ = set.NewFloat64SetWithMemoryUsage()
}
func (e *sum4DistinctFloat64) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error) {
p := (*partialResult4SumDistinctFloat64)(pr)
for _, row := range rowsInGroup {
input, isNull, err := e.args[0].EvalReal(sctx, row)
if err != nil {
return memDelta, err
}
if isNull || p.valSet.Exist(input) {
continue
}
memDelta += p.valSet.Insert(input)
if p.isNull {
p.val = input
p.isNull = false
continue
}
p.val += input
}
return memDelta, nil
}
func (e *sum4DistinctFloat64) AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error {
p := (*partialResult4SumDistinctFloat64)(pr)
if p.isNull {
chk.AppendNull(e.ordinal)
return nil
}
chk.AppendFloat64(e.ordinal, p.val)
return nil
}
type sum4DistinctDecimal struct {
baseSumAggFunc
}
func (e *sum4DistinctDecimal) AllocPartialResult() (pr PartialResult, memDelta int64) {
p := new(partialResult4SumDistinctDecimal)
p.isNull = true
setSize := int64(0)
p.valSet, setSize = set.NewStringSetWithMemoryUsage()
return PartialResult(p), DefPartialResult4SumDistinctDecimalSize + setSize
}
func (e *sum4DistinctDecimal) ResetPartialResult(pr PartialResult) {
p := (*partialResult4SumDistinctDecimal)(pr)
p.isNull = true
p.valSet, _ = set.NewStringSetWithMemoryUsage()
}
func (e *sum4DistinctDecimal) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error) {
p := (*partialResult4SumDistinctDecimal)(pr)
for _, row := range rowsInGroup {
input, isNull, err := e.args[0].EvalDecimal(sctx, row)
if err != nil {
return memDelta, err
}
if isNull {
continue
}
hash, err := input.ToHashKey()
if err != nil {
return memDelta, err
}
decStr := string(hack.String(hash))
if p.valSet.Exist(decStr) {
continue
}
memDelta += p.valSet.Insert(decStr)
if p.isNull {
p.val = *input
p.isNull = false
continue
}
newSum := new(types.MyDecimal)
if err = types.DecimalAdd(&p.val, input, newSum); err != nil {
return memDelta, err
}
p.val = *newSum
}
return memDelta, nil
}
func (e *sum4DistinctDecimal) AppendFinalResult2Chunk(sctx sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error {
p := (*partialResult4SumDistinctDecimal)(pr)
if p.isNull {
chk.AppendNull(e.ordinal)
return nil
}
chk.AppendMyDecimal(e.ordinal, &p.val)
return nil
}
| executor/aggfuncs/func_sum.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.00017956084047909826,
0.0001717292907414958,
0.000161972435307689,
0.0001727309136185795,
0.000004168552095507039
] |
{
"id": 13,
"code_window": [
"\tc.Assert(tablecodec.IsUntouchedIndexKValue(untouchedIndexKey, untouchedIndexValue), IsTrue)\n",
"\ttxn := s.begin(c)\n",
"\ttxn.SetOption(kv.KVFilter, drivertxn.TiDBKVFilter{})\n",
"\terr := txn.Set(untouchedIndexKey, untouchedIndexValue)\n",
"\tc.Assert(err, IsNil)\n",
"\tlockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}\n",
"\terr = txn.LockKeys(context.Background(), lockCtx, untouchedIndexKey, noValueIndexKey)\n",
"\tc.Assert(err, IsNil)\n",
"\tcommit, err := txn.NewCommitter(1)\n",
"\tc.Assert(err, IsNil)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tikv.LockNoWait}\n"
],
"file_path": "store/tikv/tests/2pc_test.go",
"type": "replace",
"edit_start_line_idx": 1038
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv_test
import (
. "github.com/pingcap/check"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/store/mockstore/unistore"
"github.com/pingcap/tidb/store/tikv"
)
type testPrewriteSuite struct {
store *tikv.KVStore
}
var _ = Suite(&testPrewriteSuite{})
func (s *testPrewriteSuite) SetUpTest(c *C) {
client, pdClient, cluster, err := unistore.New("")
c.Assert(err, IsNil)
unistore.BootstrapWithSingleStore(cluster)
store, err := tikv.NewTestTiKVStore(client, pdClient, nil, nil, 0)
c.Assert(err, IsNil)
s.store = store
}
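// TestSetMinCommitTSInAsyncCommit checks how the prewrite request's MinCommitTs is chosen: startTS+1 by
// default, forUpdateTS+1 once a forUpdateTS is set, and the explicit minCommitTS when one is provided.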
func (s *testPrewriteSuite) TestSetMinCommitTSInAsyncCommit(c *C) {
t, err := s.store.Begin()
c.Assert(err, IsNil)
txn := tikv.TxnProbe{KVTxn: t}
err = txn.Set([]byte("k"), []byte("v"))
c.Assert(err, IsNil)
committer, err := txn.NewCommitter(1)
c.Assert(err, IsNil)
committer.SetUseAsyncCommit()
buildRequest := func() *pb.PrewriteRequest {
req := committer.BuildPrewriteRequest(1, 1, 1, committer.GetMutations(), 1)
return req.Req.(*pb.PrewriteRequest)
}
// no forUpdateTS
req := buildRequest()
c.Assert(req.MinCommitTs, Equals, txn.StartTS()+1)
// forUpdateTS is set
committer.SetForUpdateTS(txn.StartTS() + (5 << 18))
req = buildRequest()
c.Assert(req.MinCommitTs, Equals, committer.GetForUpdateTS()+1)
// minCommitTS is set
committer.SetMinCommitTS(txn.StartTS() + (10 << 18))
req = buildRequest()
c.Assert(req.MinCommitTs, Equals, committer.GetMinCommitTS())
}
| store/tikv/tests/prewrite_test.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.015491497702896595,
0.003991842269897461,
0.00016453697753604501,
0.000585904810577631,
0.005343107506632805
] |
{
"id": 13,
"code_window": [
"\tc.Assert(tablecodec.IsUntouchedIndexKValue(untouchedIndexKey, untouchedIndexValue), IsTrue)\n",
"\ttxn := s.begin(c)\n",
"\ttxn.SetOption(kv.KVFilter, drivertxn.TiDBKVFilter{})\n",
"\terr := txn.Set(untouchedIndexKey, untouchedIndexValue)\n",
"\tc.Assert(err, IsNil)\n",
"\tlockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}\n",
"\terr = txn.LockKeys(context.Background(), lockCtx, untouchedIndexKey, noValueIndexKey)\n",
"\tc.Assert(err, IsNil)\n",
"\tcommit, err := txn.NewCommitter(1)\n",
"\tc.Assert(err, IsNil)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tikv.LockNoWait}\n"
],
"file_path": "store/tikv/tests/2pc_test.go",
"type": "replace",
"edit_start_line_idx": 1038
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"context"
"fmt"
"math"
"math/rand"
"time"
"github.com/cznic/mathutil"
. "github.com/pingcap/check"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/expression/aggregation"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/planner/util"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/types/json"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/disk"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/mock"
)
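// requiredRowsDataSource is a mock data source whose Next returns at most the chunk's required rows and
// panics if the number of rows returned does not match the per-call expectation in expectedRowsRet.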
type requiredRowsDataSource struct {
baseExecutor
totalRows int
count int
ctx sessionctx.Context
expectedRowsRet []int
numNextCalled int
generator func(valType *types.FieldType) interface{}
}
func newRequiredRowsDataSourceWithGenerator(ctx sessionctx.Context, totalRows int, expectedRowsRet []int,
gen func(valType *types.FieldType) interface{}) *requiredRowsDataSource {
ds := newRequiredRowsDataSource(ctx, totalRows, expectedRowsRet)
ds.generator = gen
return ds
}
func newRequiredRowsDataSource(ctx sessionctx.Context, totalRows int, expectedRowsRet []int) *requiredRowsDataSource {
	// the output schema is fixed for now: [Double, Long]
retTypes := []*types.FieldType{types.NewFieldType(mysql.TypeDouble), types.NewFieldType(mysql.TypeLonglong)}
cols := make([]*expression.Column, len(retTypes))
for i := range retTypes {
cols[i] = &expression.Column{Index: i, RetType: retTypes[i]}
}
schema := expression.NewSchema(cols...)
baseExec := newBaseExecutor(ctx, schema, 0)
return &requiredRowsDataSource{baseExec, totalRows, 0, ctx, expectedRowsRet, 0, defaultGenerator}
}
func (r *requiredRowsDataSource) Next(ctx context.Context, req *chunk.Chunk) error {
defer func() {
if r.expectedRowsRet == nil {
r.numNextCalled++
return
}
rowsRet := req.NumRows()
expected := r.expectedRowsRet[r.numNextCalled]
if rowsRet != expected {
panic(fmt.Sprintf("unexpected number of rows returned, obtain: %v, expected: %v", rowsRet, expected))
}
r.numNextCalled++
}()
req.Reset()
if r.count > r.totalRows {
return nil
}
required := mathutil.Min(req.RequiredRows(), r.totalRows-r.count)
for i := 0; i < required; i++ {
req.AppendRow(r.genOneRow())
}
r.count += required
return nil
}
func (r *requiredRowsDataSource) genOneRow() chunk.Row {
row := chunk.MutRowFromTypes(retTypes(r))
for i, tp := range retTypes(r) {
row.SetValue(i, r.generator(tp))
}
return row.ToRow()
}
func defaultGenerator(valType *types.FieldType) interface{} {
switch valType.Tp {
case mysql.TypeLong, mysql.TypeLonglong:
return int64(rand.Int())
case mysql.TypeDouble:
return rand.Float64()
default:
panic("not implement")
}
}
func (r *requiredRowsDataSource) checkNumNextCalled() error {
if r.numNextCalled != len(r.expectedRowsRet) {
return fmt.Errorf("unexpected number of call on Next, obtain: %v, expected: %v",
r.numNextCalled, len(r.expectedRowsRet))
}
return nil
}
func (s *testExecSuite) TestLimitRequiredRows(c *C) {
maxChunkSize := defaultCtx().GetSessionVars().MaxChunkSize
testCases := []struct {
totalRows int
limitOffset int
limitCount int
requiredRows []int
expectedRows []int
expectedRowsDS []int
}{
{
totalRows: 20,
limitOffset: 0,
limitCount: 10,
requiredRows: []int{3, 5, 1, 500, 500},
expectedRows: []int{3, 5, 1, 1, 0},
expectedRowsDS: []int{3, 5, 1, 1},
},
{
totalRows: 20,
limitOffset: 0,
limitCount: 25,
requiredRows: []int{9, 500},
expectedRows: []int{9, 11},
expectedRowsDS: []int{9, 11},
},
{
totalRows: 100,
limitOffset: 50,
limitCount: 30,
requiredRows: []int{10, 5, 10, 20},
expectedRows: []int{10, 5, 10, 5},
expectedRowsDS: []int{60, 5, 10, 5},
},
{
totalRows: 100,
limitOffset: 101,
limitCount: 10,
requiredRows: []int{10},
expectedRows: []int{0},
expectedRowsDS: []int{100, 0},
},
{
totalRows: maxChunkSize + 20,
limitOffset: maxChunkSize + 1,
limitCount: 10,
requiredRows: []int{3, 3, 3, 100},
expectedRows: []int{3, 3, 3, 1},
expectedRowsDS: []int{maxChunkSize, 4, 3, 3, 1},
},
}
for _, testCase := range testCases {
sctx := defaultCtx()
ctx := context.Background()
ds := newRequiredRowsDataSource(sctx, testCase.totalRows, testCase.expectedRowsDS)
exec := buildLimitExec(sctx, ds, testCase.limitOffset, testCase.limitCount)
c.Assert(exec.Open(ctx), IsNil)
chk := newFirstChunk(exec)
for i := range testCase.requiredRows {
chk.SetRequiredRows(testCase.requiredRows[i], sctx.GetSessionVars().MaxChunkSize)
c.Assert(exec.Next(ctx, chk), IsNil)
c.Assert(chk.NumRows(), Equals, testCase.expectedRows[i])
}
c.Assert(exec.Close(), IsNil)
c.Assert(ds.checkNumNextCalled(), IsNil)
}
}
func buildLimitExec(ctx sessionctx.Context, src Executor, offset, count int) Executor {
n := mathutil.Min(count, ctx.GetSessionVars().MaxChunkSize)
base := newBaseExecutor(ctx, src.Schema(), 0, src)
base.initCap = n
limitExec := &LimitExec{
baseExecutor: base,
begin: uint64(offset),
end: uint64(offset + count),
}
return limitExec
}
func defaultCtx() sessionctx.Context {
ctx := mock.NewContext()
ctx.GetSessionVars().InitChunkSize = variable.DefInitChunkSize
ctx.GetSessionVars().MaxChunkSize = variable.DefMaxChunkSize
ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(-1, ctx.GetSessionVars().MemQuotaQuery)
ctx.GetSessionVars().StmtCtx.DiskTracker = disk.NewTracker(-1, -1)
ctx.GetSessionVars().SnapshotTS = uint64(1)
return ctx
}
func (s *testExecSuite) TestSortRequiredRows(c *C) {
maxChunkSize := defaultCtx().GetSessionVars().MaxChunkSize
testCases := []struct {
totalRows int
groupBy []int
requiredRows []int
expectedRows []int
expectedRowsDS []int
}{
{
totalRows: 10,
groupBy: []int{0},
requiredRows: []int{1, 5, 3, 10},
expectedRows: []int{1, 5, 3, 1},
expectedRowsDS: []int{10, 0},
},
{
totalRows: 10,
groupBy: []int{0, 1},
requiredRows: []int{1, 5, 3, 10},
expectedRows: []int{1, 5, 3, 1},
expectedRowsDS: []int{10, 0},
},
{
totalRows: maxChunkSize + 1,
groupBy: []int{0},
requiredRows: []int{1, 5, 3, 10, maxChunkSize},
expectedRows: []int{1, 5, 3, 10, (maxChunkSize + 1) - 1 - 5 - 3 - 10},
expectedRowsDS: []int{maxChunkSize, 1, 0},
},
{
totalRows: 3*maxChunkSize + 1,
groupBy: []int{0},
requiredRows: []int{1, 5, 3, 10, maxChunkSize},
expectedRows: []int{1, 5, 3, 10, maxChunkSize},
expectedRowsDS: []int{maxChunkSize, maxChunkSize, maxChunkSize, 1, 0},
},
}
for _, testCase := range testCases {
sctx := defaultCtx()
ctx := context.Background()
ds := newRequiredRowsDataSource(sctx, testCase.totalRows, testCase.expectedRowsDS)
byItems := make([]*util.ByItems, 0, len(testCase.groupBy))
for _, groupBy := range testCase.groupBy {
col := ds.Schema().Columns[groupBy]
byItems = append(byItems, &util.ByItems{Expr: col})
}
exec := buildSortExec(sctx, byItems, ds)
c.Assert(exec.Open(ctx), IsNil)
chk := newFirstChunk(exec)
for i := range testCase.requiredRows {
chk.SetRequiredRows(testCase.requiredRows[i], maxChunkSize)
c.Assert(exec.Next(ctx, chk), IsNil)
c.Assert(chk.NumRows(), Equals, testCase.expectedRows[i])
}
c.Assert(exec.Close(), IsNil)
c.Assert(ds.checkNumNextCalled(), IsNil)
}
}
func buildSortExec(sctx sessionctx.Context, byItems []*util.ByItems, src Executor) Executor {
sortExec := SortExec{
baseExecutor: newBaseExecutor(sctx, src.Schema(), 0, src),
ByItems: byItems,
schema: src.Schema(),
}
return &sortExec
}
func (s *testExecSuite) TestTopNRequiredRows(c *C) {
maxChunkSize := defaultCtx().GetSessionVars().MaxChunkSize
testCases := []struct {
totalRows int
topNOffset int
topNCount int
groupBy []int
requiredRows []int
expectedRows []int
expectedRowsDS []int
}{
{
totalRows: 10,
topNOffset: 0,
topNCount: 10,
groupBy: []int{0},
requiredRows: []int{1, 1, 1, 1, 10},
expectedRows: []int{1, 1, 1, 1, 6},
expectedRowsDS: []int{10, 0},
},
{
totalRows: 100,
topNOffset: 15,
topNCount: 11,
groupBy: []int{0},
requiredRows: []int{1, 1, 1, 1, 10},
expectedRows: []int{1, 1, 1, 1, 7},
expectedRowsDS: []int{26, 100 - 26, 0},
},
{
totalRows: 100,
topNOffset: 95,
topNCount: 10,
groupBy: []int{0},
requiredRows: []int{1, 2, 3, 10},
expectedRows: []int{1, 2, 2, 0},
expectedRowsDS: []int{100, 0, 0},
},
{
totalRows: maxChunkSize + 20,
topNOffset: 1,
topNCount: 5,
groupBy: []int{0, 1},
requiredRows: []int{1, 3, 7, 10},
expectedRows: []int{1, 3, 1, 0},
expectedRowsDS: []int{6, maxChunkSize, 14, 0},
},
{
totalRows: maxChunkSize + maxChunkSize + 20,
topNOffset: maxChunkSize + 10,
topNCount: 8,
groupBy: []int{0, 1},
requiredRows: []int{1, 2, 3, 5, 7},
expectedRows: []int{1, 2, 3, 2, 0},
expectedRowsDS: []int{maxChunkSize, 18, maxChunkSize, 2, 0},
},
{
totalRows: maxChunkSize*5 + 10,
topNOffset: maxChunkSize*5 + 20,
topNCount: 10,
groupBy: []int{0, 1},
requiredRows: []int{1, 2, 3},
expectedRows: []int{0, 0, 0},
expectedRowsDS: []int{maxChunkSize, maxChunkSize, maxChunkSize, maxChunkSize, maxChunkSize, 10, 0, 0},
},
{
totalRows: maxChunkSize + maxChunkSize + 10,
topNOffset: 10,
topNCount: math.MaxInt64,
groupBy: []int{0, 1},
requiredRows: []int{1, 2, 3, maxChunkSize, maxChunkSize},
expectedRows: []int{1, 2, 3, maxChunkSize, maxChunkSize - 1 - 2 - 3},
expectedRowsDS: []int{maxChunkSize, maxChunkSize, 10, 0, 0},
},
}
for _, testCase := range testCases {
sctx := defaultCtx()
ctx := context.Background()
ds := newRequiredRowsDataSource(sctx, testCase.totalRows, testCase.expectedRowsDS)
byItems := make([]*util.ByItems, 0, len(testCase.groupBy))
for _, groupBy := range testCase.groupBy {
col := ds.Schema().Columns[groupBy]
byItems = append(byItems, &util.ByItems{Expr: col})
}
exec := buildTopNExec(sctx, testCase.topNOffset, testCase.topNCount, byItems, ds)
c.Assert(exec.Open(ctx), IsNil)
chk := newFirstChunk(exec)
for i := range testCase.requiredRows {
chk.SetRequiredRows(testCase.requiredRows[i], maxChunkSize)
c.Assert(exec.Next(ctx, chk), IsNil)
c.Assert(chk.NumRows(), Equals, testCase.expectedRows[i])
}
c.Assert(exec.Close(), IsNil)
c.Assert(ds.checkNumNextCalled(), IsNil)
}
}
func buildTopNExec(ctx sessionctx.Context, offset, count int, byItems []*util.ByItems, src Executor) Executor {
sortExec := SortExec{
baseExecutor: newBaseExecutor(ctx, src.Schema(), 0, src),
ByItems: byItems,
schema: src.Schema(),
}
return &TopNExec{
SortExec: sortExec,
limit: &plannercore.PhysicalLimit{Count: uint64(count), Offset: uint64(offset)},
}
}
func (s *testExecSuite) TestSelectionRequiredRows(c *C) {
gen01 := func() func(valType *types.FieldType) interface{} {
closureCount := 0
return func(valType *types.FieldType) interface{} {
switch valType.Tp {
case mysql.TypeLong, mysql.TypeLonglong:
ret := int64(closureCount % 2)
closureCount++
return ret
case mysql.TypeDouble:
return rand.Float64()
default:
panic("not implement")
}
}
}
maxChunkSize := defaultCtx().GetSessionVars().MaxChunkSize
testCases := []struct {
totalRows int
filtersOfCol1 int
requiredRows []int
expectedRows []int
expectedRowsDS []int
gen func(valType *types.FieldType) interface{}
}{
{
totalRows: 20,
requiredRows: []int{1, 2, 3, 4, 5, 20},
expectedRows: []int{1, 2, 3, 4, 5, 5},
expectedRowsDS: []int{20, 0},
},
{
totalRows: 20,
filtersOfCol1: 0,
requiredRows: []int{1, 3, 5, 7, 9},
expectedRows: []int{1, 3, 5, 1, 0},
expectedRowsDS: []int{20, 0, 0},
gen: gen01(),
},
{
totalRows: maxChunkSize + 20,
filtersOfCol1: 1,
requiredRows: []int{1, 3, 5, maxChunkSize},
expectedRows: []int{1, 3, 5, maxChunkSize/2 - 1 - 3 - 5 + 10},
expectedRowsDS: []int{maxChunkSize, 20, 0},
gen: gen01(),
},
}
for _, testCase := range testCases {
sctx := defaultCtx()
ctx := context.Background()
var filters []expression.Expression
var ds *requiredRowsDataSource
if testCase.gen == nil {
// ignore filters
ds = newRequiredRowsDataSource(sctx, testCase.totalRows, testCase.expectedRowsDS)
} else {
ds = newRequiredRowsDataSourceWithGenerator(sctx, testCase.totalRows, testCase.expectedRowsDS, testCase.gen)
f, err := expression.NewFunction(
sctx, ast.EQ, types.NewFieldType(byte(types.ETInt)), ds.Schema().Columns[1], &expression.Constant{
Value: types.NewDatum(testCase.filtersOfCol1),
RetType: types.NewFieldType(mysql.TypeTiny),
})
c.Assert(err, IsNil)
filters = append(filters, f)
}
exec := buildSelectionExec(sctx, filters, ds)
c.Assert(exec.Open(ctx), IsNil)
chk := newFirstChunk(exec)
for i := range testCase.requiredRows {
chk.SetRequiredRows(testCase.requiredRows[i], maxChunkSize)
c.Assert(exec.Next(ctx, chk), IsNil)
c.Assert(chk.NumRows(), Equals, testCase.expectedRows[i])
}
c.Assert(exec.Close(), IsNil)
c.Assert(ds.checkNumNextCalled(), IsNil)
}
}
func buildSelectionExec(ctx sessionctx.Context, filters []expression.Expression, src Executor) Executor {
return &SelectionExec{
baseExecutor: newBaseExecutor(ctx, src.Schema(), 0, src),
filters: filters,
}
}
func (s *testExecSuite) TestProjectionUnparallelRequiredRows(c *C) {
maxChunkSize := defaultCtx().GetSessionVars().MaxChunkSize
testCases := []struct {
totalRows int
requiredRows []int
expectedRows []int
expectedRowsDS []int
}{
{
totalRows: 20,
requiredRows: []int{1, 3, 5, 7, 9},
expectedRows: []int{1, 3, 5, 7, 4},
expectedRowsDS: []int{1, 3, 5, 7, 4},
},
{
totalRows: maxChunkSize + 10,
requiredRows: []int{1, 3, 5, 7, 9, maxChunkSize},
expectedRows: []int{1, 3, 5, 7, 9, maxChunkSize - 1 - 3 - 5 - 7 - 9 + 10},
expectedRowsDS: []int{1, 3, 5, 7, 9, maxChunkSize - 1 - 3 - 5 - 7 - 9 + 10},
},
{
totalRows: maxChunkSize*2 + 10,
requiredRows: []int{1, 7, 9, maxChunkSize, maxChunkSize + 10},
expectedRows: []int{1, 7, 9, maxChunkSize, maxChunkSize + 10 - 1 - 7 - 9},
expectedRowsDS: []int{1, 7, 9, maxChunkSize, maxChunkSize + 10 - 1 - 7 - 9},
},
}
for _, testCase := range testCases {
sctx := defaultCtx()
ctx := context.Background()
ds := newRequiredRowsDataSource(sctx, testCase.totalRows, testCase.expectedRowsDS)
exprs := make([]expression.Expression, 0, len(ds.Schema().Columns))
if len(exprs) == 0 {
for _, col := range ds.Schema().Columns {
exprs = append(exprs, col)
}
}
exec := buildProjectionExec(sctx, exprs, ds, 0)
c.Assert(exec.Open(ctx), IsNil)
chk := newFirstChunk(exec)
for i := range testCase.requiredRows {
chk.SetRequiredRows(testCase.requiredRows[i], maxChunkSize)
c.Assert(exec.Next(ctx, chk), IsNil)
c.Assert(chk.NumRows(), Equals, testCase.expectedRows[i])
}
c.Assert(exec.Close(), IsNil)
c.Assert(ds.checkNumNextCalled(), IsNil)
}
}
func (s *testExecSuite) TestProjectionParallelRequiredRows(c *C) {
c.Skip("not stable because of goroutine schedule")
maxChunkSize := defaultCtx().GetSessionVars().MaxChunkSize
testCases := []struct {
totalRows int
numWorkers int
requiredRows []int
expectedRows []int
expectedRowsDS []int
}{
{
totalRows: 20,
numWorkers: 1,
requiredRows: []int{1, 2, 3, 4, 5, 6, 1, 1},
expectedRows: []int{1, 1, 2, 3, 4, 5, 4, 0},
expectedRowsDS: []int{1, 1, 2, 3, 4, 5, 4, 0},
},
{
totalRows: maxChunkSize * 2,
numWorkers: 1,
requiredRows: []int{7, maxChunkSize, maxChunkSize, maxChunkSize},
expectedRows: []int{7, 7, maxChunkSize, maxChunkSize - 14},
expectedRowsDS: []int{7, 7, maxChunkSize, maxChunkSize - 14, 0},
},
{
totalRows: 20,
numWorkers: 2,
requiredRows: []int{1, 2, 3, 4, 5, 6, 1, 1, 1},
expectedRows: []int{1, 1, 1, 2, 3, 4, 5, 3, 0},
expectedRowsDS: []int{1, 1, 1, 2, 3, 4, 5, 3, 0},
},
}
for _, testCase := range testCases {
sctx := defaultCtx()
ctx := context.Background()
ds := newRequiredRowsDataSource(sctx, testCase.totalRows, testCase.expectedRowsDS)
exprs := make([]expression.Expression, 0, len(ds.Schema().Columns))
if len(exprs) == 0 {
for _, col := range ds.Schema().Columns {
exprs = append(exprs, col)
}
}
exec := buildProjectionExec(sctx, exprs, ds, testCase.numWorkers)
c.Assert(exec.Open(ctx), IsNil)
chk := newFirstChunk(exec)
for i := range testCase.requiredRows {
chk.SetRequiredRows(testCase.requiredRows[i], maxChunkSize)
c.Assert(exec.Next(ctx, chk), IsNil)
c.Assert(chk.NumRows(), Equals, testCase.expectedRows[i])
			// wait for projectionInputFetcher to block on fetching data
			// from its child in the background.
time.Sleep(time.Millisecond * 25)
}
c.Assert(exec.Close(), IsNil)
c.Assert(ds.checkNumNextCalled(), IsNil)
}
}
func buildProjectionExec(ctx sessionctx.Context, exprs []expression.Expression, src Executor, numWorkers int) Executor {
return &ProjectionExec{
baseExecutor: newBaseExecutor(ctx, src.Schema(), 0, src),
numWorkers: int64(numWorkers),
evaluatorSuit: expression.NewEvaluatorSuite(exprs, false),
}
}
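// divGenerator returns a generator whose integer values increase by one every `factor` calls, so adjacent
// rows form groups of `factor` equal values; the stream aggregation test uses it to create group boundaries.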
func divGenerator(factor int) func(valType *types.FieldType) interface{} {
closureCountInt := 0
closureCountDouble := 0
return func(valType *types.FieldType) interface{} {
switch valType.Tp {
case mysql.TypeLong, mysql.TypeLonglong:
ret := int64(closureCountInt / factor)
closureCountInt++
return ret
case mysql.TypeDouble:
ret := float64(closureCountInt / factor)
closureCountDouble++
return ret
default:
panic("not implement")
}
}
}
func (s *testExecSuite) TestStreamAggRequiredRows(c *C) {
maxChunkSize := defaultCtx().GetSessionVars().MaxChunkSize
testCases := []struct {
totalRows int
aggFunc string
requiredRows []int
expectedRows []int
expectedRowsDS []int
gen func(valType *types.FieldType) interface{}
}{
{
totalRows: 1000000,
aggFunc: ast.AggFuncSum,
requiredRows: []int{1, 2, 3, 4, 5, 6, 7},
expectedRows: []int{1, 2, 3, 4, 5, 6, 7},
expectedRowsDS: []int{maxChunkSize},
gen: divGenerator(1),
},
{
totalRows: maxChunkSize * 3,
aggFunc: ast.AggFuncAvg,
requiredRows: []int{1, 3},
expectedRows: []int{1, 2},
expectedRowsDS: []int{maxChunkSize, maxChunkSize, maxChunkSize, 0},
gen: divGenerator(maxChunkSize),
},
{
totalRows: maxChunkSize*2 - 1,
aggFunc: ast.AggFuncMax,
requiredRows: []int{maxChunkSize/2 + 1},
expectedRows: []int{maxChunkSize/2 + 1},
expectedRowsDS: []int{maxChunkSize, maxChunkSize - 1},
gen: divGenerator(2),
},
}
for _, testCase := range testCases {
sctx := defaultCtx()
ctx := context.Background()
ds := newRequiredRowsDataSourceWithGenerator(sctx, testCase.totalRows, testCase.expectedRowsDS, testCase.gen)
childCols := ds.Schema().Columns
schema := expression.NewSchema(childCols...)
groupBy := []expression.Expression{childCols[1]}
aggFunc, err := aggregation.NewAggFuncDesc(sctx, testCase.aggFunc, []expression.Expression{childCols[0]}, true)
c.Assert(err, IsNil)
aggFuncs := []*aggregation.AggFuncDesc{aggFunc}
exec := buildStreamAggExecutor(sctx, ds, schema, aggFuncs, groupBy, 1, true)
c.Assert(exec.Open(ctx), IsNil)
chk := newFirstChunk(exec)
for i := range testCase.requiredRows {
chk.SetRequiredRows(testCase.requiredRows[i], maxChunkSize)
c.Assert(exec.Next(ctx, chk), IsNil)
c.Assert(chk.NumRows(), Equals, testCase.expectedRows[i])
}
c.Assert(exec.Close(), IsNil)
c.Assert(ds.checkNumNextCalled(), IsNil)
}
}
func (s *testExecSuite) TestMergeJoinRequiredRows(c *C) {
justReturn1 := func(valType *types.FieldType) interface{} {
switch valType.Tp {
case mysql.TypeLong, mysql.TypeLonglong:
return int64(1)
case mysql.TypeDouble:
return float64(1)
default:
panic("not support")
}
}
joinTypes := []plannercore.JoinType{plannercore.RightOuterJoin, plannercore.LeftOuterJoin,
plannercore.LeftOuterSemiJoin, plannercore.AntiLeftOuterSemiJoin}
for _, joinType := range joinTypes {
ctx := defaultCtx()
required := make([]int, 100)
for i := range required {
required[i] = rand.Int()%ctx.GetSessionVars().MaxChunkSize + 1
}
innerSrc := newRequiredRowsDataSourceWithGenerator(ctx, 1, nil, justReturn1) // just return one row: (1, 1)
outerSrc := newRequiredRowsDataSourceWithGenerator(ctx, 10000000, required, justReturn1) // always return (1, 1)
exec := buildMergeJoinExec(ctx, joinType, innerSrc, outerSrc)
c.Assert(exec.Open(context.Background()), IsNil)
chk := newFirstChunk(exec)
for i := range required {
chk.SetRequiredRows(required[i], ctx.GetSessionVars().MaxChunkSize)
c.Assert(exec.Next(context.Background(), chk), IsNil)
}
c.Assert(exec.Close(), IsNil)
c.Assert(outerSrc.checkNumNextCalled(), IsNil)
}
}
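// genTestChunk4VecGroupChecker builds chunks over a single int64 column in which every sameNum consecutive
// rows share one random value and one randomly chosen group is replaced by NULLs, returning the group-by
// expression and the chunks.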
func genTestChunk4VecGroupChecker(chkRows []int, sameNum int) (expr []expression.Expression, inputs []*chunk.Chunk) {
chkNum := len(chkRows)
numRows := 0
inputs = make([]*chunk.Chunk, chkNum)
fts := make([]*types.FieldType, 1)
fts[0] = types.NewFieldType(mysql.TypeLonglong)
for i := 0; i < chkNum; i++ {
inputs[i] = chunk.New(fts, chkRows[i], chkRows[i])
numRows += chkRows[i]
}
var numGroups int
if numRows%sameNum == 0 {
numGroups = numRows / sameNum
} else {
numGroups = numRows/sameNum + 1
}
rand.Seed(time.Now().Unix())
nullPos := rand.Intn(numGroups)
cnt := 0
val := rand.Int63()
for i := 0; i < chkNum; i++ {
col := inputs[i].Column(0)
col.ResizeInt64(chkRows[i], false)
i64s := col.Int64s()
for j := 0; j < chkRows[i]; j++ {
if cnt == sameNum {
val = rand.Int63()
cnt = 0
nullPos--
}
if nullPos == 0 {
col.SetNull(j, true)
} else {
i64s[j] = val
}
cnt++
}
}
expr = make([]expression.Expression, 1)
expr[0] = &expression.Column{
RetType: &types.FieldType{Tp: mysql.TypeLonglong, Flen: mysql.MaxIntWidth},
Index: 0,
}
return
}
func (s *testExecSuite) TestVecGroupChecker(c *C) {
testCases := []struct {
chunkRows []int
expectedGroups int
expectedFlag []bool
sameNum int
}{
{
chunkRows: []int{1024, 1},
expectedGroups: 1025,
expectedFlag: []bool{false, false},
sameNum: 1,
},
{
chunkRows: []int{1024, 1},
expectedGroups: 1,
expectedFlag: []bool{false, true},
sameNum: 1025,
},
{
chunkRows: []int{1, 1},
expectedGroups: 1,
expectedFlag: []bool{false, true},
sameNum: 2,
},
{
chunkRows: []int{1, 1},
expectedGroups: 2,
expectedFlag: []bool{false, false},
sameNum: 1,
},
{
chunkRows: []int{2, 2},
expectedGroups: 2,
expectedFlag: []bool{false, false},
sameNum: 2,
},
{
chunkRows: []int{2, 2},
expectedGroups: 1,
expectedFlag: []bool{false, true},
sameNum: 4,
},
}
ctx := mock.NewContext()
for _, testCase := range testCases {
expr, inputChks := genTestChunk4VecGroupChecker(testCase.chunkRows, testCase.sameNum)
groupChecker := newVecGroupChecker(ctx, expr)
groupNum := 0
for i, inputChk := range inputChks {
flag, err := groupChecker.splitIntoGroups(inputChk)
c.Assert(err, IsNil)
c.Assert(flag, Equals, testCase.expectedFlag[i])
if flag {
groupNum += groupChecker.groupCount - 1
} else {
groupNum += groupChecker.groupCount
}
}
c.Assert(groupNum, Equals, testCase.expectedGroups)
}
}
func buildMergeJoinExec(ctx sessionctx.Context, joinType plannercore.JoinType, innerSrc, outerSrc Executor) Executor {
if joinType == plannercore.RightOuterJoin {
innerSrc, outerSrc = outerSrc, innerSrc
}
innerCols := innerSrc.Schema().Columns
outerCols := outerSrc.Schema().Columns
j := plannercore.BuildMergeJoinPlan(ctx, joinType, outerCols, innerCols)
j.SetChildren(&mockPlan{exec: outerSrc}, &mockPlan{exec: innerSrc})
cols := append(append([]*expression.Column{}, outerCols...), innerCols...)
schema := expression.NewSchema(cols...)
j.SetSchema(schema)
j.CompareFuncs = make([]expression.CompareFunc, 0, len(j.LeftJoinKeys))
for i := range j.LeftJoinKeys {
j.CompareFuncs = append(j.CompareFuncs, expression.GetCmpFunction(nil, j.LeftJoinKeys[i], j.RightJoinKeys[i]))
}
b := newExecutorBuilder(ctx, nil)
return b.build(j)
}
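// mockPlan is a MockPhysicalPlan that simply returns a pre-built Executor, letting the executor builder wire
// mock data sources into a plan tree.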
type mockPlan struct {
MockPhysicalPlan
exec Executor
}
func (mp *mockPlan) GetExecutor() Executor {
return mp.exec
}
func (mp *mockPlan) Schema() *expression.Schema {
return mp.exec.Schema()
}
func (s *testExecSuite) TestVecGroupCheckerDATARACE(c *C) {
ctx := mock.NewContext()
mTypes := []byte{mysql.TypeVarString, mysql.TypeNewDecimal, mysql.TypeJSON}
for _, mType := range mTypes {
exprs := make([]expression.Expression, 1)
exprs[0] = &expression.Column{
RetType: &types.FieldType{Tp: mType},
Index: 0,
}
vgc := newVecGroupChecker(ctx, exprs)
fts := []*types.FieldType{types.NewFieldType(mType)}
chk := chunk.New(fts, 1, 1)
vgc.allocateBuffer = func(evalType types.EvalType, capacity int) (*chunk.Column, error) {
return chk.Column(0), nil
}
vgc.releaseBuffer = func(column *chunk.Column) {}
switch mType {
case mysql.TypeVarString:
chk.Column(0).ReserveString(1)
chk.Column(0).AppendString("abc")
case mysql.TypeNewDecimal:
chk.Column(0).ResizeDecimal(1, false)
chk.Column(0).Decimals()[0] = *types.NewDecFromInt(123)
case mysql.TypeJSON:
chk.Column(0).ReserveJSON(1)
j := new(json.BinaryJSON)
c.Assert(j.UnmarshalJSON([]byte(fmt.Sprintf(`{"%v":%v}`, 123, 123))), IsNil)
chk.Column(0).AppendJSON(*j)
}
_, err := vgc.splitIntoGroups(chk)
c.Assert(err, IsNil)
switch mType {
case mysql.TypeVarString:
c.Assert(vgc.firstRowDatums[0].GetString(), Equals, "abc")
c.Assert(vgc.lastRowDatums[0].GetString(), Equals, "abc")
chk.Column(0).ReserveString(1)
chk.Column(0).AppendString("edf")
c.Assert(vgc.firstRowDatums[0].GetString(), Equals, "abc")
c.Assert(vgc.lastRowDatums[0].GetString(), Equals, "abc")
case mysql.TypeNewDecimal:
c.Assert(vgc.firstRowDatums[0].GetMysqlDecimal().String(), Equals, "123")
c.Assert(vgc.lastRowDatums[0].GetMysqlDecimal().String(), Equals, "123")
chk.Column(0).ResizeDecimal(1, false)
chk.Column(0).Decimals()[0] = *types.NewDecFromInt(456)
c.Assert(vgc.firstRowDatums[0].GetMysqlDecimal().String(), Equals, "123")
c.Assert(vgc.lastRowDatums[0].GetMysqlDecimal().String(), Equals, "123")
case mysql.TypeJSON:
c.Assert(vgc.firstRowDatums[0].GetMysqlJSON().String(), Equals, `{"123": 123}`)
c.Assert(vgc.lastRowDatums[0].GetMysqlJSON().String(), Equals, `{"123": 123}`)
chk.Column(0).ReserveJSON(1)
j := new(json.BinaryJSON)
c.Assert(j.UnmarshalJSON([]byte(fmt.Sprintf(`{"%v":%v}`, 456, 456))), IsNil)
chk.Column(0).AppendJSON(*j)
c.Assert(vgc.firstRowDatums[0].GetMysqlJSON().String(), Equals, `{"123": 123}`)
c.Assert(vgc.lastRowDatums[0].GetMysqlJSON().String(), Equals, `{"123": 123}`)
}
}
}
| executor/executor_required_rows_test.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0005812495364807546,
0.00017827814735937864,
0.00015995507419575006,
0.00017323432257398963,
0.00004319563595345244
] |
{
"id": 14,
"code_window": [
"\tc.Assert(err, IsNil)\n",
"\n",
"\t// txn2 tries to lock the pessimisticLockKey, the lock should has been resolved in clean whole region resolve\n",
"\ttxn2 := s.begin(c)\n",
"\ttxn2.SetOption(kv.Pessimistic, true)\n",
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}\n",
"\terr = txn2.LockKeys(context.Background(), lockCtx, pessimisticLockKey)\n",
"\tc.Assert(err, IsNil)\n",
"\n",
"\terr = txn1.Rollback()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tikv.LockNoWait}\n"
],
"file_path": "store/tikv/tests/2pc_test.go",
"type": "replace",
"edit_start_line_idx": 1206
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"encoding/hex"
"math/rand"
"strings"
"sync/atomic"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
pb "github.com/pingcap/kvproto/pkg/kvrpcpb"
tidbkv "github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/tikv/kv"
"github.com/pingcap/tidb/store/tikv/logutil"
"github.com/pingcap/tidb/store/tikv/metrics"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
)
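// actionPessimisticLock and actionPessimisticRollback are the twoPhaseCommitter actions that acquire and
// roll back pessimistic locks for a batch of mutations.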
type actionPessimisticLock struct {
*kv.LockCtx
}
type actionPessimisticRollback struct{}
var (
_ twoPhaseCommitAction = actionPessimisticLock{}
_ twoPhaseCommitAction = actionPessimisticRollback{}
)
func (actionPessimisticLock) String() string {
return "pessimistic_lock"
}
func (actionPessimisticLock) tiKVTxnRegionsNumHistogram() prometheus.Observer {
return metrics.TxnRegionsNumHistogramPessimisticLock
}
func (actionPessimisticRollback) String() string {
return "pessimistic_rollback"
}
func (actionPessimisticRollback) tiKVTxnRegionsNumHistogram() prometheus.Observer {
return metrics.TxnRegionsNumHistogramPessimisticRollback
}
func (action actionPessimisticLock) handleSingleBatch(c *twoPhaseCommitter, bo *Backoffer, batch batchMutations) error {
m := batch.mutations
mutations := make([]*pb.Mutation, m.Len())
for i := 0; i < m.Len(); i++ {
mut := &pb.Mutation{
Op: pb.Op_PessimisticLock,
Key: m.GetKey(i),
}
if c.txn.us.HasPresumeKeyNotExists(m.GetKey(i)) || (c.doingAmend && m.GetOp(i) == pb.Op_Insert) {
mut.Assertion = pb.Assertion_NotExist
}
mutations[i] = mut
}
elapsed := uint64(time.Since(c.txn.startTime) / time.Millisecond)
ttl := elapsed + atomic.LoadUint64(&ManagedLockTTL)
failpoint.Inject("shortPessimisticLockTTL", func() {
ttl = 1
keys := make([]string, 0, len(mutations))
for _, m := range mutations {
keys = append(keys, hex.EncodeToString(m.Key))
}
logutil.BgLogger().Info("[failpoint] injected lock ttl = 1 on pessimistic lock",
zap.Uint64("txnStartTS", c.startTS), zap.Strings("keys", keys))
})
req := tikvrpc.NewRequest(tikvrpc.CmdPessimisticLock, &pb.PessimisticLockRequest{
Mutations: mutations,
PrimaryLock: c.primary(),
StartVersion: c.startTS,
ForUpdateTs: c.forUpdateTS,
LockTtl: ttl,
IsFirstLock: c.isFirstLock,
WaitTimeout: action.LockWaitTime,
ReturnValues: action.ReturnValues,
MinCommitTs: c.forUpdateTS + 1,
}, pb.Context{Priority: c.priority, SyncLog: c.syncLog})
lockWaitStartTime := action.WaitStartTime
for {
		// if lockWaitTime is set, refine the request's `WaitTimeout` field based on the timeout limit
if action.LockWaitTime > 0 {
timeLeft := action.LockWaitTime - (time.Since(lockWaitStartTime)).Milliseconds()
if timeLeft <= 0 {
req.PessimisticLock().WaitTimeout = tidbkv.LockNoWait
} else {
req.PessimisticLock().WaitTimeout = timeLeft
}
}
failpoint.Inject("PessimisticLockErrWriteConflict", func() error {
time.Sleep(300 * time.Millisecond)
return &kv.ErrWriteConflict{WriteConflict: nil}
})
startTime := time.Now()
resp, err := c.store.SendReq(bo, req, batch.region, ReadTimeoutShort)
if action.LockCtx.Stats != nil {
atomic.AddInt64(&action.LockCtx.Stats.LockRPCTime, int64(time.Since(startTime)))
atomic.AddInt64(&action.LockCtx.Stats.LockRPCCount, 1)
}
if err != nil {
return errors.Trace(err)
}
regionErr, err := resp.GetRegionError()
if err != nil {
return errors.Trace(err)
}
if regionErr != nil {
err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String()))
if err != nil {
return errors.Trace(err)
}
err = c.pessimisticLockMutations(bo, action.LockCtx, batch.mutations)
return errors.Trace(err)
}
if resp.Resp == nil {
return errors.Trace(kv.ErrBodyMissing)
}
lockResp := resp.Resp.(*pb.PessimisticLockResponse)
keyErrs := lockResp.GetErrors()
if len(keyErrs) == 0 {
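			// Every key in this batch is locked; record the returned values if the caller asked for them.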
if action.ReturnValues {
action.ValuesLock.Lock()
for i, mutation := range mutations {
action.Values[string(mutation.Key)] = kv.ReturnedValue{Value: lockResp.Values[i]}
}
action.ValuesLock.Unlock()
}
return nil
}
var locks []*Lock
for _, keyErr := range keyErrs {
// Check already exists error
if alreadyExist := keyErr.GetAlreadyExist(); alreadyExist != nil {
e := &kv.ErrKeyExist{AlreadyExist: alreadyExist}
return c.extractKeyExistsErr(e)
}
if deadlock := keyErr.Deadlock; deadlock != nil {
return &kv.ErrDeadlock{Deadlock: deadlock}
}
// Extract lock from key error
lock, err1 := extractLockFromKeyErr(keyErr)
if err1 != nil {
return errors.Trace(err1)
}
locks = append(locks, lock)
}
		// Because we already waited on TiKV, there is no need to Backoff here.
		// TiKV waits 3s by default (which is also the maximum wait value) when a lock error occurs.
startTime = time.Now()
msBeforeTxnExpired, _, err := c.store.lockResolver.ResolveLocks(bo, 0, locks)
if err != nil {
return errors.Trace(err)
}
if action.LockCtx.Stats != nil {
atomic.AddInt64(&action.LockCtx.Stats.ResolveLockTime, int64(time.Since(startTime)))
}
		// If msBeforeTxnExpired is not zero, there are still locks blocking us from acquiring
		// the pessimistic lock. Return an acquire-fail error when nowait is set, or a wait-timeout error if necessary.
if msBeforeTxnExpired > 0 {
if action.LockWaitTime == tidbkv.LockNoWait {
return kv.ErrLockAcquireFailAndNoWaitSet
} else if action.LockWaitTime == tidbkv.LockAlwaysWait {
// do nothing but keep wait
} else {
				// lockWaitTime is set; return a wait-timeout error if we are still blocked by a lock
if time.Since(lockWaitStartTime).Milliseconds() >= action.LockWaitTime {
return errors.Trace(kv.ErrLockWaitTimeout)
}
}
if action.LockCtx.PessimisticLockWaited != nil {
atomic.StoreInt32(action.LockCtx.PessimisticLockWaited, 1)
}
}
// Handle the killed flag when waiting for the pessimistic lock.
		// When a txn runs into LockKeys() and backs off here, it has no chance to call
// executor.Next() and check the killed flag.
if action.Killed != nil {
// Do not reset the killed flag here!
			// actionPessimisticLock runs on each region in parallel, so we have to consider that
// the error may be dropped.
if atomic.LoadUint32(action.Killed) == 1 {
return errors.Trace(kv.ErrQueryInterrupted)
}
}
}
}
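// handleSingleBatch for actionPessimisticRollback sends a PessimisticRollbackRequest to release the
// pessimistic locks this transaction acquired on the keys in the batch, retrying on region errors.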
func (actionPessimisticRollback) handleSingleBatch(c *twoPhaseCommitter, bo *Backoffer, batch batchMutations) error {
req := tikvrpc.NewRequest(tikvrpc.CmdPessimisticRollback, &pb.PessimisticRollbackRequest{
StartVersion: c.startTS,
ForUpdateTs: c.forUpdateTS,
Keys: batch.mutations.GetKeys(),
})
resp, err := c.store.SendReq(bo, req, batch.region, ReadTimeoutShort)
if err != nil {
return errors.Trace(err)
}
regionErr, err := resp.GetRegionError()
if err != nil {
return errors.Trace(err)
}
if regionErr != nil {
err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String()))
if err != nil {
return errors.Trace(err)
}
err = c.pessimisticRollbackMutations(bo, batch.mutations)
return errors.Trace(err)
}
return nil
}
func (c *twoPhaseCommitter) pessimisticLockMutations(bo *Backoffer, lockCtx *kv.LockCtx, mutations CommitterMutations) error {
if c.sessionID > 0 {
failpoint.Inject("beforePessimisticLock", func(val failpoint.Value) {
// Pass multiple instructions in one string, delimited by commas, to trigger multiple behaviors, like
			// `return("delay,fail")`. They will then be executed sequentially in a single trigger.
if v, ok := val.(string); ok {
for _, action := range strings.Split(v, ",") {
if action == "delay" {
duration := time.Duration(rand.Int63n(int64(time.Second) * 5))
logutil.Logger(bo.ctx).Info("[failpoint] injected delay at pessimistic lock",
zap.Uint64("txnStartTS", c.startTS), zap.Duration("duration", duration))
time.Sleep(duration)
} else if action == "fail" {
logutil.Logger(bo.ctx).Info("[failpoint] injected failure at pessimistic lock",
zap.Uint64("txnStartTS", c.startTS))
failpoint.Return(errors.New("injected failure at pessimistic lock"))
}
}
}
})
}
return c.doActionOnMutations(bo, actionPessimisticLock{lockCtx}, mutations)
}
func (c *twoPhaseCommitter) pessimisticRollbackMutations(bo *Backoffer, mutations CommitterMutations) error {
return c.doActionOnMutations(bo, actionPessimisticRollback{}, mutations)
}
| store/tikv/pessimistic.go | 1 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.9961333274841309,
0.0791531354188919,
0.00016625328862573951,
0.0017475866479799151,
0.26115596294403076
] |
{
"id": 14,
"code_window": [
"\tc.Assert(err, IsNil)\n",
"\n",
"\t// txn2 tries to lock the pessimisticLockKey, the lock should has been resolved in clean whole region resolve\n",
"\ttxn2 := s.begin(c)\n",
"\ttxn2.SetOption(kv.Pessimistic, true)\n",
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}\n",
"\terr = txn2.LockKeys(context.Background(), lockCtx, pessimisticLockKey)\n",
"\tc.Assert(err, IsNil)\n",
"\n",
"\terr = txn1.Rollback()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tikv.LockNoWait}\n"
],
"file_path": "store/tikv/tests/2pc_test.go",
"type": "replace",
"edit_start_line_idx": 1206
} | // Copyright 2019-present PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"context"
"errors"
"sync/atomic"
"time"
"go.uber.org/zap"
"google.golang.org/grpc"
deadlockPb "github.com/pingcap/kvproto/pkg/deadlock"
"github.com/pingcap/log"
"github.com/pingcap/tidb/store/mockstore/unistore/pd"
"github.com/pingcap/tidb/store/mockstore/unistore/util/lockwaiter"
)
// Follower nodes send detection RPCs to the Leader
const (
Follower = iota
Leader
)
// DetectorServer represents a detector server.
type DetectorServer struct {
Detector *Detector
role int32
}
// Detect handles a deadlock detection request: a Detect entry adds a wait-for edge and a response is
// returned only when a deadlock is found; CleanUpWaitFor and CleanUp remove the recorded wait-for edges.
func (ds *DetectorServer) Detect(req *deadlockPb.DeadlockRequest) *deadlockPb.DeadlockResponse {
switch req.Tp {
case deadlockPb.DeadlockRequestType_Detect:
err := ds.Detector.Detect(req.Entry.Txn, req.Entry.WaitForTxn, req.Entry.KeyHash)
if err != nil {
resp := convertErrToResp(err, req.Entry.Txn, req.Entry.WaitForTxn, req.Entry.KeyHash)
return resp
}
case deadlockPb.DeadlockRequestType_CleanUpWaitFor:
ds.Detector.CleanUpWaitFor(req.Entry.Txn, req.Entry.WaitForTxn, req.Entry.KeyHash)
case deadlockPb.DeadlockRequestType_CleanUp:
ds.Detector.CleanUp(req.Entry.Txn)
}
return nil
}
// DetectorClient represents a detector client.
type DetectorClient struct {
pdClient pd.Client
sendCh chan *deadlockPb.DeadlockRequest
waitMgr *lockwaiter.Manager
streamCli deadlockPb.Deadlock_DetectClient
streamCancel context.CancelFunc
streamConn *grpc.ClientConn
}
// getLeaderAddr sends a request to PD to find out the
// current leader node of the first region
func (dt *DetectorClient) getLeaderAddr() (string, error) {
// find first region from pd, get the first region leader
ctx := context.Background()
region, err := dt.pdClient.GetRegion(ctx, []byte{})
if err != nil {
log.Error("get first region failed", zap.Error(err))
return "", err
}
if region.Leader == nil {
return "", errors.New("no leader")
}
leaderStoreMeta, err := dt.pdClient.GetStore(ctx, region.Leader.GetStoreId())
if err != nil {
log.Error("get store failed", zap.Uint64("id", region.Leader.GetStoreId()), zap.Error(err))
return "", err
}
log.Warn("getLeaderAddr", zap.Stringer("leader peer", region.Leader), zap.String("addr", leaderStoreMeta.GetAddress()))
return leaderStoreMeta.GetAddress(), nil
}
// rebuildStreamClient builds a connection to the first region leader.
// It is not thread safe and should be called only by `DetectorClient.Start` or `DetectorClient.SendReqLoop`
func (dt *DetectorClient) rebuildStreamClient() error {
leaderAddr, err := dt.getLeaderAddr()
if err != nil {
return err
}
cc, err := grpc.Dial(leaderAddr, grpc.WithInsecure())
if err != nil {
return err
}
if dt.streamConn != nil {
err = dt.streamConn.Close()
if err != nil {
return err
}
}
dt.streamConn = cc
ctx, cancel := context.WithCancel(context.Background())
stream, err := deadlockPb.NewDeadlockClient(cc).Detect(ctx)
if err != nil {
cancel()
return err
}
log.Info("build stream client successfully", zap.String("leader addr", leaderAddr))
dt.streamCli = stream
dt.streamCancel = cancel
go dt.recvLoop(dt.streamCli)
return nil
}
// NewDetectorClient creates a new detector client. entryTTL is used for
// recycling the lock wait edges in the detector's wait map. chSize is the pending
// detection sending task size (used on non-leader nodes)
func NewDetectorClient(waiterMgr *lockwaiter.Manager, pdClient pd.Client) *DetectorClient {
chSize := 10000
newDetector := &DetectorClient{
sendCh: make(chan *deadlockPb.DeadlockRequest, chSize),
waitMgr: waiterMgr,
pdClient: pdClient,
}
return newDetector
}
// sendReqLoop sends detection requests to the leader; if the stream is broken it will be rebuilt and
// a new recv goroutine using the rebuilt stream client will be created
func (dt *DetectorClient) sendReqLoop() {
var (
err error
rebuildErr error
req *deadlockPb.DeadlockRequest
)
for {
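		// Lazily (re)build the stream to the current first-region leader; a failed Send below
		// invalidates the stream so it is rebuilt on the next iteration.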
if dt.streamCli == nil {
rebuildErr = dt.rebuildStreamClient()
if rebuildErr != nil {
log.Error("rebuild connection to first region failed", zap.Error(rebuildErr))
time.Sleep(3 * time.Second)
continue
}
}
req = <-dt.sendCh
err = dt.streamCli.Send(req)
if err != nil {
log.Warn("send failed, invalid current stream and try to rebuild connection", zap.Error(err))
dt.streamCancel()
dt.streamCli = nil
}
}
}
// recvLoop tries to receive responses (currently only deadlock errors) from the leader, and breaks the loop if an error happens
func (dt *DetectorClient) recvLoop(streamCli deadlockPb.Deadlock_DetectClient) {
var (
err error
resp *deadlockPb.DeadlockResponse
)
for {
resp, err = streamCli.Recv()
if err != nil {
log.Warn("recv from failed, stop receive", zap.Error(err))
break
}
		// only detection requests will get a response from the leader
dt.waitMgr.WakeUpForDeadlock(resp)
}
}
func (dt *DetectorClient) handleRemoteTask(requestType deadlockPb.DeadlockRequestType,
txnTs uint64, waitForTxnTs uint64, keyHash uint64) {
detectReq := &deadlockPb.DeadlockRequest{}
detectReq.Tp = requestType
detectReq.Entry.Txn = txnTs
detectReq.Entry.WaitForTxn = waitForTxnTs
detectReq.Entry.KeyHash = keyHash
dt.sendCh <- detectReq
}
// CleanUp processes the cleanup task on the local detector
// user interfaces
func (dt *DetectorClient) CleanUp(startTs uint64) {
dt.handleRemoteTask(deadlockPb.DeadlockRequestType_CleanUp, startTs, 0, 0)
}
// CleanUpWaitFor cleans up the specific wait edge in detector's wait map
func (dt *DetectorClient) CleanUpWaitFor(txnTs, waitForTxn, keyHash uint64) {
dt.handleRemoteTask(deadlockPb.DeadlockRequestType_CleanUpWaitFor, txnTs, waitForTxn, keyHash)
}
// Detect posts the detection request to the local deadlock detector or the remote first region leader;
// the caller should use `waiter.ch` to receive a possible deadlock response
func (dt *DetectorClient) Detect(txnTs uint64, waitForTxnTs uint64, keyHash uint64) {
dt.handleRemoteTask(deadlockPb.DeadlockRequestType_Detect, txnTs, waitForTxnTs, keyHash)
}
// convertErrToResp converts `ErrDeadlock` to `DeadlockResponse` proto type
func convertErrToResp(errDeadlock *ErrDeadlock, txnTs, waitForTxnTs, keyHash uint64) *deadlockPb.DeadlockResponse {
entry := deadlockPb.WaitForEntry{}
entry.Txn = txnTs
entry.WaitForTxn = waitForTxnTs
entry.KeyHash = keyHash
resp := &deadlockPb.DeadlockResponse{}
resp.Entry = entry
resp.DeadlockKeyHash = errDeadlock.DeadlockKeyHash
return resp
}
// NewDetectorServer creates local detector used by RPC detection handler
func NewDetectorServer() *DetectorServer {
entryTTL := 3 * time.Second
urgentSize := uint64(100000)
	expireInterval := 3600 * time.Second
	svr := &DetectorServer{
		Detector: NewDetector(entryTTL, urgentSize, expireInterval),
}
return svr
}
// IsLeader returns whether the server is leader or not.
func (ds *DetectorServer) IsLeader() bool {
return atomic.LoadInt32(&ds.role) == Leader
}
// ChangeRole changes the server role.
func (ds *DetectorServer) ChangeRole(newRole int32) {
atomic.StoreInt32(&ds.role, newRole)
}
| store/mockstore/unistore/tikv/deadlock.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0016289768973365426,
0.00024074746761471033,
0.00016063294606283307,
0.0001653367216931656,
0.000293341203359887
] |
{
"id": 14,
"code_window": [
"\tc.Assert(err, IsNil)\n",
"\n",
"\t// txn2 tries to lock the pessimisticLockKey, the lock should has been resolved in clean whole region resolve\n",
"\ttxn2 := s.begin(c)\n",
"\ttxn2.SetOption(kv.Pessimistic, true)\n",
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}\n",
"\terr = txn2.LockKeys(context.Background(), lockCtx, pessimisticLockKey)\n",
"\tc.Assert(err, IsNil)\n",
"\n",
"\terr = txn1.Rollback()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tikv.LockNoWait}\n"
],
"file_path": "store/tikv/tests/2pc_test.go",
"type": "replace",
"edit_start_line_idx": 1206
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package chunk
import (
"math"
"math/rand"
"strconv"
"strings"
"testing"
"time"
"github.com/cznic/mathutil"
"github.com/pingcap/check"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/types/json"
)
func (s *testChunkSuite) TestList(c *check.C) {
fields := []*types.FieldType{
types.NewFieldType(mysql.TypeLonglong),
}
l := NewList(fields, 2, 2)
srcChunk := NewChunkWithCapacity(fields, 32)
srcChunk.AppendInt64(0, 1)
srcRow := srcChunk.GetRow(0)
// Test basic append.
for i := 0; i < 5; i++ {
l.AppendRow(srcRow)
}
c.Assert(l.NumChunks(), check.Equals, 3)
c.Assert(l.Len(), check.Equals, 5)
c.Assert(len(l.freelist), check.Equals, 0)
// Test chunk reuse.
l.Reset()
c.Assert(len(l.freelist), check.Equals, 3)
for i := 0; i < 5; i++ {
l.AppendRow(srcRow)
}
c.Assert(len(l.freelist), check.Equals, 0)
// Test add chunk then append row.
l.Reset()
nChunk := NewChunkWithCapacity(fields, 32)
nChunk.AppendNull(0)
l.Add(nChunk)
ptr := l.AppendRow(srcRow)
c.Assert(l.NumChunks(), check.Equals, 2)
c.Assert(ptr.ChkIdx, check.Equals, uint32(1))
c.Assert(ptr.RowIdx, check.Equals, uint32(0))
row := l.GetRow(ptr)
c.Assert(row.GetInt64(0), check.Equals, int64(1))
// Test iteration.
l.Reset()
for i := 0; i < 5; i++ {
tmp := NewChunkWithCapacity(fields, 32)
tmp.AppendInt64(0, int64(i))
l.AppendRow(tmp.GetRow(0))
}
expected := []int64{0, 1, 2, 3, 4}
var results []int64
err := l.Walk(func(r Row) error {
results = append(results, r.GetInt64(0))
return nil
})
c.Assert(err, check.IsNil)
c.Assert(results, check.DeepEquals, expected)
}
func (s *testChunkSuite) TestListMemoryUsage(c *check.C) {
fieldTypes := make([]*types.FieldType, 0, 5)
fieldTypes = append(fieldTypes, &types.FieldType{Tp: mysql.TypeFloat})
fieldTypes = append(fieldTypes, &types.FieldType{Tp: mysql.TypeVarchar})
fieldTypes = append(fieldTypes, &types.FieldType{Tp: mysql.TypeJSON})
fieldTypes = append(fieldTypes, &types.FieldType{Tp: mysql.TypeDatetime})
fieldTypes = append(fieldTypes, &types.FieldType{Tp: mysql.TypeDuration})
jsonObj, err := json.ParseBinaryFromString("1")
c.Assert(err, check.IsNil)
timeObj := types.NewTime(types.FromGoTime(time.Now()), mysql.TypeDatetime, 0)
durationObj := types.Duration{Duration: math.MaxInt64, Fsp: 0}
maxChunkSize := 2
srcChk := NewChunkWithCapacity(fieldTypes, maxChunkSize)
srcChk.AppendFloat32(0, 12.4)
srcChk.AppendString(1, "123")
srcChk.AppendJSON(2, jsonObj)
srcChk.AppendTime(3, timeObj)
srcChk.AppendDuration(4, durationObj)
list := NewList(fieldTypes, maxChunkSize, maxChunkSize*2)
c.Assert(list.GetMemTracker().BytesConsumed(), check.Equals, int64(0))
list.AppendRow(srcChk.GetRow(0))
c.Assert(list.GetMemTracker().BytesConsumed(), check.Equals, int64(0))
memUsage := list.chunks[0].MemoryUsage()
list.Reset()
c.Assert(list.GetMemTracker().BytesConsumed(), check.Equals, memUsage)
list.Add(srcChk)
c.Assert(list.GetMemTracker().BytesConsumed(), check.Equals, memUsage+srcChk.MemoryUsage())
}
func (s *testChunkSuite) TestListPrePreAlloc4RowAndInsert(c *check.C) {
fieldTypes := make([]*types.FieldType, 0, 4)
fieldTypes = append(fieldTypes, &types.FieldType{Tp: mysql.TypeFloat})
fieldTypes = append(fieldTypes, &types.FieldType{Tp: mysql.TypeLonglong})
fieldTypes = append(fieldTypes, &types.FieldType{Tp: mysql.TypeNewDecimal})
fieldTypes = append(fieldTypes, &types.FieldType{Tp: mysql.TypeVarchar})
srcChk := NewChunkWithCapacity(fieldTypes, 10)
for i := int64(0); i < 10; i++ {
srcChk.AppendFloat32(0, float32(i))
srcChk.AppendInt64(1, i)
srcChk.AppendMyDecimal(2, types.NewDecFromInt(i))
srcChk.AppendString(3, strings.Repeat(strconv.FormatInt(i, 10), int(i)))
}
srcList := NewList(fieldTypes, 3, 3)
destList := NewList(fieldTypes, 5, 5)
destRowPtr := make([]RowPtr, srcChk.NumRows())
for i := 0; i < srcChk.NumRows(); i++ {
srcList.AppendRow(srcChk.GetRow(i))
destRowPtr[i] = destList.preAlloc4Row(srcChk.GetRow(i))
}
c.Assert(srcList.NumChunks(), check.Equals, 4)
c.Assert(destList.NumChunks(), check.Equals, 2)
iter4Src := NewIterator4List(srcList)
for row, i := iter4Src.Begin(), 0; row != iter4Src.End(); row, i = iter4Src.Next(), i+1 {
destList.Insert(destRowPtr[i], row)
}
iter4Dest := NewIterator4List(destList)
srcRow, destRow := iter4Src.Begin(), iter4Dest.Begin()
for ; srcRow != iter4Src.End(); srcRow, destRow = iter4Src.Next(), iter4Dest.Next() {
c.Assert(srcRow.GetFloat32(0), check.Equals, destRow.GetFloat32(0))
c.Assert(srcRow.GetInt64(1), check.Equals, destRow.GetInt64(1))
c.Assert(srcRow.GetMyDecimal(2).Compare(destRow.GetMyDecimal(2)) == 0, check.IsTrue)
c.Assert(srcRow.GetString(3), check.Equals, destRow.GetString(3))
}
}
func BenchmarkListMemoryUsage(b *testing.B) {
fieldTypes := make([]*types.FieldType, 0, 4)
fieldTypes = append(fieldTypes, &types.FieldType{Tp: mysql.TypeFloat})
fieldTypes = append(fieldTypes, &types.FieldType{Tp: mysql.TypeVarchar})
fieldTypes = append(fieldTypes, &types.FieldType{Tp: mysql.TypeDatetime})
fieldTypes = append(fieldTypes, &types.FieldType{Tp: mysql.TypeDuration})
chk := NewChunkWithCapacity(fieldTypes, 2)
timeObj := types.NewTime(types.FromGoTime(time.Now()), mysql.TypeDatetime, 0)
durationObj := types.Duration{Duration: math.MaxInt64, Fsp: 0}
chk.AppendFloat64(0, 123.123)
chk.AppendString(1, "123")
chk.AppendTime(2, timeObj)
chk.AppendDuration(3, durationObj)
row := chk.GetRow(0)
initCap := 50
list := NewList(fieldTypes, 2, 8)
for i := 0; i < initCap; i++ {
list.AppendRow(row)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
list.GetMemTracker().BytesConsumed()
}
}
func BenchmarkPreAllocList(b *testing.B) {
fieldTypes := make([]*types.FieldType, 0, 1)
fieldTypes = append(fieldTypes, &types.FieldType{Tp: mysql.TypeLonglong})
chk := NewChunkWithCapacity(fieldTypes, 1)
chk.AppendInt64(0, 1)
row := chk.GetRow(0)
b.ResetTimer()
list := NewList(fieldTypes, 1024, 1024)
for i := 0; i < b.N; i++ {
list.Reset()
// 32768 indicates the number of int64 rows to fill 256KB L2 cache.
for j := 0; j < 32768; j++ {
list.preAlloc4Row(row)
}
}
}
func BenchmarkPreAllocChunk(b *testing.B) {
fieldTypes := make([]*types.FieldType, 0, 1)
fieldTypes = append(fieldTypes, &types.FieldType{Tp: mysql.TypeLonglong})
chk := NewChunkWithCapacity(fieldTypes, 1)
chk.AppendInt64(0, 1)
row := chk.GetRow(0)
b.ResetTimer()
finalChk := New(fieldTypes, 33000, 1024)
for i := 0; i < b.N; i++ {
finalChk.Reset()
for j := 0; j < 32768; j++ {
finalChk.preAlloc(row)
}
}
}
func BenchmarkListAdd(b *testing.B) {
numChk, numRow := 1, 2
chks, fields := initChunks(numChk, numRow)
chk := chks[0]
l := NewList(fields, numRow, numRow)
b.ResetTimer()
for i := 0; i < b.N; i++ {
l.Add(chk)
}
}
func BenchmarkListGetRow(b *testing.B) {
numChk, numRow := 10000, 2
chks, fields := initChunks(numChk, numRow)
l := NewList(fields, numRow, numRow)
for _, chk := range chks {
l.Add(chk)
}
rand.Seed(0)
ptrs := make([]RowPtr, 0, b.N)
for i := 0; i < mathutil.Min(b.N, 10000); i++ {
ptrs = append(ptrs, RowPtr{
ChkIdx: rand.Uint32() % uint32(numChk),
RowIdx: rand.Uint32() % uint32(numRow),
})
}
for i := 10000; i < cap(ptrs); i++ {
ptrs = append(ptrs, ptrs[i%10000])
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
l.GetRow(ptrs[i])
}
}
| util/chunk/list_test.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.000176350207766518,
0.00017117775860242546,
0.0001596695074113086,
0.00017256494902539998,
0.000004564097707770998
] |
{
"id": 14,
"code_window": [
"\tc.Assert(err, IsNil)\n",
"\n",
"\t// txn2 tries to lock the pessimisticLockKey, the lock should has been resolved in clean whole region resolve\n",
"\ttxn2 := s.begin(c)\n",
"\ttxn2.SetOption(kv.Pessimistic, true)\n",
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tidbkv.LockNoWait}\n",
"\terr = txn2.LockKeys(context.Background(), lockCtx, pessimisticLockKey)\n",
"\tc.Assert(err, IsNil)\n",
"\n",
"\terr = txn1.Rollback()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tlockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tikv.LockNoWait}\n"
],
"file_path": "store/tikv/tests/2pc_test.go",
"type": "replace",
"edit_start_line_idx": 1206
} | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package telemetry
import (
. "github.com/pingcap/check"
)
var _ = Suite(&testClusterHardwareSuite{})
type testClusterHardwareSuite struct{}
func (s *testClusterHardwareSuite) TestNormalizeDiskName(c *C) {
c.Parallel()
c.Assert(normalizeDiskName("/dev/sdb"), Equals, "sdb")
c.Assert(normalizeDiskName("sda"), Equals, "sda")
}
func (s *testClusterHardwareSuite) TestIsNormalizedDiskNameAllowed(c *C) {
c.Parallel()
passList := []string{"disk1s4", "rootfs", "devtmpfs", "sda", "sda1", "sdb", "sdb3", "sdc", "nvme0", "nvme0n1", "nvme0n1p0", "md127", "mdisk1s4"}
for _, n := range passList {
c.Assert(isNormalizedDiskNameAllowed(n), Equals, true)
}
failList := []string{"foo", "/rootfs", "asmdisk01p1"}
for _, n := range failList {
c.Assert(isNormalizedDiskNameAllowed(n), Equals, false)
}
}
func (s *testClusterHardwareSuite) TestNormalizeFieldName(c *C) {
c.Parallel()
c.Assert(normalizeFieldName("deviceName"), Equals, "deviceName")
c.Assert(normalizeFieldName("device-name"), Equals, "deviceName")
c.Assert(normalizeFieldName("device_name"), Equals, "deviceName")
c.Assert(normalizeFieldName("l1-cache-size"), Equals, "l1CacheSize")
c.Assert(normalizeFieldName("free-percent"), Equals, "freePercent")
}
| telemetry/data_cluster_hardware_test.go | 0 | https://github.com/pingcap/tidb/commit/c25042543ad6496849ba7547f81b2f0a986d35cd | [
0.0019409401575103402,
0.00046390146599151194,
0.00016205167048610747,
0.0001694361853878945,
0.0006605659727938473
] |
{
"id": 0,
"code_window": [
"\n",
"func (s *DryRunnableStorage) GuaranteedUpdate(\n",
"\tctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool,\n",
"\tpreconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, dryRun bool, suggestion runtime.Object) error {\n",
"\tif dryRun {\n",
"\t\terr := s.Storage.Get(ctx, key, storage.GetOptions{IgnoreNotFound: ignoreNotFound}, ptrToType)\n",
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tpreconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, dryRun bool, cachedExistingObject runtime.Object) error {\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go",
"type": "replace",
"edit_start_line_idx": 80
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd3
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"path"
"reflect"
"strings"
"time"
"go.etcd.io/etcd/clientv3"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/etcd3/metrics"
"k8s.io/apiserver/pkg/storage/value"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
utiltrace "k8s.io/utils/trace"
)
// authenticatedDataString satisfies the value.Context interface. It uses the key to
// authenticate the stored data. This does not defend against reuse of previously
// encrypted values under the same key, but will prevent an attacker from using an
// encrypted value from a different key. A stronger authenticated data segment would
// include the etcd3 Version field (which is incremented on each write to a key and
// reset when the key is deleted), but an attacker with write access to etcd can
// force deletion and recreation of keys to weaken that angle.
type authenticatedDataString string
// AuthenticatedData implements the value.Context interface.
func (d authenticatedDataString) AuthenticatedData() []byte {
return []byte(string(d))
}
var _ value.Context = authenticatedDataString("")
type store struct {
client *clientv3.Client
codec runtime.Codec
versioner storage.Versioner
transformer value.Transformer
pathPrefix string
watcher *watcher
pagingEnabled bool
leaseManager *leaseManager
}
type objState struct {
obj runtime.Object
meta *storage.ResponseMeta
rev int64
data []byte
stale bool
}
// New returns an etcd3 implementation of storage.Interface.
func New(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, transformer value.Transformer, pagingEnabled bool, leaseReuseDurationSeconds int64) storage.Interface {
return newStore(c, newFunc, pagingEnabled, leaseReuseDurationSeconds, codec, prefix, transformer)
}
func newStore(c *clientv3.Client, newFunc func() runtime.Object, pagingEnabled bool, leaseReuseDurationSeconds int64, codec runtime.Codec, prefix string, transformer value.Transformer) *store {
versioner := APIObjectVersioner{}
result := &store{
client: c,
codec: codec,
versioner: versioner,
transformer: transformer,
pagingEnabled: pagingEnabled,
// for compatibility with etcd2 impl.
// no-op for default prefix of '/registry'.
// keeps compatibility with etcd2 impl for custom prefixes that don't start with '/'
pathPrefix: path.Join("/", prefix),
watcher: newWatcher(c, codec, newFunc, versioner, transformer),
leaseManager: newDefaultLeaseManager(c, leaseReuseDurationSeconds),
}
return result
}
// Versioner implements storage.Interface.Versioner.
func (s *store) Versioner() storage.Versioner {
return s.versioner
}
// Get implements storage.Interface.Get.
func (s *store) Get(ctx context.Context, key string, opts storage.GetOptions, out runtime.Object) error {
key = path.Join(s.pathPrefix, key)
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return err
}
if err = s.validateMinimumResourceVersion(opts.ResourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
if len(getResp.Kvs) == 0 {
if opts.IgnoreNotFound {
return runtime.SetZeroValue(out)
}
return storage.NewKeyNotFoundError(key, 0)
}
kv := getResp.Kvs[0]
data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
return decode(s.codec, s.versioner, data, out, kv.ModRevision)
}
// Create implements storage.Interface.Create.
func (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error {
if version, err := s.versioner.ObjectResourceVersion(obj); err == nil && version != 0 {
return errors.New("resourceVersion should not be set on objects to be created")
}
if err := s.versioner.PrepareObjectForStorage(obj); err != nil {
return fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
data, err := runtime.Encode(s.codec, obj)
if err != nil {
return err
}
key = path.Join(s.pathPrefix, key)
opts, err := s.ttlOpts(ctx, int64(ttl))
if err != nil {
return err
}
newData, err := s.transformer.TransformToStorage(data, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
notFound(key),
).Then(
clientv3.OpPut(key, string(newData), opts...),
).Commit()
metrics.RecordEtcdRequestLatency("create", getTypeName(obj), startTime)
if err != nil {
return err
}
if !txnResp.Succeeded {
return storage.NewKeyExistsError(key, 0)
}
if out != nil {
putResp := txnResp.Responses[0].GetResponsePut()
return decode(s.codec, s.versioner, data, out, putResp.Header.Revision)
}
return nil
}
// Delete implements storage.Interface.Delete.
func (s *store) Delete(
ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions,
validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
v, err := conversion.EnforcePtr(out)
if err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
key = path.Join(s.pathPrefix, key)
return s.conditionalDelete(ctx, key, out, v, preconditions, validateDeletion, cachedExistingObject)
}
func (s *store) conditionalDelete(
ctx context.Context, key string, out runtime.Object, v reflect.Value, preconditions *storage.Preconditions,
validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
getCurrentState := func() (*objState, error) {
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return nil, err
}
return s.getState(getResp, key, v, false)
}
var origState *objState
var err error
var origStateIsCurrent bool
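	// Prefer the cached object when one is provided; origStateIsCurrent records whether origState
	// was read from etcd, so failures on possibly stale data trigger a live re-read before returning.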
if cachedExistingObject != nil {
origState, err = s.getStateFromObject(cachedExistingObject)
} else {
origState, err = getCurrentState()
origStateIsCurrent = true
}
if err != nil {
return err
}
for {
if preconditions != nil {
if err := preconditions.Check(key, origState.obj); err != nil {
if origStateIsCurrent {
return err
}
// It's possible we're working with stale data.
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
origStateIsCurrent = true
// Retry
continue
}
}
if err := validateDeletion(ctx, origState.obj); err != nil {
if origStateIsCurrent {
return err
}
// It's possible we're working with stale data.
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
origStateIsCurrent = true
// Retry
continue
}
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev),
).Then(
clientv3.OpDelete(key),
).Else(
clientv3.OpGet(key),
).Commit()
metrics.RecordEtcdRequestLatency("delete", getTypeName(out), startTime)
if err != nil {
return err
}
if !txnResp.Succeeded {
getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
klog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key)
origState, err = s.getState(getResp, key, v, false)
if err != nil {
return err
}
origStateIsCurrent = true
continue
}
return decode(s.codec, s.versioner, origState.data, out, origState.rev)
}
}
// GuaranteedUpdate implements storage.Interface.GuaranteedUpdate.
func (s *store) GuaranteedUpdate(
ctx context.Context, key string, out runtime.Object, ignoreNotFound bool,
preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, suggestion runtime.Object) error {
trace := utiltrace.New("GuaranteedUpdate etcd3", utiltrace.Field{"type", getTypeName(out)})
defer trace.LogIfLong(500 * time.Millisecond)
v, err := conversion.EnforcePtr(out)
if err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
key = path.Join(s.pathPrefix, key)
getCurrentState := func() (*objState, error) {
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return nil, err
}
return s.getState(getResp, key, v, ignoreNotFound)
}
var origState *objState
var mustCheckData bool
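	// Start from the caller-provided suggestion when available; mustCheckData marks that this state
	// may be stale, so conflicts below fall back to reading the current state from etcd.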
if suggestion != nil {
origState, err = s.getStateFromObject(suggestion)
mustCheckData = true
} else {
origState, err = getCurrentState()
}
if err != nil {
return err
}
trace.Step("initial value restored")
transformContext := authenticatedDataString(key)
for {
if err := preconditions.Check(key, origState.obj); err != nil {
// If our data is already up to date, return the error
if !mustCheckData {
return err
}
// It's possible we were working with stale data
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
// Retry
continue
}
ret, ttl, err := s.updateState(origState, tryUpdate)
if err != nil {
// If our data is already up to date, return the error
if !mustCheckData {
return err
}
// It's possible we were working with stale data
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
// Retry
continue
}
data, err := runtime.Encode(s.codec, ret)
if err != nil {
return err
}
if !origState.stale && bytes.Equal(data, origState.data) {
// if we skipped the original Get in this loop, we must refresh from
// etcd in order to be sure the data in the store is equivalent to
// our desired serialization
if mustCheckData {
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
if !bytes.Equal(data, origState.data) {
// original data changed, restart loop
continue
}
}
// recheck that the data from etcd is not stale before short-circuiting a write
if !origState.stale {
return decode(s.codec, s.versioner, origState.data, out, origState.rev)
}
}
newData, err := s.transformer.TransformToStorage(data, transformContext)
if err != nil {
return storage.NewInternalError(err.Error())
}
opts, err := s.ttlOpts(ctx, int64(ttl))
if err != nil {
return err
}
trace.Step("Transaction prepared")
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev),
).Then(
clientv3.OpPut(key, string(newData), opts...),
).Else(
clientv3.OpGet(key),
).Commit()
metrics.RecordEtcdRequestLatency("update", getTypeName(out), startTime)
if err != nil {
return err
}
trace.Step("Transaction committed")
if !txnResp.Succeeded {
getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
klog.V(4).Infof("GuaranteedUpdate of %s failed because of a conflict, going to retry", key)
origState, err = s.getState(getResp, key, v, ignoreNotFound)
if err != nil {
return err
}
trace.Step("Retry value restored")
mustCheckData = false
continue
}
putResp := txnResp.Responses[0].GetResponsePut()
return decode(s.codec, s.versioner, data, out, putResp.Header.Revision)
}
}
// GetToList implements storage.Interface.GetToList.
func (s *store) GetToList(ctx context.Context, key string, listOpts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := listOpts.ResourceVersion
match := listOpts.ResourceVersionMatch
pred := listOpts.Predicate
trace := utiltrace.New("GetToList etcd3",
utiltrace.Field{"key", key},
utiltrace.Field{"resourceVersion", resourceVersion},
utiltrace.Field{"resourceVersionMatch", match},
utiltrace.Field{"limit", pred.Limit},
utiltrace.Field{"continue", pred.Continue})
defer trace.LogIfLong(500 * time.Millisecond)
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
v, err := conversion.EnforcePtr(listPtr)
if err != nil || v.Kind() != reflect.Slice {
return fmt.Errorf("need ptr to slice: %v", err)
}
newItemFunc := getNewItemFunc(listObj, v)
key = path.Join(s.pathPrefix, key)
startTime := time.Now()
var opts []clientv3.OpOption
if len(resourceVersion) > 0 && match == metav1.ResourceVersionMatchExact {
rv, err := s.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
opts = append(opts, clientv3.WithRev(int64(rv)))
}
getResp, err := s.client.KV.Get(ctx, key, opts...)
metrics.RecordEtcdRequestLatency("get", getTypeName(listPtr), startTime)
if err != nil {
return err
}
if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
if len(getResp.Kvs) > 0 {
data, _, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
if err := appendListItem(v, data, uint64(getResp.Kvs[0].ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {
return err
}
}
// update version with cluster level revision
return s.versioner.UpdateList(listObj, uint64(getResp.Header.Revision), "", nil)
}
func getNewItemFunc(listObj runtime.Object, v reflect.Value) func() runtime.Object {
// For unstructured lists with a target group/version, preserve the group/version in the instantiated list items
if unstructuredList, isUnstructured := listObj.(*unstructured.UnstructuredList); isUnstructured {
if apiVersion := unstructuredList.GetAPIVersion(); len(apiVersion) > 0 {
return func() runtime.Object {
return &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": apiVersion}}
}
}
}
// Otherwise just instantiate an empty item
elem := v.Type().Elem()
return func() runtime.Object {
return reflect.New(elem).Interface().(runtime.Object)
}
}
func (s *store) Count(key string) (int64, error) {
key = path.Join(s.pathPrefix, key)
	// We need to make sure the key ends with "/" so that we only get children "directories".
// e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three,
// while with prefix "/a/" will return only "/a/b" which is the correct answer.
if !strings.HasSuffix(key, "/") {
key += "/"
}
startTime := time.Now()
getResp, err := s.client.KV.Get(context.Background(), key, clientv3.WithRange(clientv3.GetPrefixRangeEnd(key)), clientv3.WithCountOnly())
metrics.RecordEtcdRequestLatency("listWithCount", key, startTime)
if err != nil {
return 0, err
}
return getResp.Count, nil
}
// continueToken is a simple structured object for encoding the state of a continue token.
// TODO: if we change the version of the encoded from, we can't start encoding the new version
// until all other servers are upgraded (i.e. we need to support rolling schema)
// This is a public API struct and cannot change.
type continueToken struct {
APIVersion string `json:"v"`
ResourceVersion int64 `json:"rv"`
StartKey string `json:"start"`
}
// parseFrom transforms an encoded predicate from into a versioned struct.
// TODO: return a typed error that instructs clients that they must relist
func decodeContinue(continueValue, keyPrefix string) (fromKey string, rv int64, err error) {
data, err := base64.RawURLEncoding.DecodeString(continueValue)
if err != nil {
return "", 0, fmt.Errorf("continue key is not valid: %v", err)
}
var c continueToken
if err := json.Unmarshal(data, &c); err != nil {
return "", 0, fmt.Errorf("continue key is not valid: %v", err)
}
switch c.APIVersion {
case "meta.k8s.io/v1":
if c.ResourceVersion == 0 {
return "", 0, fmt.Errorf("continue key is not valid: incorrect encoded start resourceVersion (version meta.k8s.io/v1)")
}
if len(c.StartKey) == 0 {
return "", 0, fmt.Errorf("continue key is not valid: encoded start key empty (version meta.k8s.io/v1)")
}
// defend against path traversal attacks by clients - path.Clean will ensure that startKey cannot
// be at a higher level of the hierarchy, and so when we append the key prefix we will end up with
// continue start key that is fully qualified and cannot range over anything less specific than
// keyPrefix.
key := c.StartKey
if !strings.HasPrefix(key, "/") {
key = "/" + key
}
cleaned := path.Clean(key)
if cleaned != key {
return "", 0, fmt.Errorf("continue key is not valid: %s", c.StartKey)
}
return keyPrefix + cleaned[1:], c.ResourceVersion, nil
default:
return "", 0, fmt.Errorf("continue key is not valid: server does not recognize this encoded version %q", c.APIVersion)
}
}
// encodeContinue returns a string representing the encoded continuation of the current query.
func encodeContinue(key, keyPrefix string, resourceVersion int64) (string, error) {
nextKey := strings.TrimPrefix(key, keyPrefix)
if nextKey == key {
return "", fmt.Errorf("unable to encode next field: the key and key prefix do not match")
}
out, err := json.Marshal(&continueToken{APIVersion: "meta.k8s.io/v1", ResourceVersion: resourceVersion, StartKey: nextKey})
if err != nil {
return "", err
}
return base64.RawURLEncoding.EncodeToString(out), nil
}
// List implements storage.Interface.List.
func (s *store) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := opts.ResourceVersion
match := opts.ResourceVersionMatch
pred := opts.Predicate
trace := utiltrace.New("List etcd3",
utiltrace.Field{"key", key},
utiltrace.Field{"resourceVersion", resourceVersion},
utiltrace.Field{"resourceVersionMatch", match},
utiltrace.Field{"limit", pred.Limit},
utiltrace.Field{"continue", pred.Continue})
defer trace.LogIfLong(500 * time.Millisecond)
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
v, err := conversion.EnforcePtr(listPtr)
if err != nil || v.Kind() != reflect.Slice {
return fmt.Errorf("need ptr to slice: %v", err)
}
if s.pathPrefix != "" {
key = path.Join(s.pathPrefix, key)
}
	// We need to make sure the key ends with "/" so that we only get children "directories".
// e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three,
// while with prefix "/a/" will return only "/a/b" which is the correct answer.
if !strings.HasSuffix(key, "/") {
key += "/"
}
keyPrefix := key
// set the appropriate clientv3 options to filter the returned data set
var paging bool
options := make([]clientv3.OpOption, 0, 4)
if s.pagingEnabled && pred.Limit > 0 {
paging = true
options = append(options, clientv3.WithLimit(pred.Limit))
}
newItemFunc := getNewItemFunc(listObj, v)
var fromRV *uint64
if len(resourceVersion) > 0 {
parsedRV, err := s.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
fromRV = &parsedRV
}
var returnedRV, continueRV, withRev int64
var continueKey string
switch {
case s.pagingEnabled && len(pred.Continue) > 0:
continueKey, continueRV, err = decodeContinue(pred.Continue, keyPrefix)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid continue token: %v", err))
}
if len(resourceVersion) > 0 && resourceVersion != "0" {
return apierrors.NewBadRequest("specifying resource version is not allowed when using continue")
}
rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)
options = append(options, clientv3.WithRange(rangeEnd))
key = continueKey
// If continueRV > 0, the LIST request needs a specific resource version.
// continueRV==0 is invalid.
// If continueRV < 0, the request is for the latest resource version.
if continueRV > 0 {
withRev = continueRV
returnedRV = continueRV
}
case s.pagingEnabled && pred.Limit > 0:
if fromRV != nil {
switch match {
case metav1.ResourceVersionMatchNotOlderThan:
// The not older than constraint is checked after we get a response from etcd,
// and returnedRV is then set to the revision we get from the etcd response.
case metav1.ResourceVersionMatchExact:
returnedRV = int64(*fromRV)
withRev = returnedRV
case "": // legacy case
if *fromRV > 0 {
returnedRV = int64(*fromRV)
withRev = returnedRV
}
default:
return fmt.Errorf("unknown ResourceVersionMatch value: %v", match)
}
}
rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)
options = append(options, clientv3.WithRange(rangeEnd))
default:
if fromRV != nil {
switch match {
case metav1.ResourceVersionMatchNotOlderThan:
// The not older than constraint is checked after we get a response from etcd,
// and returnedRV is then set to the revision we get from the etcd response.
case metav1.ResourceVersionMatchExact:
returnedRV = int64(*fromRV)
withRev = returnedRV
case "": // legacy case
default:
return fmt.Errorf("unknown ResourceVersionMatch value: %v", match)
}
}
options = append(options, clientv3.WithPrefix())
}
if withRev != 0 {
options = append(options, clientv3.WithRev(withRev))
}
// loop until we have filled the requested limit from etcd or there are no more results
var lastKey []byte
var hasMore bool
var getResp *clientv3.GetResponse
for {
startTime := time.Now()
getResp, err = s.client.KV.Get(ctx, key, options...)
metrics.RecordEtcdRequestLatency("list", getTypeName(listPtr), startTime)
if err != nil {
return interpretListError(err, len(pred.Continue) > 0, continueKey, keyPrefix)
}
if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
hasMore = getResp.More
if len(getResp.Kvs) == 0 && getResp.More {
return fmt.Errorf("no results were found, but etcd indicated there were more values remaining")
}
// avoid small allocations for the result slice, since this can be called in many
// different contexts and we don't know how significantly the result will be filtered
if pred.Empty() {
growSlice(v, len(getResp.Kvs))
} else {
growSlice(v, 2048, len(getResp.Kvs))
}
// take items from the response until the bucket is full, filtering as we go
for _, kv := range getResp.Kvs {
if paging && int64(v.Len()) >= pred.Limit {
hasMore = true
break
}
lastKey = kv.Key
data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(kv.Key))
if err != nil {
return storage.NewInternalErrorf("unable to transform key %q: %v", kv.Key, err)
}
if err := appendListItem(v, data, uint64(kv.ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {
return err
}
}
// indicate to the client which resource version was returned
if returnedRV == 0 {
returnedRV = getResp.Header.Revision
}
// no more results remain or we didn't request paging
if !hasMore || !paging {
break
}
// we're paging but we have filled our bucket
if int64(v.Len()) >= pred.Limit {
break
}
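		// Advance the range to start just after the last key returned and pin the revision, so the
		// next page is served from the same snapshot.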
key = string(lastKey) + "\x00"
if withRev == 0 {
withRev = returnedRV
options = append(options, clientv3.WithRev(withRev))
}
}
// instruct the client to begin querying from immediately after the last key we returned
// we never return a key that the client wouldn't be allowed to see
if hasMore {
// we want to start immediately after the last key
next, err := encodeContinue(string(lastKey)+"\x00", keyPrefix, returnedRV)
if err != nil {
return err
}
var remainingItemCount *int64
		// getResp.Count includes objects that do not match pred.
// Instead of returning inaccurate count for non-empty selectors, we return nil.
// Only set remainingItemCount if the predicate is empty.
if utilfeature.DefaultFeatureGate.Enabled(features.RemainingItemCount) {
if pred.Empty() {
c := int64(getResp.Count - pred.Limit)
remainingItemCount = &c
}
}
return s.versioner.UpdateList(listObj, uint64(returnedRV), next, remainingItemCount)
}
// no continuation
return s.versioner.UpdateList(listObj, uint64(returnedRV), "", nil)
}
// growSlice takes a slice value and grows its capacity up
// to the maximum of the passed sizes or maxCapacity, whichever
// is smaller. Above maxCapacity decisions about allocation are left
// to the Go runtime on append. This allows a caller to make an
// educated guess about the potential size of the total list while
// still avoiding overly aggressive initial allocation. If sizes
// is empty maxCapacity will be used as the size to grow.
func growSlice(v reflect.Value, maxCapacity int, sizes ...int) {
cap := v.Cap()
max := cap
for _, size := range sizes {
if size > max {
max = size
}
}
if len(sizes) == 0 || max > maxCapacity {
max = maxCapacity
}
if max <= cap {
return
}
if v.Len() > 0 {
extra := reflect.MakeSlice(v.Type(), 0, max)
reflect.Copy(extra, v)
v.Set(extra)
} else {
extra := reflect.MakeSlice(v.Type(), 0, max)
v.Set(extra)
}
}
// Watch implements storage.Interface.Watch.
func (s *store) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return s.watch(ctx, key, opts, false)
}
// WatchList implements storage.Interface.WatchList.
func (s *store) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return s.watch(ctx, key, opts, true)
}
func (s *store) watch(ctx context.Context, key string, opts storage.ListOptions, recursive bool) (watch.Interface, error) {
rev, err := s.versioner.ParseResourceVersion(opts.ResourceVersion)
if err != nil {
return nil, err
}
key = path.Join(s.pathPrefix, key)
return s.watcher.Watch(ctx, key, int64(rev), recursive, opts.ProgressNotify, opts.Predicate)
}
func (s *store) getState(getResp *clientv3.GetResponse, key string, v reflect.Value, ignoreNotFound bool) (*objState, error) {
state := &objState{
meta: &storage.ResponseMeta{},
}
if u, ok := v.Addr().Interface().(runtime.Unstructured); ok {
state.obj = u.NewEmptyInstance()
} else {
state.obj = reflect.New(v.Type()).Interface().(runtime.Object)
}
if len(getResp.Kvs) == 0 {
if !ignoreNotFound {
return nil, storage.NewKeyNotFoundError(key, 0)
}
if err := runtime.SetZeroValue(state.obj); err != nil {
return nil, err
}
} else {
data, stale, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))
if err != nil {
return nil, storage.NewInternalError(err.Error())
}
state.rev = getResp.Kvs[0].ModRevision
state.meta.ResourceVersion = uint64(state.rev)
state.data = data
state.stale = stale
if err := decode(s.codec, s.versioner, state.data, state.obj, state.rev); err != nil {
return nil, err
}
}
return state, nil
}
func (s *store) getStateFromObject(obj runtime.Object) (*objState, error) {
state := &objState{
obj: obj,
meta: &storage.ResponseMeta{},
}
rv, err := s.versioner.ObjectResourceVersion(obj)
if err != nil {
return nil, fmt.Errorf("couldn't get resource version: %v", err)
}
state.rev = int64(rv)
state.meta.ResourceVersion = uint64(state.rev)
// Compute the serialized form - for that we need to temporarily clean
// its resource version field (those are not stored in etcd).
if err := s.versioner.PrepareObjectForStorage(obj); err != nil {
return nil, fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
state.data, err = runtime.Encode(s.codec, obj)
if err != nil {
return nil, err
}
if err := s.versioner.UpdateObject(state.obj, uint64(rv)); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
return state, nil
}
func (s *store) updateState(st *objState, userUpdate storage.UpdateFunc) (runtime.Object, uint64, error) {
ret, ttlPtr, err := userUpdate(st.obj, *st.meta)
if err != nil {
return nil, 0, err
}
if err := s.versioner.PrepareObjectForStorage(ret); err != nil {
return nil, 0, fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
var ttl uint64
if ttlPtr != nil {
ttl = *ttlPtr
}
return ret, ttl, nil
}
// ttlOpts returns client options based on given ttl.
// ttl: if ttl is non-zero, it will attach the key to a lease with ttl of roughly the same length
func (s *store) ttlOpts(ctx context.Context, ttl int64) ([]clientv3.OpOption, error) {
if ttl == 0 {
return nil, nil
}
id, err := s.leaseManager.GetLease(ctx, ttl)
if err != nil {
return nil, err
}
return []clientv3.OpOption{clientv3.WithLease(id)}, nil
}
// validateMinimumResourceVersion returns a 'too large resource' version error when the provided minimumResourceVersion is
// greater than the most recent actualRevision available from storage.
func (s *store) validateMinimumResourceVersion(minimumResourceVersion string, actualRevision uint64) error {
if minimumResourceVersion == "" {
return nil
}
minimumRV, err := s.versioner.ParseResourceVersion(minimumResourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
// Enforce the storage.Interface guarantee that the resource version of the returned data
// "will be at least 'resourceVersion'".
if minimumRV > actualRevision {
return storage.NewTooLargeResourceVersionError(minimumRV, actualRevision, 0)
}
return nil
}
// decode decodes value of bytes into object. It will also set the object resource version to rev.
// On success, objPtr would be set to the object.
func decode(codec runtime.Codec, versioner storage.Versioner, value []byte, objPtr runtime.Object, rev int64) error {
if _, err := conversion.EnforcePtr(objPtr); err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
_, _, err := codec.Decode(value, nil, objPtr)
if err != nil {
return err
}
// being unable to set the version does not prevent the object from being extracted
if err := versioner.UpdateObject(objPtr, uint64(rev)); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
return nil
}
// appendListItem decodes and appends the object (if it passes filter) to v, which must be a slice.
func appendListItem(v reflect.Value, data []byte, rev uint64, pred storage.SelectionPredicate, codec runtime.Codec, versioner storage.Versioner, newItemFunc func() runtime.Object) error {
obj, _, err := codec.Decode(data, nil, newItemFunc())
if err != nil {
return err
}
// being unable to set the version does not prevent the object from being extracted
if err := versioner.UpdateObject(obj, rev); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
if matched, err := pred.Matches(obj); err == nil && matched {
v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem()))
}
return nil
}
func notFound(key string) clientv3.Cmp {
return clientv3.Compare(clientv3.ModRevision(key), "=", 0)
}
// getTypeName returns type name of an object for reporting purposes.
func getTypeName(obj interface{}) string {
return reflect.TypeOf(obj).String()
}
| staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go | 1 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.9905785918235779,
0.06824507564306259,
0.0001657162792980671,
0.00044796805013902485,
0.22530190646648407
] |
{
"id": 0,
"code_window": [
"\n",
"func (s *DryRunnableStorage) GuaranteedUpdate(\n",
"\tctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool,\n",
"\tpreconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, dryRun bool, suggestion runtime.Object) error {\n",
"\tif dryRun {\n",
"\t\terr := s.Storage.Get(ctx, key, storage.GetOptions{IgnoreNotFound: ignoreNotFound}, ptrToType)\n",
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tpreconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, dryRun bool, cachedExistingObject runtime.Object) error {\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go",
"type": "replace",
"edit_start_line_idx": 80
} | // +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
| pkg/controller/ttlafterfinished/config/v1alpha1/zz_generated.deepcopy.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00017636636039242148,
0.0001733401441015303,
0.00016923800285439938,
0.00017441605450585485,
0.0000030079411317274207
] |
{
"id": 0,
"code_window": [
"\n",
"func (s *DryRunnableStorage) GuaranteedUpdate(\n",
"\tctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool,\n",
"\tpreconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, dryRun bool, suggestion runtime.Object) error {\n",
"\tif dryRun {\n",
"\t\terr := s.Storage.Get(ctx, key, storage.GetOptions{IgnoreNotFound: ignoreNotFound}, ptrToType)\n",
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tpreconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, dryRun bool, cachedExistingObject runtime.Object) error {\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go",
"type": "replace",
"edit_start_line_idx": 80
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file exists to enforce this clientset's vanity import path.
package kubernetes // import "k8s.io/client-go/kubernetes"
| staging/src/k8s.io/client-go/kubernetes/import.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00017757488240022212,
0.0001767890207702294,
0.00017600315914023668,
0.0001767890207702294,
7.858616299927235e-7
] |
{
"id": 0,
"code_window": [
"\n",
"func (s *DryRunnableStorage) GuaranteedUpdate(\n",
"\tctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool,\n",
"\tpreconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, dryRun bool, suggestion runtime.Object) error {\n",
"\tif dryRun {\n",
"\t\terr := s.Storage.Get(ctx, key, storage.GetOptions{IgnoreNotFound: ignoreNotFound}, ptrToType)\n",
"\t\tif err != nil {\n",
"\t\t\treturn err\n",
"\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tpreconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, dryRun bool, cachedExistingObject runtime.Object) error {\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go",
"type": "replace",
"edit_start_line_idx": 80
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package
// +k8s:defaulter-gen=TypeMeta
// +groupName=example.test.apiserver.code-generator.k8s.io
// +k8s:conversion-gen=k8s.io/code-generator/_examples/apiserver/apis/example2
// +groupGoName=SecondExample
package v1
| staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.0001775466080289334,
0.00017461308743804693,
0.00017087471496779472,
0.00017541793931741267,
0.0000027826101813843707
] |
{
"id": 1,
"code_window": [
"\t\t}\n",
"\t\treturn s.copyInto(out, ptrToType)\n",
"\t}\n",
"\treturn s.Storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, suggestion)\n",
"}\n",
"\n",
"func (s *DryRunnableStorage) Count(key string) (int64, error) {\n",
"\treturn s.Storage.Count(key)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn s.Storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, cachedExistingObject)\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go",
"type": "replace",
"edit_start_line_idx": 100
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd3
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"path"
"reflect"
"strings"
"time"
"go.etcd.io/etcd/clientv3"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/etcd3/metrics"
"k8s.io/apiserver/pkg/storage/value"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
utiltrace "k8s.io/utils/trace"
)
// authenticatedDataString satisfies the value.Context interface. It uses the key to
// authenticate the stored data. This does not defend against reuse of previously
// encrypted values under the same key, but will prevent an attacker from using an
// encrypted value from a different key. A stronger authenticated data segment would
// include the etcd3 Version field (which is incremented on each write to a key and
// reset when the key is deleted), but an attacker with write access to etcd can
// force deletion and recreation of keys to weaken that angle.
type authenticatedDataString string
// AuthenticatedData implements the value.Context interface.
func (d authenticatedDataString) AuthenticatedData() []byte {
return []byte(string(d))
}
var _ value.Context = authenticatedDataString("")
type store struct {
client *clientv3.Client
codec runtime.Codec
versioner storage.Versioner
transformer value.Transformer
pathPrefix string
watcher *watcher
pagingEnabled bool
leaseManager *leaseManager
}
type objState struct {
obj runtime.Object
meta *storage.ResponseMeta
rev int64
data []byte
stale bool
}
// New returns an etcd3 implementation of storage.Interface.
func New(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, transformer value.Transformer, pagingEnabled bool, leaseReuseDurationSeconds int64) storage.Interface {
return newStore(c, newFunc, pagingEnabled, leaseReuseDurationSeconds, codec, prefix, transformer)
}
func newStore(c *clientv3.Client, newFunc func() runtime.Object, pagingEnabled bool, leaseReuseDurationSeconds int64, codec runtime.Codec, prefix string, transformer value.Transformer) *store {
versioner := APIObjectVersioner{}
result := &store{
client: c,
codec: codec,
versioner: versioner,
transformer: transformer,
pagingEnabled: pagingEnabled,
// for compatibility with etcd2 impl.
// no-op for default prefix of '/registry'.
// keeps compatibility with etcd2 impl for custom prefixes that don't start with '/'
pathPrefix: path.Join("/", prefix),
watcher: newWatcher(c, codec, newFunc, versioner, transformer),
leaseManager: newDefaultLeaseManager(c, leaseReuseDurationSeconds),
}
return result
}
// Versioner implements storage.Interface.Versioner.
func (s *store) Versioner() storage.Versioner {
return s.versioner
}
// Get implements storage.Interface.Get.
func (s *store) Get(ctx context.Context, key string, opts storage.GetOptions, out runtime.Object) error {
key = path.Join(s.pathPrefix, key)
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return err
}
if err = s.validateMinimumResourceVersion(opts.ResourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
if len(getResp.Kvs) == 0 {
if opts.IgnoreNotFound {
return runtime.SetZeroValue(out)
}
return storage.NewKeyNotFoundError(key, 0)
}
kv := getResp.Kvs[0]
data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
return decode(s.codec, s.versioner, data, out, kv.ModRevision)
}
// Create implements storage.Interface.Create.
func (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error {
if version, err := s.versioner.ObjectResourceVersion(obj); err == nil && version != 0 {
return errors.New("resourceVersion should not be set on objects to be created")
}
if err := s.versioner.PrepareObjectForStorage(obj); err != nil {
return fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
data, err := runtime.Encode(s.codec, obj)
if err != nil {
return err
}
key = path.Join(s.pathPrefix, key)
opts, err := s.ttlOpts(ctx, int64(ttl))
if err != nil {
return err
}
newData, err := s.transformer.TransformToStorage(data, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
notFound(key),
).Then(
clientv3.OpPut(key, string(newData), opts...),
).Commit()
metrics.RecordEtcdRequestLatency("create", getTypeName(obj), startTime)
if err != nil {
return err
}
if !txnResp.Succeeded {
return storage.NewKeyExistsError(key, 0)
}
if out != nil {
putResp := txnResp.Responses[0].GetResponsePut()
return decode(s.codec, s.versioner, data, out, putResp.Header.Revision)
}
return nil
}
// Delete implements storage.Interface.Delete.
func (s *store) Delete(
ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions,
validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
v, err := conversion.EnforcePtr(out)
if err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
key = path.Join(s.pathPrefix, key)
return s.conditionalDelete(ctx, key, out, v, preconditions, validateDeletion, cachedExistingObject)
}
func (s *store) conditionalDelete(
ctx context.Context, key string, out runtime.Object, v reflect.Value, preconditions *storage.Preconditions,
validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
getCurrentState := func() (*objState, error) {
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return nil, err
}
return s.getState(getResp, key, v, false)
}
var origState *objState
var err error
var origStateIsCurrent bool
if cachedExistingObject != nil {
origState, err = s.getStateFromObject(cachedExistingObject)
} else {
origState, err = getCurrentState()
origStateIsCurrent = true
}
if err != nil {
return err
}
for {
if preconditions != nil {
if err := preconditions.Check(key, origState.obj); err != nil {
if origStateIsCurrent {
return err
}
// It's possible we're working with stale data.
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
origStateIsCurrent = true
// Retry
continue
}
}
if err := validateDeletion(ctx, origState.obj); err != nil {
if origStateIsCurrent {
return err
}
// It's possible we're working with stale data.
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
origStateIsCurrent = true
// Retry
continue
}
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev),
).Then(
clientv3.OpDelete(key),
).Else(
clientv3.OpGet(key),
).Commit()
metrics.RecordEtcdRequestLatency("delete", getTypeName(out), startTime)
if err != nil {
return err
}
if !txnResp.Succeeded {
getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
klog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key)
origState, err = s.getState(getResp, key, v, false)
if err != nil {
return err
}
origStateIsCurrent = true
continue
}
return decode(s.codec, s.versioner, origState.data, out, origState.rev)
}
}
// GuaranteedUpdate implements storage.Interface.GuaranteedUpdate.
func (s *store) GuaranteedUpdate(
ctx context.Context, key string, out runtime.Object, ignoreNotFound bool,
preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, suggestion runtime.Object) error {
trace := utiltrace.New("GuaranteedUpdate etcd3", utiltrace.Field{"type", getTypeName(out)})
defer trace.LogIfLong(500 * time.Millisecond)
v, err := conversion.EnforcePtr(out)
if err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
key = path.Join(s.pathPrefix, key)
getCurrentState := func() (*objState, error) {
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return nil, err
}
return s.getState(getResp, key, v, ignoreNotFound)
}
var origState *objState
var mustCheckData bool
if suggestion != nil {
origState, err = s.getStateFromObject(suggestion)
mustCheckData = true
} else {
origState, err = getCurrentState()
}
if err != nil {
return err
}
trace.Step("initial value restored")
transformContext := authenticatedDataString(key)
for {
if err := preconditions.Check(key, origState.obj); err != nil {
// If our data is already up to date, return the error
if !mustCheckData {
return err
}
// It's possible we were working with stale data
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
// Retry
continue
}
ret, ttl, err := s.updateState(origState, tryUpdate)
if err != nil {
// If our data is already up to date, return the error
if !mustCheckData {
return err
}
// It's possible we were working with stale data
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
// Retry
continue
}
data, err := runtime.Encode(s.codec, ret)
if err != nil {
return err
}
if !origState.stale && bytes.Equal(data, origState.data) {
// if we skipped the original Get in this loop, we must refresh from
// etcd in order to be sure the data in the store is equivalent to
// our desired serialization
if mustCheckData {
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
if !bytes.Equal(data, origState.data) {
// original data changed, restart loop
continue
}
}
// recheck that the data from etcd is not stale before short-circuiting a write
if !origState.stale {
return decode(s.codec, s.versioner, origState.data, out, origState.rev)
}
}
newData, err := s.transformer.TransformToStorage(data, transformContext)
if err != nil {
return storage.NewInternalError(err.Error())
}
opts, err := s.ttlOpts(ctx, int64(ttl))
if err != nil {
return err
}
trace.Step("Transaction prepared")
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev),
).Then(
clientv3.OpPut(key, string(newData), opts...),
).Else(
clientv3.OpGet(key),
).Commit()
metrics.RecordEtcdRequestLatency("update", getTypeName(out), startTime)
if err != nil {
return err
}
trace.Step("Transaction committed")
if !txnResp.Succeeded {
getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
klog.V(4).Infof("GuaranteedUpdate of %s failed because of a conflict, going to retry", key)
origState, err = s.getState(getResp, key, v, ignoreNotFound)
if err != nil {
return err
}
trace.Step("Retry value restored")
mustCheckData = false
continue
}
putResp := txnResp.Responses[0].GetResponsePut()
return decode(s.codec, s.versioner, data, out, putResp.Header.Revision)
}
}
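// Illustrative sketch, not part of the upstream file: callers drive the retry loop above
// through a tryUpdate closure. The function name and usage below are hypothetical and only
// show the shape of a typical invocation against a storage.Interface backed by this store.
func exampleGuaranteedUpdateUsage(ctx context.Context, s storage.Interface, key string, out runtime.Object) error {
	return s.GuaranteedUpdate(ctx, key, out, false, nil,
		func(input runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) {
			// Mutate and return input here. Returning it unchanged lets the bytes.Equal
			// short-circuit above skip the write to etcd entirely.
			return input, nil, nil
		},
		nil, // no suggestion: the store fetches the current state from etcd
	)
}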
// GetToList implements storage.Interface.GetToList.
func (s *store) GetToList(ctx context.Context, key string, listOpts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := listOpts.ResourceVersion
match := listOpts.ResourceVersionMatch
pred := listOpts.Predicate
trace := utiltrace.New("GetToList etcd3",
utiltrace.Field{"key", key},
utiltrace.Field{"resourceVersion", resourceVersion},
utiltrace.Field{"resourceVersionMatch", match},
utiltrace.Field{"limit", pred.Limit},
utiltrace.Field{"continue", pred.Continue})
defer trace.LogIfLong(500 * time.Millisecond)
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
v, err := conversion.EnforcePtr(listPtr)
if err != nil || v.Kind() != reflect.Slice {
return fmt.Errorf("need ptr to slice: %v", err)
}
newItemFunc := getNewItemFunc(listObj, v)
key = path.Join(s.pathPrefix, key)
startTime := time.Now()
var opts []clientv3.OpOption
if len(resourceVersion) > 0 && match == metav1.ResourceVersionMatchExact {
rv, err := s.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
opts = append(opts, clientv3.WithRev(int64(rv)))
}
getResp, err := s.client.KV.Get(ctx, key, opts...)
metrics.RecordEtcdRequestLatency("get", getTypeName(listPtr), startTime)
if err != nil {
return err
}
if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
if len(getResp.Kvs) > 0 {
data, _, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
if err := appendListItem(v, data, uint64(getResp.Kvs[0].ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {
return err
}
}
// update version with cluster level revision
return s.versioner.UpdateList(listObj, uint64(getResp.Header.Revision), "", nil)
}
func getNewItemFunc(listObj runtime.Object, v reflect.Value) func() runtime.Object {
// For unstructured lists with a target group/version, preserve the group/version in the instantiated list items
if unstructuredList, isUnstructured := listObj.(*unstructured.UnstructuredList); isUnstructured {
if apiVersion := unstructuredList.GetAPIVersion(); len(apiVersion) > 0 {
return func() runtime.Object {
return &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": apiVersion}}
}
}
}
// Otherwise just instantiate an empty item
elem := v.Type().Elem()
return func() runtime.Object {
return reflect.New(elem).Interface().(runtime.Object)
}
}
func (s *store) Count(key string) (int64, error) {
key = path.Join(s.pathPrefix, key)
	// We need to make sure the key ends with "/" so that we only get children "directories".
// e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three,
// while with prefix "/a/" will return only "/a/b" which is the correct answer.
if !strings.HasSuffix(key, "/") {
key += "/"
}
startTime := time.Now()
getResp, err := s.client.KV.Get(context.Background(), key, clientv3.WithRange(clientv3.GetPrefixRangeEnd(key)), clientv3.WithCountOnly())
metrics.RecordEtcdRequestLatency("listWithCount", key, startTime)
if err != nil {
return 0, err
}
return getResp.Count, nil
}
// continueToken is a simple structured object for encoding the state of a continue token.
// TODO: if we change the version of the encoded form, we can't start encoding the new version
// until all other servers are upgraded (i.e. we need to support rolling schema)
// This is a public API struct and cannot change.
type continueToken struct {
APIVersion string `json:"v"`
ResourceVersion int64 `json:"rv"`
StartKey string `json:"start"`
}
// decodeContinue transforms an encoded continue token into its start key and resource version.
// TODO: return a typed error that instructs clients that they must relist
func decodeContinue(continueValue, keyPrefix string) (fromKey string, rv int64, err error) {
data, err := base64.RawURLEncoding.DecodeString(continueValue)
if err != nil {
return "", 0, fmt.Errorf("continue key is not valid: %v", err)
}
var c continueToken
if err := json.Unmarshal(data, &c); err != nil {
return "", 0, fmt.Errorf("continue key is not valid: %v", err)
}
switch c.APIVersion {
case "meta.k8s.io/v1":
if c.ResourceVersion == 0 {
return "", 0, fmt.Errorf("continue key is not valid: incorrect encoded start resourceVersion (version meta.k8s.io/v1)")
}
if len(c.StartKey) == 0 {
return "", 0, fmt.Errorf("continue key is not valid: encoded start key empty (version meta.k8s.io/v1)")
}
// defend against path traversal attacks by clients - path.Clean will ensure that startKey cannot
// be at a higher level of the hierarchy, and so when we append the key prefix we will end up with
// continue start key that is fully qualified and cannot range over anything less specific than
// keyPrefix.
key := c.StartKey
if !strings.HasPrefix(key, "/") {
key = "/" + key
}
cleaned := path.Clean(key)
if cleaned != key {
return "", 0, fmt.Errorf("continue key is not valid: %s", c.StartKey)
}
return keyPrefix + cleaned[1:], c.ResourceVersion, nil
default:
return "", 0, fmt.Errorf("continue key is not valid: server does not recognize this encoded version %q", c.APIVersion)
}
}
// encodeContinue returns a string representing the encoded continuation of the current query.
func encodeContinue(key, keyPrefix string, resourceVersion int64) (string, error) {
nextKey := strings.TrimPrefix(key, keyPrefix)
if nextKey == key {
return "", fmt.Errorf("unable to encode next field: the key and key prefix do not match")
}
out, err := json.Marshal(&continueToken{APIVersion: "meta.k8s.io/v1", ResourceVersion: resourceVersion, StartKey: nextKey})
if err != nil {
return "", err
}
return base64.RawURLEncoding.EncodeToString(out), nil
}
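// Illustrative sketch, not part of the upstream file: encodeContinue and decodeContinue
// round-trip a continue token. The key, prefix, and revision below are hypothetical values.
func exampleContinueTokenRoundTrip() (string, int64, error) {
	token, err := encodeContinue("/registry/pods/ns1/pod-100\x00", "/registry/pods/", 12345)
	if err != nil {
		return "", 0, err
	}
	// decodeContinue checks the token version, rejects path traversal via path.Clean,
	// and re-applies the prefix, yielding "/registry/pods/ns1/pod-100\x00" and 12345 here.
	return decodeContinue(token, "/registry/pods/")
}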
// List implements storage.Interface.List.
func (s *store) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := opts.ResourceVersion
match := opts.ResourceVersionMatch
pred := opts.Predicate
trace := utiltrace.New("List etcd3",
utiltrace.Field{"key", key},
utiltrace.Field{"resourceVersion", resourceVersion},
utiltrace.Field{"resourceVersionMatch", match},
utiltrace.Field{"limit", pred.Limit},
utiltrace.Field{"continue", pred.Continue})
defer trace.LogIfLong(500 * time.Millisecond)
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
v, err := conversion.EnforcePtr(listPtr)
if err != nil || v.Kind() != reflect.Slice {
return fmt.Errorf("need ptr to slice: %v", err)
}
if s.pathPrefix != "" {
key = path.Join(s.pathPrefix, key)
}
	// We need to make sure the key ends with "/" so that we only get children "directories".
// e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three,
// while with prefix "/a/" will return only "/a/b" which is the correct answer.
if !strings.HasSuffix(key, "/") {
key += "/"
}
keyPrefix := key
// set the appropriate clientv3 options to filter the returned data set
var paging bool
options := make([]clientv3.OpOption, 0, 4)
if s.pagingEnabled && pred.Limit > 0 {
paging = true
options = append(options, clientv3.WithLimit(pred.Limit))
}
newItemFunc := getNewItemFunc(listObj, v)
var fromRV *uint64
if len(resourceVersion) > 0 {
parsedRV, err := s.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
fromRV = &parsedRV
}
var returnedRV, continueRV, withRev int64
var continueKey string
switch {
case s.pagingEnabled && len(pred.Continue) > 0:
continueKey, continueRV, err = decodeContinue(pred.Continue, keyPrefix)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid continue token: %v", err))
}
if len(resourceVersion) > 0 && resourceVersion != "0" {
return apierrors.NewBadRequest("specifying resource version is not allowed when using continue")
}
rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)
options = append(options, clientv3.WithRange(rangeEnd))
key = continueKey
// If continueRV > 0, the LIST request needs a specific resource version.
// continueRV==0 is invalid.
// If continueRV < 0, the request is for the latest resource version.
if continueRV > 0 {
withRev = continueRV
returnedRV = continueRV
}
case s.pagingEnabled && pred.Limit > 0:
if fromRV != nil {
switch match {
case metav1.ResourceVersionMatchNotOlderThan:
// The not older than constraint is checked after we get a response from etcd,
// and returnedRV is then set to the revision we get from the etcd response.
case metav1.ResourceVersionMatchExact:
returnedRV = int64(*fromRV)
withRev = returnedRV
case "": // legacy case
if *fromRV > 0 {
returnedRV = int64(*fromRV)
withRev = returnedRV
}
default:
return fmt.Errorf("unknown ResourceVersionMatch value: %v", match)
}
}
rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)
options = append(options, clientv3.WithRange(rangeEnd))
default:
if fromRV != nil {
switch match {
case metav1.ResourceVersionMatchNotOlderThan:
// The not older than constraint is checked after we get a response from etcd,
// and returnedRV is then set to the revision we get from the etcd response.
case metav1.ResourceVersionMatchExact:
returnedRV = int64(*fromRV)
withRev = returnedRV
case "": // legacy case
default:
return fmt.Errorf("unknown ResourceVersionMatch value: %v", match)
}
}
options = append(options, clientv3.WithPrefix())
}
if withRev != 0 {
options = append(options, clientv3.WithRev(withRev))
}
// loop until we have filled the requested limit from etcd or there are no more results
var lastKey []byte
var hasMore bool
var getResp *clientv3.GetResponse
for {
startTime := time.Now()
getResp, err = s.client.KV.Get(ctx, key, options...)
metrics.RecordEtcdRequestLatency("list", getTypeName(listPtr), startTime)
if err != nil {
return interpretListError(err, len(pred.Continue) > 0, continueKey, keyPrefix)
}
if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
hasMore = getResp.More
if len(getResp.Kvs) == 0 && getResp.More {
return fmt.Errorf("no results were found, but etcd indicated there were more values remaining")
}
// avoid small allocations for the result slice, since this can be called in many
// different contexts and we don't know how significantly the result will be filtered
if pred.Empty() {
growSlice(v, len(getResp.Kvs))
} else {
growSlice(v, 2048, len(getResp.Kvs))
}
// take items from the response until the bucket is full, filtering as we go
for _, kv := range getResp.Kvs {
if paging && int64(v.Len()) >= pred.Limit {
hasMore = true
break
}
lastKey = kv.Key
data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(kv.Key))
if err != nil {
return storage.NewInternalErrorf("unable to transform key %q: %v", kv.Key, err)
}
if err := appendListItem(v, data, uint64(kv.ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {
return err
}
}
// indicate to the client which resource version was returned
if returnedRV == 0 {
returnedRV = getResp.Header.Revision
}
// no more results remain or we didn't request paging
if !hasMore || !paging {
break
}
// we're paging but we have filled our bucket
if int64(v.Len()) >= pred.Limit {
break
}
key = string(lastKey) + "\x00"
if withRev == 0 {
withRev = returnedRV
options = append(options, clientv3.WithRev(withRev))
}
}
// instruct the client to begin querying from immediately after the last key we returned
// we never return a key that the client wouldn't be allowed to see
if hasMore {
// we want to start immediately after the last key
next, err := encodeContinue(string(lastKey)+"\x00", keyPrefix, returnedRV)
if err != nil {
return err
}
var remainingItemCount *int64
		// getResp.Count includes objects that do not match pred.
		// Instead of returning an inaccurate count for non-empty selectors, we return nil.
// Only set remainingItemCount if the predicate is empty.
if utilfeature.DefaultFeatureGate.Enabled(features.RemainingItemCount) {
if pred.Empty() {
c := int64(getResp.Count - pred.Limit)
remainingItemCount = &c
}
}
return s.versioner.UpdateList(listObj, uint64(returnedRV), next, remainingItemCount)
}
// no continuation
return s.versioner.UpdateList(listObj, uint64(returnedRV), "", nil)
}
// growSlice takes a slice value and grows its capacity up
// to the maximum of the passed sizes or maxCapacity, whichever
// is smaller. Above maxCapacity decisions about allocation are left
// to the Go runtime on append. This allows a caller to make an
// educated guess about the potential size of the total list while
// still avoiding overly aggressive initial allocation. If sizes
// is empty, maxCapacity will be used as the target capacity.
func growSlice(v reflect.Value, maxCapacity int, sizes ...int) {
cap := v.Cap()
max := cap
for _, size := range sizes {
if size > max {
max = size
}
}
if len(sizes) == 0 || max > maxCapacity {
max = maxCapacity
}
if max <= cap {
return
}
if v.Len() > 0 {
extra := reflect.MakeSlice(v.Type(), 0, max)
reflect.Copy(extra, v)
v.Set(extra)
} else {
extra := reflect.MakeSlice(v.Type(), 0, max)
v.Set(extra)
}
}
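// Illustrative sketch, not part of the upstream file: growSlice pre-allocates at most the
// smaller of the hinted size and maxCapacity; beyond that, append's normal growth applies.
// The slice type and numbers below are hypothetical.
func exampleGrowSlice() int {
	items := make([]string, 0)
	v := reflect.ValueOf(&items).Elem()
	growSlice(v, 2048, 500) // hint of 500 items, capped at 2048
	return cap(items)       // 500 for these inputs
}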
// Watch implements storage.Interface.Watch.
func (s *store) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return s.watch(ctx, key, opts, false)
}
// WatchList implements storage.Interface.WatchList.
func (s *store) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return s.watch(ctx, key, opts, true)
}
func (s *store) watch(ctx context.Context, key string, opts storage.ListOptions, recursive bool) (watch.Interface, error) {
rev, err := s.versioner.ParseResourceVersion(opts.ResourceVersion)
if err != nil {
return nil, err
}
key = path.Join(s.pathPrefix, key)
return s.watcher.Watch(ctx, key, int64(rev), recursive, opts.ProgressNotify, opts.Predicate)
}
func (s *store) getState(getResp *clientv3.GetResponse, key string, v reflect.Value, ignoreNotFound bool) (*objState, error) {
state := &objState{
meta: &storage.ResponseMeta{},
}
if u, ok := v.Addr().Interface().(runtime.Unstructured); ok {
state.obj = u.NewEmptyInstance()
} else {
state.obj = reflect.New(v.Type()).Interface().(runtime.Object)
}
if len(getResp.Kvs) == 0 {
if !ignoreNotFound {
return nil, storage.NewKeyNotFoundError(key, 0)
}
if err := runtime.SetZeroValue(state.obj); err != nil {
return nil, err
}
} else {
data, stale, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))
if err != nil {
return nil, storage.NewInternalError(err.Error())
}
state.rev = getResp.Kvs[0].ModRevision
state.meta.ResourceVersion = uint64(state.rev)
state.data = data
state.stale = stale
if err := decode(s.codec, s.versioner, state.data, state.obj, state.rev); err != nil {
return nil, err
}
}
return state, nil
}
func (s *store) getStateFromObject(obj runtime.Object) (*objState, error) {
state := &objState{
obj: obj,
meta: &storage.ResponseMeta{},
}
rv, err := s.versioner.ObjectResourceVersion(obj)
if err != nil {
return nil, fmt.Errorf("couldn't get resource version: %v", err)
}
state.rev = int64(rv)
state.meta.ResourceVersion = uint64(state.rev)
// Compute the serialized form - for that we need to temporarily clean
// its resource version field (those are not stored in etcd).
if err := s.versioner.PrepareObjectForStorage(obj); err != nil {
return nil, fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
state.data, err = runtime.Encode(s.codec, obj)
if err != nil {
return nil, err
}
if err := s.versioner.UpdateObject(state.obj, uint64(rv)); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
return state, nil
}
func (s *store) updateState(st *objState, userUpdate storage.UpdateFunc) (runtime.Object, uint64, error) {
ret, ttlPtr, err := userUpdate(st.obj, *st.meta)
if err != nil {
return nil, 0, err
}
if err := s.versioner.PrepareObjectForStorage(ret); err != nil {
return nil, 0, fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
var ttl uint64
if ttlPtr != nil {
ttl = *ttlPtr
}
return ret, ttl, nil
}
// ttlOpts returns client options based on given ttl.
// ttl: if ttl is non-zero, it will attach the key to a lease with ttl of roughly the same length
func (s *store) ttlOpts(ctx context.Context, ttl int64) ([]clientv3.OpOption, error) {
if ttl == 0 {
return nil, nil
}
id, err := s.leaseManager.GetLease(ctx, ttl)
if err != nil {
return nil, err
}
return []clientv3.OpOption{clientv3.WithLease(id)}, nil
}
// validateMinimumResourceVersion returns a 'too large resource version' error when the provided minimumResourceVersion is
// greater than the most recent actualRevision available from storage.
func (s *store) validateMinimumResourceVersion(minimumResourceVersion string, actualRevision uint64) error {
if minimumResourceVersion == "" {
return nil
}
minimumRV, err := s.versioner.ParseResourceVersion(minimumResourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
// Enforce the storage.Interface guarantee that the resource version of the returned data
// "will be at least 'resourceVersion'".
if minimumRV > actualRevision {
return storage.NewTooLargeResourceVersionError(minimumRV, actualRevision, 0)
}
return nil
}
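// Illustrative sketch, not part of the upstream file: a read asking for a resource version
// newer than anything etcd has seen fails fast instead of silently returning older data.
// The revisions below are hypothetical.
func exampleMinimumRVCheck(s *store) error {
	// etcd is at revision 100 but the caller asked for at least 200, so this returns
	// a "too large resource version" error that clients can back off and retry on.
	return s.validateMinimumResourceVersion("200", 100)
}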
// decode decodes value of bytes into object. It will also set the object resource version to rev.
// On success, objPtr will hold the decoded object.
func decode(codec runtime.Codec, versioner storage.Versioner, value []byte, objPtr runtime.Object, rev int64) error {
if _, err := conversion.EnforcePtr(objPtr); err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
_, _, err := codec.Decode(value, nil, objPtr)
if err != nil {
return err
}
// being unable to set the version does not prevent the object from being extracted
if err := versioner.UpdateObject(objPtr, uint64(rev)); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
return nil
}
// appendListItem decodes and appends the object (if it passes filter) to v, which must be a slice.
func appendListItem(v reflect.Value, data []byte, rev uint64, pred storage.SelectionPredicate, codec runtime.Codec, versioner storage.Versioner, newItemFunc func() runtime.Object) error {
obj, _, err := codec.Decode(data, nil, newItemFunc())
if err != nil {
return err
}
// being unable to set the version does not prevent the object from being extracted
if err := versioner.UpdateObject(obj, rev); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
if matched, err := pred.Matches(obj); err == nil && matched {
v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem()))
}
return nil
}
func notFound(key string) clientv3.Cmp {
return clientv3.Compare(clientv3.ModRevision(key), "=", 0)
}
// getTypeName returns type name of an object for reporting purposes.
func getTypeName(obj interface{}) string {
return reflect.TypeOf(obj).String()
}
| staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go | 1 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.9828007221221924,
0.07793749868869781,
0.00016616849461570382,
0.0004884222289547324,
0.24094758927822113
] |
{
"id": 1,
"code_window": [
"\t\t}\n",
"\t\treturn s.copyInto(out, ptrToType)\n",
"\t}\n",
"\treturn s.Storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, suggestion)\n",
"}\n",
"\n",
"func (s *DryRunnableStorage) Count(key string) (int64, error) {\n",
"\treturn s.Storage.Count(key)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn s.Storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, cachedExistingObject)\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go",
"type": "replace",
"edit_start_line_idx": 100
} | load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"top.go",
"top_node.go",
"top_pod.go",
],
importmap = "k8s.io/kubernetes/vendor/k8s.io/kubectl/pkg/cmd/top",
importpath = "k8s.io/kubectl/pkg/cmd/top",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/kubectl/pkg/cmd/util:go_default_library",
"//staging/src/k8s.io/kubectl/pkg/metricsutil:go_default_library",
"//staging/src/k8s.io/kubectl/pkg/util/i18n:go_default_library",
"//staging/src/k8s.io/kubectl/pkg/util/templates:go_default_library",
"//staging/src/k8s.io/metrics/pkg/apis/metrics:go_default_library",
"//staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/clientset/versioned:go_default_library",
"//vendor/github.com/spf13/cobra:go_default_library",
"//vendor/github.com/spf13/pflag:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"top_node_test.go",
"top_pod_test.go",
"top_test.go",
],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library",
"//staging/src/k8s.io/client-go/rest/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/kubectl/pkg/cmd/testing:go_default_library",
"//staging/src/k8s.io/kubectl/pkg/scheme:go_default_library",
"//staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library",
"//staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/clientset/versioned/fake:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
| staging/src/k8s.io/kubectl/pkg/cmd/top/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00017414950707461685,
0.0001717088307486847,
0.00016940395289566368,
0.00017157661204691976,
0.0000016354636045434745
] |
{
"id": 1,
"code_window": [
"\t\t}\n",
"\t\treturn s.copyInto(out, ptrToType)\n",
"\t}\n",
"\treturn s.Storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, suggestion)\n",
"}\n",
"\n",
"func (s *DryRunnableStorage) Count(key string) (int64, error) {\n",
"\treturn s.Storage.Count(key)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn s.Storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, cachedExistingObject)\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go",
"type": "replace",
"edit_start_line_idx": 100
} | #!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script checks commonly misspelled English words in all files in the
# working directory using the client9/misspell package.
# Usage: `hack/verify-spelling.sh`.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
export KUBE_ROOT
source "${KUBE_ROOT}/hack/lib/init.sh"
# Ensure that we find the binaries we build before anything else.
export GOBIN="${KUBE_OUTPUT_BINPATH}"
PATH="${GOBIN}:${PATH}"
# Install tools we need
pushd "${KUBE_ROOT}/hack/tools" >/dev/null
GO111MODULE=on go install github.com/client9/misspell/cmd/misspell
popd >/dev/null
# Spell checking
# All the skipping files are defined in hack/.spelling_failures
skipping_file="${KUBE_ROOT}/hack/.spelling_failures"
failing_packages=$(sed "s| | -e |g" "${skipping_file}")
git ls-files | grep -v -e "${failing_packages}" | xargs misspell -i "Creater,creater,ect" -error -o stderr
| hack/verify-spelling.sh | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00017592495714779943,
0.00017082503472920507,
0.00016445948858745396,
0.00017088971799239516,
0.000004196175723336637
] |
{
"id": 1,
"code_window": [
"\t\t}\n",
"\t\treturn s.copyInto(out, ptrToType)\n",
"\t}\n",
"\treturn s.Storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, suggestion)\n",
"}\n",
"\n",
"func (s *DryRunnableStorage) Count(key string) (int64, error) {\n",
"\treturn s.Storage.Count(key)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\treturn s.Storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, cachedExistingObject)\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/registry/generic/registry/dryrun.go",
"type": "replace",
"edit_start_line_idx": 100
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package endpoints
import (
"bytes"
"fmt"
"net/http"
"net/http/httptest"
"regexp"
"sync"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
auditinternal "k8s.io/apiserver/pkg/apis/audit"
genericapitesting "k8s.io/apiserver/pkg/endpoints/testing"
"k8s.io/apiserver/pkg/registry/rest"
)
type fakeAuditSink struct {
lock sync.Mutex
events []*auditinternal.Event
}
func (s *fakeAuditSink) ProcessEvents(evs ...*auditinternal.Event) bool {
s.lock.Lock()
defer s.lock.Unlock()
for _, ev := range evs {
e := ev.DeepCopy()
s.events = append(s.events, e)
}
return true
}
func (s *fakeAuditSink) Events() []*auditinternal.Event {
s.lock.Lock()
defer s.lock.Unlock()
return append([]*auditinternal.Event{}, s.events...)
}
func TestAudit(t *testing.T) {
type eventCheck func(events []*auditinternal.Event) error
// fixtures
simpleFoo := &genericapitesting.Simple{Other: "foo"}
simpleFooJSON, _ := runtime.Encode(testCodec, simpleFoo)
simpleCPrime := &genericapitesting.Simple{
ObjectMeta: metav1.ObjectMeta{Name: "c", Namespace: "other"},
Other: "bla",
}
simpleCPrimeJSON, _ := runtime.Encode(testCodec, simpleCPrime)
userAgent := "audit-test"
// event checks
noRequestBody := func(i int) eventCheck {
return func(events []*auditinternal.Event) error {
if events[i].RequestObject == nil {
return nil
}
return fmt.Errorf("expected RequestBody to be nil, got non-nil '%s'", events[i].RequestObject.Raw)
}
}
requestBodyIs := func(i int, text string) eventCheck {
return func(events []*auditinternal.Event) error {
if events[i].RequestObject == nil {
if text != "" {
return fmt.Errorf("expected RequestBody %q, got <nil>", text)
}
return nil
}
if string(events[i].RequestObject.Raw) != text {
return fmt.Errorf("expected RequestBody %q, got %q", text, string(events[i].RequestObject.Raw))
}
return nil
}
}
requestBodyMatches := func(i int, pattern string) eventCheck {
return func(events []*auditinternal.Event) error {
if events[i].RequestObject == nil {
return fmt.Errorf("expected non nil request object")
}
if matched, _ := regexp.Match(pattern, events[i].RequestObject.Raw); !matched {
return fmt.Errorf("expected RequestBody to match %q, but didn't: %q", pattern, string(events[i].RequestObject.Raw))
}
return nil
}
}
noResponseBody := func(i int) eventCheck {
return func(events []*auditinternal.Event) error {
if events[i].ResponseObject == nil {
return nil
}
return fmt.Errorf("expected ResponseBody to be nil, got non-nil '%s'", events[i].ResponseObject.Raw)
}
}
responseBodyMatches := func(i int, pattern string) eventCheck {
return func(events []*auditinternal.Event) error {
if events[i].ResponseObject == nil {
return fmt.Errorf("expected non nil response object")
}
if matched, _ := regexp.Match(pattern, events[i].ResponseObject.Raw); !matched {
return fmt.Errorf("expected ResponseBody to match %q, but didn't: %q", pattern, string(events[i].ResponseObject.Raw))
}
return nil
}
}
requestUserAgentMatches := func(userAgent string) eventCheck {
return func(events []*auditinternal.Event) error {
for i := range events {
if events[i].UserAgent != userAgent {
return fmt.Errorf("expected request user agent to match %q, but got: %q", userAgent, events[i].UserAgent)
}
}
return nil
}
}
expectedStages := func(stages ...auditinternal.Stage) eventCheck {
return func(events []*auditinternal.Event) error {
if len(stages) != len(events) {
return fmt.Errorf("expected %d stages, but got %d events", len(stages), len(events))
}
for i, stage := range stages {
if events[i].Stage != stage {
return fmt.Errorf("expected stage %q, got %q", stage, events[i].Stage)
}
}
return nil
}
}
for _, test := range []struct {
desc string
req func(server string) (*http.Request, error)
linker runtime.SelfLinker
code int
events int
checks []eventCheck
}{
{
"get",
func(server string) (*http.Request, error) {
return http.NewRequest("GET", server+"/"+prefix+"/"+testGroupVersion.Group+"/"+testGroupVersion.Version+"/namespaces/other/simple/c", bytes.NewBuffer(simpleFooJSON))
},
selfLinker,
200,
2,
[]eventCheck{
noRequestBody(1),
responseBodyMatches(1, `{.*"name":"c".*}`),
expectedStages(auditinternal.StageRequestReceived, auditinternal.StageResponseComplete),
},
},
{
"list",
func(server string) (*http.Request, error) {
return http.NewRequest("GET", server+"/"+prefix+"/"+testGroupVersion.Group+"/"+testGroupVersion.Version+"/namespaces/other/simple?labelSelector=a%3Dfoobar", nil)
},
&setTestSelfLinker{
t: t,
expectedSet: "/" + prefix + "/" + testGroupVersion.Group + "/" + testGroupVersion.Version + "/namespaces/other/simple",
namespace: "other",
},
200,
2,
[]eventCheck{
noRequestBody(1),
responseBodyMatches(1, `{.*"name":"a".*"name":"b".*}`),
expectedStages(auditinternal.StageRequestReceived, auditinternal.StageResponseComplete),
},
},
{
"create",
func(server string) (*http.Request, error) {
return http.NewRequest("POST", server+"/"+prefix+"/"+testGroupVersion.Group+"/"+testGroupVersion.Version+"/namespaces/default/simple", bytes.NewBuffer(simpleFooJSON))
},
selfLinker,
201,
2,
[]eventCheck{
requestBodyIs(1, string(simpleFooJSON)),
responseBodyMatches(1, `{.*"foo".*}`),
expectedStages(auditinternal.StageRequestReceived, auditinternal.StageResponseComplete),
},
},
{
"not-allowed-named-create",
func(server string) (*http.Request, error) {
return http.NewRequest("POST", server+"/"+prefix+"/"+testGroupVersion.Group+"/"+testGroupVersion.Version+"/namespaces/default/simple/named", bytes.NewBuffer(simpleFooJSON))
},
selfLinker,
405,
2,
[]eventCheck{
noRequestBody(1), // the 405 is thrown long before the create handler would be executed
noResponseBody(1), // the 405 is thrown long before the create handler would be executed
expectedStages(auditinternal.StageRequestReceived, auditinternal.StageResponseComplete),
},
},
{
"delete",
func(server string) (*http.Request, error) {
return http.NewRequest("DELETE", server+"/"+prefix+"/"+testGroupVersion.Group+"/"+testGroupVersion.Version+"/namespaces/default/simple/a", nil)
},
selfLinker,
200,
2,
[]eventCheck{
noRequestBody(1),
responseBodyMatches(1, `{.*"kind":"Status".*"status":"Success".*}`),
expectedStages(auditinternal.StageRequestReceived, auditinternal.StageResponseComplete),
},
},
{
"delete-with-options-in-body",
func(server string) (*http.Request, error) {
return http.NewRequest("DELETE", server+"/"+prefix+"/"+testGroupVersion.Group+"/"+testGroupVersion.Version+"/namespaces/default/simple/a", bytes.NewBuffer([]byte(`{"kind":"DeleteOptions"}`)))
},
selfLinker,
200,
2,
[]eventCheck{
requestBodyMatches(1, "DeleteOptions"),
responseBodyMatches(1, `{.*"kind":"Status".*"status":"Success".*}`),
expectedStages(auditinternal.StageRequestReceived, auditinternal.StageResponseComplete),
},
},
{
"update",
func(server string) (*http.Request, error) {
return http.NewRequest("PUT", server+"/"+prefix+"/"+testGroupVersion.Group+"/"+testGroupVersion.Version+"/namespaces/other/simple/c", bytes.NewBuffer(simpleCPrimeJSON))
},
selfLinker,
200,
2,
[]eventCheck{
requestBodyIs(1, string(simpleCPrimeJSON)),
responseBodyMatches(1, `{.*"bla".*}`),
expectedStages(auditinternal.StageRequestReceived, auditinternal.StageResponseComplete),
},
},
{
"update-wrong-namespace",
func(server string) (*http.Request, error) {
return http.NewRequest("PUT", server+"/"+prefix+"/"+testGroupVersion.Group+"/"+testGroupVersion.Version+"/namespaces/default/simple/c", bytes.NewBuffer(simpleCPrimeJSON))
},
selfLinker,
400,
2,
[]eventCheck{
requestBodyIs(1, string(simpleCPrimeJSON)),
responseBodyMatches(1, `"Status".*"status":"Failure".*"code":400}`),
expectedStages(auditinternal.StageRequestReceived, auditinternal.StageResponseComplete),
},
},
{
"patch",
func(server string) (*http.Request, error) {
req, _ := http.NewRequest("PATCH", server+"/"+prefix+"/"+testGroupVersion.Group+"/"+testGroupVersion.Version+"/namespaces/other/simple/c", bytes.NewReader([]byte(`{"labels":{"foo":"bar"}}`)))
req.Header.Set("Content-Type", "application/merge-patch+json; charset=UTF-8")
return req, nil
},
&setTestSelfLinker{
t: t,
expectedSet: "/" + prefix + "/" + testGroupVersion.Group + "/" + testGroupVersion.Version + "/namespaces/other/simple/c",
name: "c",
namespace: "other",
},
200,
2,
[]eventCheck{
requestBodyIs(1, `{"labels":{"foo":"bar"}}`),
responseBodyMatches(1, `"name":"c".*"labels":{"foo":"bar"}`),
expectedStages(auditinternal.StageRequestReceived, auditinternal.StageResponseComplete),
},
},
{
"watch",
func(server string) (*http.Request, error) {
return http.NewRequest("GET", server+"/"+prefix+"/"+testGroupVersion.Group+"/"+testGroupVersion.Version+"/namespaces/other/simple?watch=true", nil)
},
&setTestSelfLinker{
t: t,
expectedSet: "/" + prefix + "/" + testGroupVersion.Group + "/" + testGroupVersion.Version + "/namespaces/other/simple",
namespace: "other",
},
200,
3,
[]eventCheck{
noRequestBody(2),
noResponseBody(2),
expectedStages(auditinternal.StageRequestReceived, auditinternal.StageResponseStarted, auditinternal.StageResponseComplete),
},
},
} {
sink := &fakeAuditSink{}
handler := handleInternal(map[string]rest.Storage{
"simple": &SimpleRESTStorage{
list: []genericapitesting.Simple{
{
ObjectMeta: metav1.ObjectMeta{Name: "a", Namespace: "other"},
Other: "foo",
},
{
ObjectMeta: metav1.ObjectMeta{Name: "b", Namespace: "other"},
Other: "foo",
},
},
item: genericapitesting.Simple{
ObjectMeta: metav1.ObjectMeta{Name: "c", Namespace: "other", UID: "uid"},
Other: "foo",
},
},
}, admissionControl, selfLinker, sink)
server := httptest.NewServer(handler)
defer server.Close()
client := http.Client{Timeout: 2 * time.Second}
req, err := test.req(server.URL)
if err != nil {
t.Errorf("[%s] error creating the request: %v", test.desc, err)
}
req.Header.Set("User-Agent", userAgent)
response, err := client.Do(req)
if err != nil {
t.Errorf("[%s] error: %v", test.desc, err)
}
if response.StatusCode != test.code {
t.Errorf("[%s] expected http code %d, got %#v", test.desc, test.code, response)
}
// close body because the handler might block in Flush, unable to send the remaining event.
response.Body.Close()
// wait for events to arrive, at least the given number in the test
events := []*auditinternal.Event{}
err = wait.Poll(50*time.Millisecond, wait.ForeverTestTimeout, wait.ConditionFunc(func() (done bool, err error) {
events = sink.Events()
return len(events) >= test.events, nil
}))
if err != nil {
t.Errorf("[%s] timeout waiting for events", test.desc)
}
if got := len(events); got != test.events {
t.Errorf("[%s] expected %d audit events, got %d", test.desc, test.events, got)
} else {
for i, check := range test.checks {
err := check(events)
if err != nil {
t.Errorf("[%s,%d] %v", test.desc, i, err)
}
}
if err := requestUserAgentMatches(userAgent)(events); err != nil {
t.Errorf("[%s] %v", test.desc, err)
}
}
if len(events) > 0 {
status := events[len(events)-1].ResponseStatus
if status == nil {
t.Errorf("[%s] expected non-nil ResponseStatus in last event", test.desc)
} else if int(status.Code) != test.code {
t.Errorf("[%s] expected ResponseStatus.Code=%d, got %d", test.desc, test.code, status.Code)
}
}
}
}
| staging/src/k8s.io/apiserver/pkg/endpoints/audit_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.0028006993234157562,
0.00032055345945991576,
0.00016616152424830943,
0.00017263839254155755,
0.0004925202811136842
] |
{
"id": 2,
"code_window": [
"\t// Ignore the suggestion and try to pass down the current version of the object\n",
"\t// read from cache.\n",
"\tif elem, exists, err := c.watchCache.GetByKey(key); err != nil {\n",
"\t\tklog.Errorf(\"GetByKey returned error: %v\", err)\n",
"\t} else if exists {\n",
"\t\tcurrObj := elem.(*storeElement).Object.DeepCopyObject()\n",
"\t\treturn c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, currObj)\n",
"\t}\n",
"\t// If we couldn't get the object, fallback to no-suggestion.\n",
"\treturn c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, nil)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t// DeepCopy the object since we modify resource version when serializing the\n",
"\t\t// current object.\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go",
"type": "add",
"edit_start_line_idx": 754
} | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
)
// Versioner abstracts setting and retrieving metadata fields from database response
// onto the object or list. It is required to maintain storage invariants - updating an
// object twice with the same data except for the ResourceVersion and SelfLink must be
// a no-op. A resourceVersion of type uint64 is a 'raw' resourceVersion,
// intended to be sent directly to or from the backend. A resourceVersion of
// type string is a 'safe' resourceVersion, intended for consumption by users.
type Versioner interface {
// UpdateObject sets storage metadata into an API object. Returns an error if the object
// cannot be updated correctly. May return nil if the requested object does not need metadata
// from database.
UpdateObject(obj runtime.Object, resourceVersion uint64) error
// UpdateList sets the resource version into an API list object. Returns an error if the object
// cannot be updated correctly. May return nil if the requested object does not need metadata from
// database. continueValue is optional and indicates that more results are available if the client
// passes that value to the server in a subsequent call. remainingItemCount indicates the number
// of remaining objects if the list is partial. The remainingItemCount field is omitted during
// serialization if it is set to nil.
UpdateList(obj runtime.Object, resourceVersion uint64, continueValue string, remainingItemCount *int64) error
// PrepareObjectForStorage should set SelfLink and ResourceVersion to the empty value. Should
// return an error if the specified object cannot be updated.
PrepareObjectForStorage(obj runtime.Object) error
// ObjectResourceVersion returns the resource version (for persistence) of the specified object.
// Should return an error if the specified object does not have a persistable version.
ObjectResourceVersion(obj runtime.Object) (uint64, error)
// ParseResourceVersion takes a resource version argument and
	// converts it to a value understood by the storage backend. For watch calls, the result should be passed to helper.Watch().
// Because resourceVersion is an opaque value, the default watch
// behavior for non-zero watch is to watch the next value (if you pass
// "1", you will see updates from "2" onwards).
ParseResourceVersion(resourceVersion string) (uint64, error)
}
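// The sketch below is illustrative only and not part of the original file: it shows the
// raw vs. safe resourceVersion round trip, assuming some concrete Versioner implementation
// (such as the etcd3 APIObjectVersioner) and a hypothetical object 'obj':
//
//	raw, err := versioner.ObjectResourceVersion(obj) // raw uint64 form, e.g. 42
//	if err == nil {
//		_ = versioner.UpdateObject(obj, raw)              // stores the safe string form, "42"
//		parsed, _ := versioner.ParseResourceVersion("42") // back to the raw form: 42
//		_ = parsed
//	}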
// ResponseMeta contains information about the database metadata that is associated with
// an object. It abstracts the actual underlying objects to prevent coupling with concrete
// database and to improve testability.
type ResponseMeta struct {
// TTL is the time to live of the node that contained the returned object. It may be
// zero or negative in some cases (objects may be expired after the requested
// expiration time due to server lag).
TTL int64
// The resource version of the node that contained the returned object.
ResourceVersion uint64
}
// IndexerFunc is a function that for a given object computes
// <value of an index> for a particular <index>.
type IndexerFunc func(obj runtime.Object) string
// IndexerFuncs is a mapping from <index name> to function that
// for a given object computes <value for that index>.
type IndexerFuncs map[string]IndexerFunc
// Everything accepts all objects.
var Everything = SelectionPredicate{
Label: labels.Everything(),
Field: fields.Everything(),
}
// MatchValue defines a pair (<index name>, <value for that index>).
type MatchValue struct {
IndexName string
Value string
}
// Pass an UpdateFunc to Interface.GuaranteedUpdate to make an update
// that is guaranteed to succeed.
// See the comment for GuaranteedUpdate for more details.
type UpdateFunc func(input runtime.Object, res ResponseMeta) (output runtime.Object, ttl *uint64, err error)
// ValidateObjectFunc is a function to act on a given object. An error may be returned
// if the hook cannot be completed. The function may NOT transform the provided
// object.
type ValidateObjectFunc func(ctx context.Context, obj runtime.Object) error
// ValidateAllObjectFunc is a "admit everything" instance of ValidateObjectFunc.
func ValidateAllObjectFunc(ctx context.Context, obj runtime.Object) error {
return nil
}
// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
type Preconditions struct {
// Specifies the target UID.
// +optional
UID *types.UID `json:"uid,omitempty"`
// Specifies the target ResourceVersion
// +optional
ResourceVersion *string `json:"resourceVersion,omitempty"`
}
// NewUIDPreconditions returns a Preconditions with UID set.
func NewUIDPreconditions(uid string) *Preconditions {
u := types.UID(uid)
return &Preconditions{UID: &u}
}
func (p *Preconditions) Check(key string, obj runtime.Object) error {
if p == nil {
return nil
}
objMeta, err := meta.Accessor(obj)
if err != nil {
return NewInternalErrorf(
"can't enforce preconditions %v on un-introspectable object %v, got error: %v",
*p,
obj,
err)
}
if p.UID != nil && *p.UID != objMeta.GetUID() {
err := fmt.Sprintf(
"Precondition failed: UID in precondition: %v, UID in object meta: %v",
*p.UID,
objMeta.GetUID())
return NewInvalidObjError(key, err)
}
if p.ResourceVersion != nil && *p.ResourceVersion != objMeta.GetResourceVersion() {
err := fmt.Sprintf(
"Precondition failed: ResourceVersion in precondition: %v, ResourceVersion in object meta: %v",
*p.ResourceVersion,
objMeta.GetResourceVersion())
return NewInvalidObjError(key, err)
}
return nil
}
// Interface offers a common interface for object marshaling/unmarshaling operations and
// hides all the storage-related operations behind it.
type Interface interface {
// Returns Versioner associated with this interface.
Versioner() Versioner
// Create adds a new object at a key unless it already exists. 'ttl' is time-to-live
// in seconds (0 means forever). If no error is returned and out is not nil, out will be
// set to the read value from database.
Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error
// Delete removes the specified key and returns the value that existed at that spot.
// If key didn't exist, it will return NotFound storage error.
// If 'cachedExistingObject' is non-nil, it can be used as a suggestion about the
// current version of the object to avoid read operation from storage to get it.
// However, the implementations have to retry in case suggestion is stale.
Delete(
ctx context.Context, key string, out runtime.Object, preconditions *Preconditions,
validateDeletion ValidateObjectFunc, cachedExistingObject runtime.Object) error
// Watch begins watching the specified key. Events are decoded into API objects,
// and any items selected by 'p' are sent down to returned watch.Interface.
// resourceVersion may be used to specify what version to begin watching,
// which should be the current resourceVersion, and no longer rv+1
// (e.g. reconnecting without missing any updates).
// If resource version is "0", this interface will get current object at given key
// and send it in an "ADDED" event, before watch starts.
Watch(ctx context.Context, key string, opts ListOptions) (watch.Interface, error)
// WatchList begins watching the specified key's items. Items are decoded into API
// objects and any item selected by 'p' are sent down to returned watch.Interface.
// resourceVersion may be used to specify what version to begin watching,
// which should be the current resourceVersion, and no longer rv+1
// (e.g. reconnecting without missing any updates).
	// If resource version is "0", this interface will list current objects in the directory defined by key
// and send them in "ADDED" events, before watch starts.
WatchList(ctx context.Context, key string, opts ListOptions) (watch.Interface, error)
// Get unmarshals json found at key into objPtr. On a not found error, will either
// return a zero object of the requested type, or an error, depending on 'opts.ignoreNotFound'.
// Treats empty responses and nil response nodes exactly like a not found error.
// The returned contents may be delayed, but it is guaranteed that they will
	// match 'opts.ResourceVersion' according to 'opts.ResourceVersionMatch'.
Get(ctx context.Context, key string, opts GetOptions, objPtr runtime.Object) error
	// GetToList unmarshals json found at key and unpacks it into a *List api object
// (an object that satisfies the runtime.IsList definition).
// The returned contents may be delayed, but it is guaranteed that they will
	// match 'opts.ResourceVersion' according to 'opts.ResourceVersionMatch'.
GetToList(ctx context.Context, key string, opts ListOptions, listObj runtime.Object) error
	// List unmarshals the json objects found at the directory defined by key and unpacks them
	// into a *List api object (an object that satisfies the runtime.IsList definition).
// The returned contents may be delayed, but it is guaranteed that they will
	// match 'opts.ResourceVersion' according to 'opts.ResourceVersionMatch'.
List(ctx context.Context, key string, opts ListOptions, listObj runtime.Object) error
// GuaranteedUpdate keeps calling 'tryUpdate()' to update key 'key' (of type 'ptrToType')
// retrying the update until success if there is index conflict.
// Note that object passed to tryUpdate may change across invocations of tryUpdate() if
// other writers are simultaneously updating it, so tryUpdate() needs to take into account
// the current contents of the object when deciding how the update object should look.
// If the key doesn't exist, it will return NotFound storage error if ignoreNotFound=false
// or zero value in 'ptrToType' parameter otherwise.
// If the object to update has the same value as previous, it won't do any update
// but will return the object in 'ptrToType' parameter.
// If 'suggestion' is non-nil, it can be used as a suggestion about the current version
// of the object to avoid read operation from storage to get it. However, the
// implementations have to retry in case suggestion is stale.
//
// Example:
//
// s := /* implementation of Interface */
// err := s.GuaranteedUpdate(
// "myKey", &MyType{}, true,
// func(input runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) {
// // Before each invocation of the user defined function, "input" is reset to
// // current contents for "myKey" in database.
// curr := input.(*MyType) // Guaranteed to succeed.
//
// // Make the modification
// curr.Counter++
//
// // Return the modified object - return an error to stop iterating. Return
// // a uint64 to alter the TTL on the object, or nil to keep it the same value.
	//       return curr, nil, nil
// },
// )
GuaranteedUpdate(
ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool,
		preconditions *Preconditions, tryUpdate UpdateFunc, suggestion runtime.Object) error
// Count returns number of different entries under the key (generally being path prefix).
Count(key string) (int64, error)
}
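// Illustrative sketch (not part of the original interface file): how a caller might
// combine NewUIDPreconditions with GuaranteedUpdate to mutate an object only while its
// UID still matches. The function name, key and annotation are hypothetical, and
// example.Pod stands in for any registered API type.
//
//	func touchPod(ctx context.Context, s Interface, key string, uid string) error {
//		out := &example.Pod{} // receives the final stored version on success
//		return s.GuaranteedUpdate(ctx, key, out, false, NewUIDPreconditions(uid),
//			func(input runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) {
//				pod := input.(*example.Pod)
//				if pod.Annotations == nil {
//					pod.Annotations = map[string]string{}
//				}
//				pod.Annotations["example.io/touched"] = "true"
//				return pod, nil, nil // nil TTL keeps the existing value
//			},
//			nil, // no suggestion: let the implementation read the current object
//		)
//	}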
// GetOptions provides the options that may be provided for storage get operations.
type GetOptions struct {
// IgnoreNotFound determines what is returned if the requested object is not found. If
// true, a zero object is returned. If false, an error is returned.
IgnoreNotFound bool
// ResourceVersion provides a resource version constraint to apply to the get operation
// as a "not older than" constraint: the result contains data at least as new as the provided
// ResourceVersion. The newest available data is preferred, but any data not older than this
// ResourceVersion may be served.
ResourceVersion string
}
// ListOptions provides the options that may be provided for storage list operations.
type ListOptions struct {
// ResourceVersion provides a resource version constraint to apply to the list operation
// as a "not older than" constraint: the result contains data at least as new as the provided
// ResourceVersion. The newest available data is preferred, but any data not older than this
// ResourceVersion may be served.
ResourceVersion string
// ResourceVersionMatch provides the rule for how the resource version constraint applies. If set
	// to the default value "", the legacy resource version semantics apply.
ResourceVersionMatch metav1.ResourceVersionMatch
// Predicate provides the selection rules for the list operation.
Predicate SelectionPredicate
// ProgressNotify determines whether storage-originated bookmark (progress notify) events should
// be delivered to the users. The option is ignored for non-watch requests.
ProgressNotify bool
}
| staging/src/k8s.io/apiserver/pkg/storage/interfaces.go | 1 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.006996996700763702,
0.0010435095755383372,
0.00016256378148682415,
0.0002643852785695344,
0.0015962996985763311
] |
{
"id": 2,
"code_window": [
"\t// Ignore the suggestion and try to pass down the current version of the object\n",
"\t// read from cache.\n",
"\tif elem, exists, err := c.watchCache.GetByKey(key); err != nil {\n",
"\t\tklog.Errorf(\"GetByKey returned error: %v\", err)\n",
"\t} else if exists {\n",
"\t\tcurrObj := elem.(*storeElement).Object.DeepCopyObject()\n",
"\t\treturn c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, currObj)\n",
"\t}\n",
"\t// If we couldn't get the object, fallback to no-suggestion.\n",
"\treturn c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, nil)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t// DeepCopy the object since we modify resource version when serializing the\n",
"\t\t// current object.\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go",
"type": "add",
"edit_start_line_idx": 754
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"generated_expansion.go",
"node_client.go",
"runtimeclass.go",
],
importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1",
importpath = "k8s.io/client-go/kubernetes/typed/node/v1alpha1",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/node/v1alpha1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
| staging/src/k8s.io/client-go/kubernetes/typed/node/v1alpha1/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00017477566143497825,
0.00017197620763909072,
0.00016899402544368058,
0.00017206757911480963,
0.000002429128244330059
] |
{
"id": 2,
"code_window": [
"\t// Ignore the suggestion and try to pass down the current version of the object\n",
"\t// read from cache.\n",
"\tif elem, exists, err := c.watchCache.GetByKey(key); err != nil {\n",
"\t\tklog.Errorf(\"GetByKey returned error: %v\", err)\n",
"\t} else if exists {\n",
"\t\tcurrObj := elem.(*storeElement).Object.DeepCopyObject()\n",
"\t\treturn c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, currObj)\n",
"\t}\n",
"\t// If we couldn't get the object, fallback to no-suggestion.\n",
"\treturn c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, nil)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t// DeepCopy the object since we modify resource version when serializing the\n",
"\t\t// current object.\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go",
"type": "add",
"edit_start_line_idx": 754
} | package request
import (
"fmt"
"strings"
)
// A Handlers provides a collection of request handlers for various
// stages of handling requests.
type Handlers struct {
Validate HandlerList
Build HandlerList
BuildStream HandlerList
Sign HandlerList
Send HandlerList
ValidateResponse HandlerList
Unmarshal HandlerList
UnmarshalStream HandlerList
UnmarshalMeta HandlerList
UnmarshalError HandlerList
Retry HandlerList
AfterRetry HandlerList
CompleteAttempt HandlerList
Complete HandlerList
}
// Copy returns a copy of this handler's lists.
func (h *Handlers) Copy() Handlers {
return Handlers{
Validate: h.Validate.copy(),
Build: h.Build.copy(),
BuildStream: h.BuildStream.copy(),
Sign: h.Sign.copy(),
Send: h.Send.copy(),
ValidateResponse: h.ValidateResponse.copy(),
Unmarshal: h.Unmarshal.copy(),
UnmarshalStream: h.UnmarshalStream.copy(),
UnmarshalError: h.UnmarshalError.copy(),
UnmarshalMeta: h.UnmarshalMeta.copy(),
Retry: h.Retry.copy(),
AfterRetry: h.AfterRetry.copy(),
CompleteAttempt: h.CompleteAttempt.copy(),
Complete: h.Complete.copy(),
}
}
// Clear removes callback functions for all handlers.
func (h *Handlers) Clear() {
h.Validate.Clear()
h.Build.Clear()
h.BuildStream.Clear()
h.Send.Clear()
h.Sign.Clear()
h.Unmarshal.Clear()
h.UnmarshalStream.Clear()
h.UnmarshalMeta.Clear()
h.UnmarshalError.Clear()
h.ValidateResponse.Clear()
h.Retry.Clear()
h.AfterRetry.Clear()
h.CompleteAttempt.Clear()
h.Complete.Clear()
}
// IsEmpty returns if there are no handlers in any of the handlerlists.
func (h *Handlers) IsEmpty() bool {
if h.Validate.Len() != 0 {
return false
}
if h.Build.Len() != 0 {
return false
}
if h.BuildStream.Len() != 0 {
return false
}
if h.Send.Len() != 0 {
return false
}
if h.Sign.Len() != 0 {
return false
}
if h.Unmarshal.Len() != 0 {
return false
}
if h.UnmarshalStream.Len() != 0 {
return false
}
if h.UnmarshalMeta.Len() != 0 {
return false
}
if h.UnmarshalError.Len() != 0 {
return false
}
if h.ValidateResponse.Len() != 0 {
return false
}
if h.Retry.Len() != 0 {
return false
}
if h.AfterRetry.Len() != 0 {
return false
}
if h.CompleteAttempt.Len() != 0 {
return false
}
if h.Complete.Len() != 0 {
return false
}
return true
}
// A HandlerListRunItem represents an entry in the HandlerList which
// is being run.
type HandlerListRunItem struct {
Index int
Handler NamedHandler
Request *Request
}
// A HandlerList manages zero or more handlers in a list.
type HandlerList struct {
list []NamedHandler
// Called after each request handler in the list is called. If set
// and the func returns true the HandlerList will continue to iterate
// over the request handlers. If false is returned the HandlerList
// will stop iterating.
//
	// Should be used if extra logic needs to be performed between each handler
	// in the list. This can be used to terminate a list's iteration
	// based on a condition, such as stopping on error (see HandlerListStopOnError),
	// or for logging (see HandlerListLogItem).
AfterEachFn func(item HandlerListRunItem) bool
}
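// Illustrative sketch (not part of the original file): wiring AfterEachFn so that a
// list logs every handler invocation and stops iterating on the first error. The
// choice of the Send list here is arbitrary.
//
//	handlers := Handlers{}
//	handlers.Send.AfterEachFn = func(item HandlerListRunItem) bool {
//		HandlerListLogItem(item)            // log handler index, name and current error
//		return HandlerListStopOnError(item) // keep iterating only while Error is nil
//	}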
// A NamedHandler is a struct that contains a name and function callback.
type NamedHandler struct {
Name string
Fn func(*Request)
}
// copy creates a copy of the handler list.
func (l *HandlerList) copy() HandlerList {
n := HandlerList{
AfterEachFn: l.AfterEachFn,
}
if len(l.list) == 0 {
return n
}
n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
return n
}
// Clear clears the handler list.
func (l *HandlerList) Clear() {
l.list = l.list[0:0]
}
// Len returns the number of handlers in the list.
func (l *HandlerList) Len() int {
return len(l.list)
}
// PushBack pushes handler f to the back of the handler list.
func (l *HandlerList) PushBack(f func(*Request)) {
l.PushBackNamed(NamedHandler{"__anonymous", f})
}
// PushBackNamed pushes named handler f to the back of the handler list.
func (l *HandlerList) PushBackNamed(n NamedHandler) {
if cap(l.list) == 0 {
l.list = make([]NamedHandler, 0, 5)
}
l.list = append(l.list, n)
}
// PushFront pushes handler f to the front of the handler list.
func (l *HandlerList) PushFront(f func(*Request)) {
l.PushFrontNamed(NamedHandler{"__anonymous", f})
}
// PushFrontNamed pushes named handler f to the front of the handler list.
func (l *HandlerList) PushFrontNamed(n NamedHandler) {
if cap(l.list) == len(l.list) {
// Allocating new list required
l.list = append([]NamedHandler{n}, l.list...)
} else {
// Enough room to prepend into list.
l.list = append(l.list, NamedHandler{})
copy(l.list[1:], l.list)
l.list[0] = n
}
}
// Remove removes a NamedHandler n
func (l *HandlerList) Remove(n NamedHandler) {
l.RemoveByName(n.Name)
}
// RemoveByName removes a NamedHandler by name.
func (l *HandlerList) RemoveByName(name string) {
for i := 0; i < len(l.list); i++ {
m := l.list[i]
if m.Name == name {
// Shift array preventing creating new arrays
copy(l.list[i:], l.list[i+1:])
l.list[len(l.list)-1] = NamedHandler{}
l.list = l.list[:len(l.list)-1]
			// decrement the index so the next length check is correct
i--
}
}
}
// SwapNamed will swap out any existing handlers with the same name as the
// passed in NamedHandler returning true if handlers were swapped. False is
// returned otherwise.
func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) {
for i := 0; i < len(l.list); i++ {
if l.list[i].Name == n.Name {
l.list[i].Fn = n.Fn
swapped = true
}
}
return swapped
}
// Swap will swap out all handlers matching the name passed in, replacing them
// with the given handler. True is returned if any handlers were swapped.
func (l *HandlerList) Swap(name string, replace NamedHandler) bool {
var swapped bool
for i := 0; i < len(l.list); i++ {
if l.list[i].Name == name {
l.list[i] = replace
swapped = true
}
}
return swapped
}
// SetBackNamed will replace the named handler if it exists in the handler list.
// If the handler does not exist the handler will be added to the end of the list.
func (l *HandlerList) SetBackNamed(n NamedHandler) {
if !l.SwapNamed(n) {
l.PushBackNamed(n)
}
}
// SetFrontNamed will replace the named handler if it exists in the handler list.
// If the handler does not exist the handler will be added to the beginning of
// the list.
func (l *HandlerList) SetFrontNamed(n NamedHandler) {
if !l.SwapNamed(n) {
l.PushFrontNamed(n)
}
}
// Run executes all handlers in the list with a given request object.
func (l *HandlerList) Run(r *Request) {
for i, h := range l.list {
h.Fn(r)
item := HandlerListRunItem{
Index: i, Handler: h, Request: r,
}
if l.AfterEachFn != nil && !l.AfterEachFn(item) {
return
}
}
}
// HandlerListLogItem logs the request handler and the state of the
// request's Error value. Always returns true to continue iterating
// request handlers in a HandlerList.
func HandlerListLogItem(item HandlerListRunItem) bool {
if item.Request.Config.Logger == nil {
return true
}
item.Request.Config.Logger.Log("DEBUG: RequestHandler",
item.Index, item.Handler.Name, item.Request.Error)
return true
}
// HandlerListStopOnError returns false to stop the HandlerList iterating
// over request handlers if Request.Error is not nil. True otherwise
// to continue iterating.
func HandlerListStopOnError(item HandlerListRunItem) bool {
return item.Request.Error == nil
}
// WithAppendUserAgent will add a string to the user agent prefixed with a
// single white space.
func WithAppendUserAgent(s string) Option {
return func(r *Request) {
r.Handlers.Build.PushBack(func(r2 *Request) {
AddToUserAgent(r, s)
})
}
}
// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
// header. If the extra parameters are provided they will be added as metadata to the
// name/version pair resulting in the following format.
// "name/version (extra0; extra1; ...)"
// The user agent part will be concatenated with this current request's user agent string.
func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
ua := fmt.Sprintf("%s/%s", name, version)
if len(extra) > 0 {
ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
}
return func(r *Request) {
AddToUserAgent(r, ua)
}
}
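// For example (illustrative only, not part of the original file), registering
//
//	r.Handlers.Build.PushBackNamed(NamedHandler{
//		Name: "example.UserAgent",
//		Fn:   MakeAddToUserAgentHandler("my-app", "1.2.3", "linux", "amd64"),
//	})
//
// appends "my-app/1.2.3 (linux; amd64)" to the request's User-Agent header when the
// Build handlers run.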
// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
// The input string will be concatenated with the current request's user agent string.
func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
return func(r *Request) {
AddToUserAgent(r, s)
}
}
// WithSetRequestHeaders updates the operation request's HTTP header to contain
// the header key value pairs provided. If the header key already exists in the
// request's HTTP header set, the existing value(s) will be replaced.
func WithSetRequestHeaders(h map[string]string) Option {
return withRequestHeader(h).SetRequestHeaders
}
type withRequestHeader map[string]string
func (h withRequestHeader) SetRequestHeaders(r *Request) {
for k, v := range h {
r.HTTPRequest.Header[k] = []string{v}
}
}
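// Illustrative usage (not part of the original file); the header name and value are
// placeholders, applied here via the request's ApplyOptions helper:
//
//	req.ApplyOptions(WithSetRequestHeaders(map[string]string{
//		"X-Example-Header": "value", // replaces any existing value(s) for this key
//	}))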
| vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.0029883375391364098,
0.0002763308584690094,
0.00016176282952073961,
0.00017111292982008308,
0.0004869971890002489
] |
{
"id": 2,
"code_window": [
"\t// Ignore the suggestion and try to pass down the current version of the object\n",
"\t// read from cache.\n",
"\tif elem, exists, err := c.watchCache.GetByKey(key); err != nil {\n",
"\t\tklog.Errorf(\"GetByKey returned error: %v\", err)\n",
"\t} else if exists {\n",
"\t\tcurrObj := elem.(*storeElement).Object.DeepCopyObject()\n",
"\t\treturn c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, currObj)\n",
"\t}\n",
"\t// If we couldn't get the object, fallback to no-suggestion.\n",
"\treturn c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, nil)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t// DeepCopy the object since we modify resource version when serializing the\n",
"\t\t// current object.\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go",
"type": "add",
"edit_start_line_idx": 754
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"bytes"
"text/template"
"github.com/pkg/errors"
)
// ParseTemplate validates and parses the template passed as an argument
func ParseTemplate(strtmpl string, obj interface{}) ([]byte, error) {
var buf bytes.Buffer
tmpl, err := template.New("template").Parse(strtmpl)
if err != nil {
return nil, errors.Wrap(err, "error when parsing template")
}
err = tmpl.Execute(&buf, obj)
if err != nil {
return nil, errors.Wrap(err, "error when executing template")
}
return buf.Bytes(), nil
}
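// Illustrative usage (not part of the original file); the template text and data are
// made up for the example:
//
//	out, err := ParseTemplate("image: {{ .Repository }}/etcd", struct{ Repository string }{"k8s.gcr.io"})
//	// on success, out == []byte("image: k8s.gcr.io/etcd")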
| cmd/kubeadm/app/util/template.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.0001778663572622463,
0.0001716518308967352,
0.00016415743448305875,
0.00017229175136890262,
0.000006174402642500354
] |
{
"id": 3,
"code_window": [
"// GuaranteedUpdate implements storage.Interface.GuaranteedUpdate.\n",
"func (s *store) GuaranteedUpdate(\n",
"\tctx context.Context, key string, out runtime.Object, ignoreNotFound bool,\n",
"\tpreconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, suggestion runtime.Object) error {\n",
"\ttrace := utiltrace.New(\"GuaranteedUpdate etcd3\", utiltrace.Field{\"type\", getTypeName(out)})\n",
"\tdefer trace.LogIfLong(500 * time.Millisecond)\n",
"\n",
"\tv, err := conversion.EnforcePtr(out)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tpreconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, cachedExistingObject runtime.Object) error {\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 287
} | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cacher
import (
"context"
"fmt"
"net/http"
"reflect"
"sync"
"time"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/storage"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
utiltrace "k8s.io/utils/trace"
)
var (
emptyFunc = func() {}
)
const (
// storageWatchListPageSize is the cacher's request chunk size of
// initial and resync watch lists to storage.
storageWatchListPageSize = int64(10000)
	// defaultBookmarkFrequency defines how frequently watch bookmarks should be sent
// in addition to sending a bookmark right before watch deadline.
//
// NOTE: Update `eventFreshDuration` when changing this value.
defaultBookmarkFrequency = time.Minute
)
// Config contains the configuration for a given Cache.
type Config struct {
// An underlying storage.Interface.
Storage storage.Interface
// An underlying storage.Versioner.
Versioner storage.Versioner
// The Cache will be caching objects of a given Type and assumes that they
// are all stored under ResourcePrefix directory in the underlying database.
ResourcePrefix string
// KeyFunc is used to get a key in the underlying storage for a given object.
KeyFunc func(runtime.Object) (string, error)
// GetAttrsFunc is used to get object labels, fields
GetAttrsFunc func(runtime.Object) (label labels.Set, field fields.Set, err error)
// IndexerFuncs is used for optimizing amount of watchers that
// needs to process an incoming event.
IndexerFuncs storage.IndexerFuncs
// Indexers is used to accelerate the list operation, falls back to regular list
// operation if no indexer found.
Indexers *cache.Indexers
// NewFunc is a function that creates new empty object storing a object of type Type.
NewFunc func() runtime.Object
// NewList is a function that creates new empty object storing a list of
// objects of type Type.
NewListFunc func() runtime.Object
Codec runtime.Codec
Clock clock.Clock
}
type watchersMap map[int]*cacheWatcher
func (wm watchersMap) addWatcher(w *cacheWatcher, number int) {
wm[number] = w
}
func (wm watchersMap) deleteWatcher(number int, done func(*cacheWatcher)) {
if watcher, ok := wm[number]; ok {
delete(wm, number)
done(watcher)
}
}
func (wm watchersMap) terminateAll(done func(*cacheWatcher)) {
for key, watcher := range wm {
delete(wm, key)
done(watcher)
}
}
type indexedWatchers struct {
allWatchers watchersMap
valueWatchers map[string]watchersMap
}
func (i *indexedWatchers) addWatcher(w *cacheWatcher, number int, value string, supported bool) {
if supported {
if _, ok := i.valueWatchers[value]; !ok {
i.valueWatchers[value] = watchersMap{}
}
i.valueWatchers[value].addWatcher(w, number)
} else {
i.allWatchers.addWatcher(w, number)
}
}
func (i *indexedWatchers) deleteWatcher(number int, value string, supported bool, done func(*cacheWatcher)) {
if supported {
i.valueWatchers[value].deleteWatcher(number, done)
if len(i.valueWatchers[value]) == 0 {
delete(i.valueWatchers, value)
}
} else {
i.allWatchers.deleteWatcher(number, done)
}
}
func (i *indexedWatchers) terminateAll(objectType reflect.Type, done func(*cacheWatcher)) {
if len(i.allWatchers) > 0 || len(i.valueWatchers) > 0 {
klog.Warningf("Terminating all watchers from cacher %v", objectType)
}
i.allWatchers.terminateAll(done)
for _, watchers := range i.valueWatchers {
watchers.terminateAll(done)
}
i.valueWatchers = map[string]watchersMap{}
}
// As we don't need high precision here, we keep all watcher timeouts that fall within
// the same second in one bucket, and pop them all at once when that bucket expires. To be
// more specific, if you set the fire time at X, you will get the bookmark within the (X-1, X+1) period.
type watcherBookmarkTimeBuckets struct {
lock sync.Mutex
// the key of watcherBuckets is the number of seconds since createTime
watchersBuckets map[int64][]*cacheWatcher
createTime time.Time
startBucketID int64
clock clock.Clock
bookmarkFrequency time.Duration
}
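// Worked example (illustrative, not part of the original file): with createTime = T0
// and a watcher whose next bookmark time is T0+2.7s, bucketID = int64(2.7s/time.Second) = 2,
// so the watcher lands in bucket 2 and popExpiredWatchers returns it once at least two
// full seconds have elapsed since createTime - i.e. within the (X-1, X+1) window described above.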
func newTimeBucketWatchers(clock clock.Clock, bookmarkFrequency time.Duration) *watcherBookmarkTimeBuckets {
return &watcherBookmarkTimeBuckets{
watchersBuckets: make(map[int64][]*cacheWatcher),
createTime: clock.Now(),
startBucketID: 0,
clock: clock,
bookmarkFrequency: bookmarkFrequency,
}
}
// addWatcher adds a watcher to the appropriate bucket; if the deadline is before the start,
// it will be added to the first one.
func (t *watcherBookmarkTimeBuckets) addWatcher(w *cacheWatcher) bool {
nextTime, ok := w.nextBookmarkTime(t.clock.Now(), t.bookmarkFrequency)
if !ok {
return false
}
bucketID := int64(nextTime.Sub(t.createTime) / time.Second)
t.lock.Lock()
defer t.lock.Unlock()
if bucketID < t.startBucketID {
bucketID = t.startBucketID
}
watchers, _ := t.watchersBuckets[bucketID]
t.watchersBuckets[bucketID] = append(watchers, w)
return true
}
func (t *watcherBookmarkTimeBuckets) popExpiredWatchers() [][]*cacheWatcher {
currentBucketID := int64(t.clock.Since(t.createTime) / time.Second)
// There should be one or two elements in almost all cases
expiredWatchers := make([][]*cacheWatcher, 0, 2)
t.lock.Lock()
defer t.lock.Unlock()
for ; t.startBucketID <= currentBucketID; t.startBucketID++ {
if watchers, ok := t.watchersBuckets[t.startBucketID]; ok {
delete(t.watchersBuckets, t.startBucketID)
expiredWatchers = append(expiredWatchers, watchers)
}
}
return expiredWatchers
}
type filterWithAttrsFunc func(key string, l labels.Set, f fields.Set) bool
type indexedTriggerFunc struct {
indexName string
indexerFunc storage.IndexerFunc
}
// Cacher is responsible for serving WATCH and LIST requests for a given
// resource from its internal cache and updating its cache in the background
// based on the underlying storage contents.
// Cacher implements storage.Interface (although most of the calls are just
// delegated to the underlying storage).
type Cacher struct {
// HighWaterMarks for performance debugging.
// Important: Since HighWaterMark is using sync/atomic, it has to be at the top of the struct due to a bug on 32-bit platforms
// See: https://golang.org/pkg/sync/atomic/ for more information
incomingHWM storage.HighWaterMark
// Incoming events that should be dispatched to watchers.
incoming chan watchCacheEvent
sync.RWMutex
// Before accessing the cacher's cache, wait for the ready to be ok.
// This is necessary to prevent users from accessing structures that are
// uninitialized or are being repopulated right now.
// ready needs to be set to false when the cacher is paused or stopped.
// ready needs to be set to true when the cacher is ready to use after
// initialization.
ready *ready
// Underlying storage.Interface.
storage storage.Interface
// Expected type of objects in the underlying cache.
objectType reflect.Type
// "sliding window" of recent changes of objects and the current state.
watchCache *watchCache
reflector *cache.Reflector
// Versioner is used to handle resource versions.
versioner storage.Versioner
// newFunc is a function that creates new empty object storing a object of type Type.
newFunc func() runtime.Object
// indexedTrigger is used for optimizing amount of watchers that needs to process
// an incoming event.
indexedTrigger *indexedTriggerFunc
	// watchers is a mapping from the value of the trigger function that a
	// watcher is interested in to the watchers themselves
watcherIdx int
watchers indexedWatchers
	// Defines a time budget that can be spent on waiting for not-ready watchers
	// while dispatching an event, before shutting them down.
dispatchTimeoutBudget timeBudget
// Handling graceful termination.
stopLock sync.RWMutex
stopped bool
stopCh chan struct{}
stopWg sync.WaitGroup
clock clock.Clock
// timer is used to avoid unnecessary allocations in underlying watchers.
timer *time.Timer
// dispatching determines whether there is currently dispatching of
// any event in flight.
dispatching bool
// watchersBuffer is a list of watchers potentially interested in currently
// dispatched event.
watchersBuffer []*cacheWatcher
// blockedWatchers is a list of watchers whose buffer is currently full.
blockedWatchers []*cacheWatcher
// watchersToStop is a list of watchers that were supposed to be stopped
// during current dispatching, but stopping was deferred to the end of
// dispatching that event to avoid race with closing channels in watchers.
watchersToStop []*cacheWatcher
// Maintain a timeout queue to send the bookmark event before the watcher times out.
bookmarkWatchers *watcherBookmarkTimeBuckets
}
// NewCacherFromConfig creates a new Cacher responsible for servicing WATCH and LIST requests from
// its internal cache and updating its cache in the background based on the
// given configuration.
func NewCacherFromConfig(config Config) (*Cacher, error) {
stopCh := make(chan struct{})
obj := config.NewFunc()
// Give this error when it is constructed rather than when you get the
// first watch item, because it's much easier to track down that way.
if err := runtime.CheckCodec(config.Codec, obj); err != nil {
return nil, fmt.Errorf("storage codec doesn't seem to match given type: %v", err)
}
var indexedTrigger *indexedTriggerFunc
if config.IndexerFuncs != nil {
// For now, we don't support multiple trigger functions defined
// for a given resource.
if len(config.IndexerFuncs) > 1 {
return nil, fmt.Errorf("cacher %s doesn't support more than one IndexerFunc: ", reflect.TypeOf(obj).String())
}
for key, value := range config.IndexerFuncs {
if value != nil {
indexedTrigger = &indexedTriggerFunc{
indexName: key,
indexerFunc: value,
}
}
}
}
if config.Clock == nil {
config.Clock = clock.RealClock{}
}
objType := reflect.TypeOf(obj)
cacher := &Cacher{
ready: newReady(),
storage: config.Storage,
objectType: objType,
versioner: config.Versioner,
newFunc: config.NewFunc,
indexedTrigger: indexedTrigger,
watcherIdx: 0,
watchers: indexedWatchers{
allWatchers: make(map[int]*cacheWatcher),
valueWatchers: make(map[string]watchersMap),
},
// TODO: Figure out the correct value for the buffer size.
incoming: make(chan watchCacheEvent, 100),
dispatchTimeoutBudget: newTimeBudget(stopCh),
// We need to (potentially) stop both:
// - wait.Until go-routine
// - reflector.ListAndWatch
// and there are no guarantees on the order that they will stop.
// So we will be simply closing the channel, and synchronizing on the WaitGroup.
stopCh: stopCh,
clock: config.Clock,
timer: time.NewTimer(time.Duration(0)),
bookmarkWatchers: newTimeBucketWatchers(config.Clock, defaultBookmarkFrequency),
}
// Ensure that timer is stopped.
if !cacher.timer.Stop() {
// Consume triggered (but not yet received) timer event
// so that future reuse does not get a spurious timeout.
<-cacher.timer.C
}
watchCache := newWatchCache(
config.KeyFunc, cacher.processEvent, config.GetAttrsFunc, config.Versioner, config.Indexers, config.Clock, objType)
listerWatcher := NewCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)
reflectorName := "storage/cacher.go:" + config.ResourcePrefix
reflector := cache.NewNamedReflector(reflectorName, listerWatcher, obj, watchCache, 0)
	// Configure the reflector's pager for an appropriate pagination chunk size for fetching data from
// storage. The pager falls back to full list if paginated list calls fail due to an "Expired" error.
reflector.WatchListPageSize = storageWatchListPageSize
cacher.watchCache = watchCache
cacher.reflector = reflector
go cacher.dispatchEvents()
cacher.stopWg.Add(1)
go func() {
defer cacher.stopWg.Done()
defer cacher.terminateAllWatchers()
wait.Until(
func() {
if !cacher.isStopped() {
cacher.startCaching(stopCh)
}
}, time.Second, stopCh,
)
}()
return cacher, nil
}
func (c *Cacher) startCaching(stopChannel <-chan struct{}) {
// The 'usable' lock is always 'RLock'able when it is safe to use the cache.
// It is safe to use the cache after a successful list until a disconnection.
// We start with usable (write) locked. The below OnReplace function will
// unlock it after a successful list. The below defer will then re-lock
// it when this function exits (always due to disconnection), only if
// we actually got a successful list. This cycle will repeat as needed.
successfulList := false
c.watchCache.SetOnReplace(func() {
successfulList = true
c.ready.set(true)
klog.V(1).Infof("cacher (%v): initialized", c.objectType.String())
})
defer func() {
if successfulList {
c.ready.set(false)
}
}()
c.terminateAllWatchers()
	// Note that since onReplace may not be called due to errors, we explicitly
// need to retry it on errors under lock.
// Also note that startCaching is called in a loop, so there's no need
// to have another loop here.
if err := c.reflector.ListAndWatch(stopChannel); err != nil {
klog.Errorf("cacher (%v): unexpected ListAndWatch error: %v; reinitializing...", c.objectType.String(), err)
}
}
// Versioner implements storage.Interface.
func (c *Cacher) Versioner() storage.Versioner {
return c.storage.Versioner()
}
// Create implements storage.Interface.
func (c *Cacher) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error {
return c.storage.Create(ctx, key, obj, out, ttl)
}
// Delete implements storage.Interface.
func (c *Cacher) Delete(
ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions,
validateDeletion storage.ValidateObjectFunc, _ runtime.Object) error {
// Ignore the suggestion and try to pass down the current version of the object
// read from cache.
if elem, exists, err := c.watchCache.GetByKey(key); err != nil {
klog.Errorf("GetByKey returned error: %v", err)
} else if exists {
// DeepCopy the object since we modify resource version when serializing the
// current object.
currObj := elem.(*storeElement).Object.DeepCopyObject()
return c.storage.Delete(ctx, key, out, preconditions, validateDeletion, currObj)
}
// If we couldn't get the object, fallback to no-suggestion.
return c.storage.Delete(ctx, key, out, preconditions, validateDeletion, nil)
}
// Watch implements storage.Interface.
func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
pred := opts.Predicate
watchRV, err := c.versioner.ParseResourceVersion(opts.ResourceVersion)
if err != nil {
return nil, err
}
c.ready.wait()
triggerValue, triggerSupported := "", false
if c.indexedTrigger != nil {
for _, field := range pred.IndexFields {
if field == c.indexedTrigger.indexName {
if value, ok := pred.Field.RequiresExactMatch(field); ok {
triggerValue, triggerSupported = value, true
}
}
}
}
// If there is indexedTrigger defined, but triggerSupported is false,
// we can't narrow the amount of events significantly at this point.
//
	// That said, currently indexedTrigger is defined only for a couple of resources:
// Pods, Nodes, Secrets and ConfigMaps and there is only a constant
// number of watchers for which triggerSupported is false (excluding those
// issued explicitly by users).
// Thus, to reduce the risk of those watchers blocking all watchers of a
// given resource in the system, we increase the sizes of buffers for them.
chanSize := 10
if c.indexedTrigger != nil && !triggerSupported {
// TODO: We should tune this value and ideally make it dependent on the
// number of objects of a given type and/or their churn.
chanSize = 1000
}
	// Determine the watch timeout ('0' means the deadline is not set; skip checking it)
deadline, _ := ctx.Deadline()
// Create a watcher here to reduce memory allocations under lock,
// given that memory allocation may trigger GC and block the thread.
// Also note that emptyFunc is a placeholder, until we will be able
// to compute watcher.forget function (which has to happen under lock).
watcher := newCacheWatcher(chanSize, filterWithAttrsFunction(key, pred), emptyFunc, c.versioner, deadline, pred.AllowWatchBookmarks, c.objectType)
	// We explicitly use the thread-unsafe version and do the locking ourselves to ensure that
// no new events will be processed in the meantime. The watchCache will be unlocked
// on return from this function.
// Note that we cannot do it under Cacher lock, to avoid a deadlock, since the
// underlying watchCache is calling processEvent under its lock.
c.watchCache.RLock()
defer c.watchCache.RUnlock()
initEvents, err := c.watchCache.GetAllEventsSinceThreadUnsafe(watchRV)
if err != nil {
// To match the uncached watch implementation, once we have passed authn/authz/admission,
// and successfully parsed a resource version, other errors must fail with a watch event of type ERROR,
// rather than a directly returned error.
return newErrWatcher(err), nil
}
// With some events already sent, update resourceVersion so that
// events that were buffered and not yet processed won't be delivered
	// to this watcher a second time, causing it to go back in time.
if len(initEvents) > 0 {
watchRV = initEvents[len(initEvents)-1].ResourceVersion
}
func() {
c.Lock()
defer c.Unlock()
// Update watcher.forget function once we can compute it.
watcher.forget = forgetWatcher(c, c.watcherIdx, triggerValue, triggerSupported)
c.watchers.addWatcher(watcher, c.watcherIdx, triggerValue, triggerSupported)
// Add it to the queue only when the client support watch bookmarks.
if watcher.allowWatchBookmarks {
c.bookmarkWatchers.addWatcher(watcher)
}
c.watcherIdx++
}()
go watcher.process(ctx, initEvents, watchRV)
return watcher, nil
}
// WatchList implements storage.Interface.
func (c *Cacher) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return c.Watch(ctx, key, opts)
}
// Get implements storage.Interface.
func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, objPtr runtime.Object) error {
if opts.ResourceVersion == "" {
// If resourceVersion is not specified, serve it from underlying
// storage (for backward compatibility).
return c.storage.Get(ctx, key, opts, objPtr)
}
// If resourceVersion is specified, serve it from cache.
	// It's guaranteed that the returned value is at least as
// fresh as the given resourceVersion.
getRV, err := c.versioner.ParseResourceVersion(opts.ResourceVersion)
if err != nil {
return err
}
if getRV == 0 && !c.ready.check() {
// If Cacher is not yet initialized and we don't require any specific
// minimal resource version, simply forward the request to storage.
return c.storage.Get(ctx, key, opts, objPtr)
}
// Do not create a trace - it's not for free and there are tons
// of Get requests. We can add it if it will be really needed.
c.ready.wait()
objVal, err := conversion.EnforcePtr(objPtr)
if err != nil {
return err
}
obj, exists, readResourceVersion, err := c.watchCache.WaitUntilFreshAndGet(getRV, key, nil)
if err != nil {
return err
}
if exists {
elem, ok := obj.(*storeElement)
if !ok {
return fmt.Errorf("non *storeElement returned from storage: %v", obj)
}
objVal.Set(reflect.ValueOf(elem.Object).Elem())
} else {
objVal.Set(reflect.Zero(objVal.Type()))
if !opts.IgnoreNotFound {
return storage.NewKeyNotFoundError(key, int64(readResourceVersion))
}
}
return nil
}
// GetToList implements storage.Interface.
func (c *Cacher) GetToList(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := opts.ResourceVersion
pred := opts.Predicate
pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)
hasContinuation := pagingEnabled && len(pred.Continue) > 0
hasLimit := pagingEnabled && pred.Limit > 0 && resourceVersion != "0"
if resourceVersion == "" || hasContinuation || hasLimit || opts.ResourceVersionMatch == metav1.ResourceVersionMatchExact {
// If resourceVersion is not specified, serve it from underlying
// storage (for backward compatibility). If a continuation is
// requested, serve it from the underlying storage as well.
// Limits are only sent to storage when resourceVersion is non-zero
// since the watch cache isn't able to perform continuations, and
// limits are ignored when resource version is zero
return c.storage.GetToList(ctx, key, opts, listObj)
}
// If resourceVersion is specified, serve it from cache.
	// It's guaranteed that the returned value is at least as
// fresh as the given resourceVersion.
listRV, err := c.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return err
}
if listRV == 0 && !c.ready.check() {
// If Cacher is not yet initialized and we don't require any specific
// minimal resource version, simply forward the request to storage.
return c.storage.GetToList(ctx, key, opts, listObj)
}
trace := utiltrace.New("cacher list", utiltrace.Field{"type", c.objectType.String()})
defer trace.LogIfLong(500 * time.Millisecond)
c.ready.wait()
trace.Step("Ready")
// List elements with at least 'listRV' from cache.
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
listVal, err := conversion.EnforcePtr(listPtr)
if err != nil {
return err
}
if listVal.Kind() != reflect.Slice {
return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind())
}
filter := filterWithAttrsFunction(key, pred)
obj, exists, readResourceVersion, err := c.watchCache.WaitUntilFreshAndGet(listRV, key, trace)
if err != nil {
return err
}
trace.Step("Got from cache")
if exists {
elem, ok := obj.(*storeElement)
if !ok {
return fmt.Errorf("non *storeElement returned from storage: %v", obj)
}
if filter(elem.Key, elem.Labels, elem.Fields) {
listVal.Set(reflect.Append(listVal, reflect.ValueOf(elem.Object).Elem()))
}
}
if c.versioner != nil {
if err := c.versioner.UpdateList(listObj, readResourceVersion, "", nil); err != nil {
return err
}
}
return nil
}
// List implements storage.Interface.
func (c *Cacher) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := opts.ResourceVersion
pred := opts.Predicate
pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)
hasContinuation := pagingEnabled && len(pred.Continue) > 0
hasLimit := pagingEnabled && pred.Limit > 0 && resourceVersion != "0"
if resourceVersion == "" || hasContinuation || hasLimit || opts.ResourceVersionMatch == metav1.ResourceVersionMatchExact {
// If resourceVersion is not specified, serve it from underlying
// storage (for backward compatibility). If a continuation is
// requested, serve it from the underlying storage as well.
// Limits are only sent to storage when resourceVersion is non-zero
// since the watch cache isn't able to perform continuations, and
// limits are ignored when resource version is zero.
return c.storage.List(ctx, key, opts, listObj)
}
// If resourceVersion is specified, serve it from cache.
	// It's guaranteed that the returned value is at least as
// fresh as the given resourceVersion.
listRV, err := c.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return err
}
if listRV == 0 && !c.ready.check() {
// If Cacher is not yet initialized and we don't require any specific
// minimal resource version, simply forward the request to storage.
return c.storage.List(ctx, key, opts, listObj)
}
trace := utiltrace.New("cacher list", utiltrace.Field{"type", c.objectType.String()})
defer trace.LogIfLong(500 * time.Millisecond)
c.ready.wait()
trace.Step("Ready")
// List elements with at least 'listRV' from cache.
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
listVal, err := conversion.EnforcePtr(listPtr)
if err != nil {
return err
}
if listVal.Kind() != reflect.Slice {
return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind())
}
filter := filterWithAttrsFunction(key, pred)
objs, readResourceVersion, err := c.watchCache.WaitUntilFreshAndList(listRV, pred.MatcherIndex(), trace)
if err != nil {
return err
}
trace.Step("Listed items from cache", utiltrace.Field{"count", len(objs)})
if len(objs) > listVal.Cap() && pred.Label.Empty() && pred.Field.Empty() {
// Resize the slice appropriately, since we already know that none
// of the elements will be filtered out.
listVal.Set(reflect.MakeSlice(reflect.SliceOf(c.objectType.Elem()), 0, len(objs)))
trace.Step("Resized result")
}
for _, obj := range objs {
elem, ok := obj.(*storeElement)
if !ok {
return fmt.Errorf("non *storeElement returned from storage: %v", obj)
}
if filter(elem.Key, elem.Labels, elem.Fields) {
listVal.Set(reflect.Append(listVal, reflect.ValueOf(elem.Object).Elem()))
}
}
trace.Step("Filtered items", utiltrace.Field{"count", listVal.Len()})
if c.versioner != nil {
if err := c.versioner.UpdateList(listObj, readResourceVersion, "", nil); err != nil {
return err
}
}
return nil
}
// GuaranteedUpdate implements storage.Interface.
func (c *Cacher) GuaranteedUpdate(
ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool,
preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, _ runtime.Object) error {
// Ignore the suggestion and try to pass down the current version of the object
// read from cache.
if elem, exists, err := c.watchCache.GetByKey(key); err != nil {
klog.Errorf("GetByKey returned error: %v", err)
} else if exists {
currObj := elem.(*storeElement).Object.DeepCopyObject()
return c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, currObj)
}
// If we couldn't get the object, fallback to no-suggestion.
return c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, nil)
}
// Count implements storage.Interface.
func (c *Cacher) Count(pathPrefix string) (int64, error) {
return c.storage.Count(pathPrefix)
}
// baseObjectThreadUnsafe omits locking for cachingObject.
func baseObjectThreadUnsafe(object runtime.Object) runtime.Object {
if co, ok := object.(*cachingObject); ok {
return co.object
}
return object
}
func (c *Cacher) triggerValuesThreadUnsafe(event *watchCacheEvent) ([]string, bool) {
if c.indexedTrigger == nil {
return nil, false
}
result := make([]string, 0, 2)
result = append(result, c.indexedTrigger.indexerFunc(baseObjectThreadUnsafe(event.Object)))
if event.PrevObject == nil {
return result, true
}
prevTriggerValue := c.indexedTrigger.indexerFunc(baseObjectThreadUnsafe(event.PrevObject))
if result[0] != prevTriggerValue {
result = append(result, prevTriggerValue)
}
return result, true
}
func (c *Cacher) processEvent(event *watchCacheEvent) {
if curLen := int64(len(c.incoming)); c.incomingHWM.Update(curLen) {
// Monitor if this gets backed up, and how much.
klog.V(1).Infof("cacher (%v): %v objects queued in incoming channel.", c.objectType.String(), curLen)
}
c.incoming <- *event
}
func (c *Cacher) dispatchEvents() {
// Jitter to help level out any aggregate load.
bookmarkTimer := c.clock.NewTimer(wait.Jitter(time.Second, 0.25))
defer bookmarkTimer.Stop()
lastProcessedResourceVersion := uint64(0)
for {
select {
case event, ok := <-c.incoming:
if !ok {
return
}
// Don't dispatch bookmarks coming from the storage layer.
// They can be very frequent (even to the level of subseconds)
// to allow efficient watch resumption on kube-apiserver restarts,
// and propagating them down may overload the whole system.
//
// TODO: If at some point we decide the performance and scalability
// footprint is acceptable, this is the place to hook them in.
// However, we then need to check if this was called as a result
// of a bookmark event or regular Add/Update/Delete operation by
// checking if resourceVersion here has changed.
if event.Type != watch.Bookmark {
c.dispatchEvent(&event)
}
lastProcessedResourceVersion = event.ResourceVersion
case <-bookmarkTimer.C():
bookmarkTimer.Reset(wait.Jitter(time.Second, 0.25))
			// Never send a bookmark event if we did not see an event here; this is fine
// because we don't provide any guarantees on sending bookmarks.
if lastProcessedResourceVersion == 0 {
// pop expired watchers in case there has been no update
c.bookmarkWatchers.popExpiredWatchers()
continue
}
bookmarkEvent := &watchCacheEvent{
Type: watch.Bookmark,
Object: c.newFunc(),
ResourceVersion: lastProcessedResourceVersion,
}
if err := c.versioner.UpdateObject(bookmarkEvent.Object, bookmarkEvent.ResourceVersion); err != nil {
klog.Errorf("failure to set resourceVersion to %d on bookmark event %+v", bookmarkEvent.ResourceVersion, bookmarkEvent.Object)
continue
}
c.dispatchEvent(bookmarkEvent)
case <-c.stopCh:
return
}
}
}
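// setCachingObjects wraps the object that will be delivered to watchers
// (Object for add/update events, PrevObject for delete events) in a
// cachingObject so that its serialization is computed at most once per event.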
func setCachingObjects(event *watchCacheEvent, versioner storage.Versioner) {
switch event.Type {
case watch.Added, watch.Modified:
if object, err := newCachingObject(event.Object); err == nil {
event.Object = object
} else {
klog.Errorf("couldn't create cachingObject from: %#v", event.Object)
}
// Don't wrap PrevObject for update event (for create events it is nil).
// We only encode those to deliver DELETE watch events, so if
// event.Object is not nil it can be used only for watchers for which
// selector was satisfied for its previous version and is no longer
// satisfied for the current version.
// This is rare enough that it doesn't justify making deep-copy of the
// object (done by newCachingObject) every time.
case watch.Deleted:
// Don't wrap Object for delete events - these are not to deliver any
// events. Only wrap PrevObject.
if object, err := newCachingObject(event.PrevObject); err == nil {
// Update resource version of the underlying object.
// event.PrevObject is used to deliver DELETE watch events and
// for them, we set resourceVersion to <current> instead of
// the resourceVersion of the last modification of the object.
updateResourceVersionIfNeeded(object.object, versioner, event.ResourceVersion)
event.PrevObject = object
} else {
klog.Errorf("couldn't create cachingObject from: %#v", event.Object)
}
}
}
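// dispatchEvent delivers a single event to all interested watchers: first
// without blocking, and then, for watchers whose buffers are full, blocking
// for up to the remaining dispatch timeout budget.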
func (c *Cacher) dispatchEvent(event *watchCacheEvent) {
c.startDispatching(event)
defer c.finishDispatching()
// Watchers stopped after startDispatching are delayed until finishDispatching.
// Since add() can block, we explicitly add while the cacher is unlocked.
// We dispatch the event in a non-blocking way first, so that faster watchers
// are not blocked by slower ones.
if event.Type == watch.Bookmark {
for _, watcher := range c.watchersBuffer {
watcher.nonblockingAdd(event)
}
} else {
// Set up caching of object serializations only for dispatching this event.
//
// Storing serializations in memory would result in increased memory usage,
// but it would help for caching encodings for watches started from old
// versions. However, we still don't have a convincing data that the gain
// from it justifies increased memory usage, so for now we drop the cached
// serializations after dispatching this event.
//
// Given the deep-copies that are done to create cachingObjects,
// we try to cache serializations only if there are at least 3 watchers.
if len(c.watchersBuffer) >= 3 {
// Make a shallow copy to allow overwriting Object and PrevObject.
wcEvent := *event
setCachingObjects(&wcEvent, c.versioner)
event = &wcEvent
}
c.blockedWatchers = c.blockedWatchers[:0]
for _, watcher := range c.watchersBuffer {
if !watcher.nonblockingAdd(event) {
c.blockedWatchers = append(c.blockedWatchers, watcher)
}
}
if len(c.blockedWatchers) > 0 {
// dispatchEvent is called very often, so arrange
// to reuse timers instead of constantly allocating.
startTime := time.Now()
timeout := c.dispatchTimeoutBudget.takeAvailable()
c.timer.Reset(timeout)
// Make sure every watcher will try to send event without blocking first,
// even if the timer has already expired.
timer := c.timer
for _, watcher := range c.blockedWatchers {
if !watcher.add(event, timer) {
// The timer has fired; clear it by setting it to nil.
timer = nil
}
}
// Stop the timer if it has not fired
if timer != nil && !timer.Stop() {
// Consume triggered (but not yet received) timer event
// so that future reuse does not get a spurious timeout.
<-timer.C
}
c.dispatchTimeoutBudget.returnUnused(timeout - time.Since(startTime))
}
}
}
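// startDispatchingBookmarkEvents selects the watchers whose bookmark time has
// come, adds them to the dispatch buffer and re-queues them for their next
// bookmark. The caller must hold c.Lock().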
func (c *Cacher) startDispatchingBookmarkEvents() {
// Pop already expired watchers. However, explicitly ignore stopped ones,
// as we don't delete watchers from bookmarkWatchers when they are stopped.
for _, watchers := range c.bookmarkWatchers.popExpiredWatchers() {
for _, watcher := range watchers {
// c.Lock() is held here.
// watcher.stopThreadUnsafe() is protected by c.Lock()
if watcher.stopped {
continue
}
c.watchersBuffer = append(c.watchersBuffer, watcher)
// Requeue the watcher for the next bookmark if needed.
c.bookmarkWatchers.addWatcher(watcher)
}
}
}
// startDispatching chooses watchers potentially interested in a given event
// and marks dispatching as true.
func (c *Cacher) startDispatching(event *watchCacheEvent) {
// It is safe to call triggerValuesThreadUnsafe here, because at this
// point only this thread can access this event (we create a separate
// watchCacheEvent for every dispatch).
triggerValues, supported := c.triggerValuesThreadUnsafe(event)
c.Lock()
defer c.Unlock()
c.dispatching = true
// We are reusing the slice to avoid memory reallocations in every
// dispatchEvent() call. That may prevent Go GC from freeing items
// from previous phases that are sitting behind the current length
// of the slice, but there is only a limited number of those and the
// gain from avoiding memory allocations is much bigger.
c.watchersBuffer = c.watchersBuffer[:0]
if event.Type == watch.Bookmark {
c.startDispatchingBookmarkEvents()
// return here to reduce following code indentation and diff
return
}
// Iterate over "allWatchers" no matter what the trigger function is.
for _, watcher := range c.watchers.allWatchers {
c.watchersBuffer = append(c.watchersBuffer, watcher)
}
if supported {
// Iterate over watchers interested in the given values of the trigger.
for _, triggerValue := range triggerValues {
for _, watcher := range c.watchers.valueWatchers[triggerValue] {
c.watchersBuffer = append(c.watchersBuffer, watcher)
}
}
} else {
// supported being false generally means that the trigger function
// is not defined (or is not aware of any indexes). In this case,
// watcher filters should generally also not generate any
// trigger values, but a misconfiguration could still cause
// problems here, so we keep this branch as a safety net.
// Iterate over watchers interested in exact values for all values.
for _, watchers := range c.watchers.valueWatchers {
for _, watcher := range watchers {
c.watchersBuffer = append(c.watchersBuffer, watcher)
}
}
}
}
// finishDispatching stops all the watchers that were supposed to be
// stopped in the meantime, but whose stopping was deferred to avoid closing
// watcher input channels while add() may still be writing to them.
// It also marks dispatching as false.
func (c *Cacher) finishDispatching() {
c.Lock()
defer c.Unlock()
c.dispatching = false
for _, watcher := range c.watchersToStop {
watcher.stopThreadUnsafe()
}
c.watchersToStop = c.watchersToStop[:0]
}
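// terminateAllWatchers stops every currently registered watcher under the
// cacher lock.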
func (c *Cacher) terminateAllWatchers() {
c.Lock()
defer c.Unlock()
c.watchers.terminateAll(c.objectType, c.stopWatcherThreadUnsafe)
}
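// stopWatcherThreadUnsafe stops the given watcher immediately, or defers
// stopping it to finishDispatching if an event is currently being dispatched.
// The caller must hold c.Lock().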
func (c *Cacher) stopWatcherThreadUnsafe(watcher *cacheWatcher) {
if c.dispatching {
c.watchersToStop = append(c.watchersToStop, watcher)
} else {
watcher.stopThreadUnsafe()
}
}
func (c *Cacher) isStopped() bool {
c.stopLock.RLock()
defer c.stopLock.RUnlock()
return c.stopped
}
// Stop implements the graceful termination.
func (c *Cacher) Stop() {
c.stopLock.Lock()
if c.stopped {
// avoid stopping twice (note: cachers are shared with subresources)
c.stopLock.Unlock()
return
}
c.stopped = true
c.stopLock.Unlock()
close(c.stopCh)
c.stopWg.Wait()
}
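// forgetWatcher returns the forget function for the watcher registered at the
// given index: calling it removes the watcher from the cacher's bookkeeping
// and stops it.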
func forgetWatcher(c *Cacher, index int, triggerValue string, triggerSupported bool) func() {
return func() {
c.Lock()
defer c.Unlock()
// It's possible that the watcher is already not in the structure (e.g. in case of
// simultaneous Stop() and terminateAllWatchers()), but it is safe to call stopThreadUnsafe()
// on a watcher multiple times.
c.watchers.deleteWatcher(index, triggerValue, triggerSupported, c.stopWatcherThreadUnsafe)
}
}
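// filterWithAttrsFunction returns a filter that accepts only objects whose key
// is under the given prefix and whose labels and fields match the selection
// predicate.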
func filterWithAttrsFunction(key string, p storage.SelectionPredicate) filterWithAttrsFunc {
filterFunc := func(objKey string, label labels.Set, field fields.Set) bool {
if !hasPathPrefix(objKey, key) {
return false
}
return p.MatchesObjectAttributes(label, field)
}
return filterFunc
}
// LastSyncResourceVersion returns the resource version to which the underlying cache is synced.
func (c *Cacher) LastSyncResourceVersion() (uint64, error) {
c.ready.wait()
resourceVersion := c.reflector.LastSyncResourceVersion()
return c.versioner.ParseResourceVersion(resourceVersion)
}
// cacherListerWatcher wraps a storage.Interface to expose it as a cache.ListerWatcher.
type cacherListerWatcher struct {
storage storage.Interface
resourcePrefix string
newListFunc func() runtime.Object
}
// NewCacherListerWatcher returns a storage.Interface backed ListerWatcher.
func NewCacherListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher {
return &cacherListerWatcher{
storage: storage,
resourcePrefix: resourcePrefix,
newListFunc: newListFunc,
}
}
// Implements cache.ListerWatcher interface.
func (lw *cacherListerWatcher) List(options metav1.ListOptions) (runtime.Object, error) {
list := lw.newListFunc()
pred := storage.SelectionPredicate{
Label: labels.Everything(),
Field: fields.Everything(),
Limit: options.Limit,
Continue: options.Continue,
}
if err := lw.storage.List(context.TODO(), lw.resourcePrefix, storage.ListOptions{ResourceVersionMatch: options.ResourceVersionMatch, Predicate: pred}, list); err != nil {
return nil, err
}
return list, nil
}
// Implements cache.ListerWatcher interface.
func (lw *cacherListerWatcher) Watch(options metav1.ListOptions) (watch.Interface, error) {
opts := storage.ListOptions{
ResourceVersion: options.ResourceVersion,
Predicate: storage.Everything,
}
if utilfeature.DefaultFeatureGate.Enabled(features.EfficientWatchResumption) {
opts.ProgressNotify = true
}
return lw.storage.WatchList(context.TODO(), lw.resourcePrefix, opts)
}
// errWatcher implements watch.Interface to return a single error
type errWatcher struct {
result chan watch.Event
}
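// newErrWatcher returns a watcher whose result channel delivers a single error
// event derived from the given error and is then closed.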
func newErrWatcher(err error) *errWatcher {
// Create an error event
errEvent := watch.Event{Type: watch.Error}
switch err := err.(type) {
case runtime.Object:
errEvent.Object = err
case *errors.StatusError:
errEvent.Object = &err.ErrStatus
default:
errEvent.Object = &metav1.Status{
Status: metav1.StatusFailure,
Message: err.Error(),
Reason: metav1.StatusReasonInternalError,
Code: http.StatusInternalServerError,
}
}
// Create a watcher with room for a single event, populate it, and close the channel
watcher := &errWatcher{result: make(chan watch.Event, 1)}
watcher.result <- errEvent
close(watcher.result)
return watcher
}
// Implements watch.Interface.
func (c *errWatcher) ResultChan() <-chan watch.Event {
return c.result
}
// Implements watch.Interface.
func (c *errWatcher) Stop() {
// no-op
}
// cacheWatcher implements watch.Interface.
// It is not thread-safe.
type cacheWatcher struct {
input chan *watchCacheEvent
result chan watch.Event
done chan struct{}
filter filterWithAttrsFunc
stopped bool
forget func()
versioner storage.Versioner
// The watcher will be closed by the server after the deadline;
// we save it here to send bookmark events before that.
deadline time.Time
allowWatchBookmarks bool
// Object type that the cache watcher is interested in
objectType reflect.Type
}
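// newCacheWatcher constructs a cacheWatcher with input and result channels of
// the given size.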
func newCacheWatcher(chanSize int, filter filterWithAttrsFunc, forget func(), versioner storage.Versioner, deadline time.Time, allowWatchBookmarks bool, objectType reflect.Type) *cacheWatcher {
return &cacheWatcher{
input: make(chan *watchCacheEvent, chanSize),
result: make(chan watch.Event, chanSize),
done: make(chan struct{}),
filter: filter,
stopped: false,
forget: forget,
versioner: versioner,
deadline: deadline,
allowWatchBookmarks: allowWatchBookmarks,
objectType: objectType,
}
}
// Implements watch.Interface.
func (c *cacheWatcher) ResultChan() <-chan watch.Event {
return c.result
}
// Implements watch.Interface.
func (c *cacheWatcher) Stop() {
c.forget()
}
// we rely on the fact that stopThreadUnsafe is actually protected by Cacher.Lock()
func (c *cacheWatcher) stopThreadUnsafe() {
if !c.stopped {
c.stopped = true
close(c.done)
close(c.input)
}
}
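// nonblockingAdd tries to queue the event on the watcher's input channel
// without blocking and reports whether it succeeded.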
func (c *cacheWatcher) nonblockingAdd(event *watchCacheEvent) bool {
select {
case c.input <- event:
return true
default:
return false
}
}
// A nil timer means that add will not block (if it can't send the event immediately, it will terminate the watcher)
func (c *cacheWatcher) add(event *watchCacheEvent, timer *time.Timer) bool {
// Try to send the event immediately, without blocking.
if c.nonblockingAdd(event) {
return true
}
closeFunc := func() {
// This means that we couldn't send event to that watcher.
// Since we don't want to block on it infinitely,
// we simply terminate it.
klog.V(1).Infof("Forcing watcher close due to unresponsiveness: %v", c.objectType.String())
c.forget()
}
if timer == nil {
closeFunc()
return false
}
// OK, block sending, but only until timer fires.
select {
case c.input <- event:
return true
case <-timer.C:
closeFunc()
return false
}
}
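// nextBookmarkTime returns the time at which the next bookmark event should be
// sent to this watcher and whether one should be sent at all (false once the
// watcher is within roughly 2s of its deadline).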
func (c *cacheWatcher) nextBookmarkTime(now time.Time, bookmarkFrequency time.Duration) (time.Time, bool) {
// We try to send bookmarks:
// (a) roughly every minute
// (b) right before the watcher timeout - for now we simply set it 2s before
// the deadline
// The former gives us periodicity if the watch breaks due to unexpected
// conditions, the latter ensures that on timeout the watcher is as close to
// now as possible - this covers 99% of cases.
heartbeatTime := now.Add(bookmarkFrequency)
if c.deadline.IsZero() {
// Timeout is set by our client libraries (e.g. reflector) as well as defaulted by
// the apiserver if properly configured. So this shouldn't happen in practice.
return heartbeatTime, true
}
if pretimeoutTime := c.deadline.Add(-2 * time.Second); pretimeoutTime.Before(heartbeatTime) {
heartbeatTime = pretimeoutTime
}
if heartbeatTime.Before(now) {
return time.Time{}, false
}
return heartbeatTime, true
}
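// getEventObject returns an object that is safe to send to a watch client:
// CacheableObjects are returned as-is (they were deep-copied at construction),
// other objects are deep-copied here.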
func getEventObject(object runtime.Object) runtime.Object {
if _, ok := object.(runtime.CacheableObject); ok {
// It is safe to return without deep-copy, because the underlying
// object was already deep-copied during construction.
return object
}
return object.DeepCopyObject()
}
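// updateResourceVersionIfNeeded stamps resourceVersion onto the object unless
// it is a cachingObject, whose resource version has already been propagated.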
func updateResourceVersionIfNeeded(object runtime.Object, versioner storage.Versioner, resourceVersion uint64) {
if _, ok := object.(*cachingObject); ok {
// We assume that for cachingObject resourceVersion was already propagated before.
return
}
if err := versioner.UpdateObject(object, resourceVersion); err != nil {
utilruntime.HandleError(fmt.Errorf("failure to version api object (%d) %#v: %v", resourceVersion, object, err))
}
}
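// convertToWatchEvent translates a watchCacheEvent into the watch.Event this
// watcher should receive, or returns nil if the watcher's filter matches
// neither the current nor the previous object.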
func (c *cacheWatcher) convertToWatchEvent(event *watchCacheEvent) *watch.Event {
if event.Type == watch.Bookmark {
return &watch.Event{Type: watch.Bookmark, Object: event.Object.DeepCopyObject()}
}
curObjPasses := event.Type != watch.Deleted && c.filter(event.Key, event.ObjLabels, event.ObjFields)
oldObjPasses := false
if event.PrevObject != nil {
oldObjPasses = c.filter(event.Key, event.PrevObjLabels, event.PrevObjFields)
}
if !curObjPasses && !oldObjPasses {
// Watcher is not interested in that object.
return nil
}
switch {
case curObjPasses && !oldObjPasses:
return &watch.Event{Type: watch.Added, Object: getEventObject(event.Object)}
case curObjPasses && oldObjPasses:
return &watch.Event{Type: watch.Modified, Object: getEventObject(event.Object)}
case !curObjPasses && oldObjPasses:
// return a delete event with the previous object content, but with the event's resource version
oldObj := getEventObject(event.PrevObject)
updateResourceVersionIfNeeded(oldObj, c.versioner, event.ResourceVersion)
return &watch.Event{Type: watch.Deleted, Object: oldObj}
}
return nil
}
// NOTE: sendWatchCacheEvent is assumed to not modify <event> !!!
func (c *cacheWatcher) sendWatchCacheEvent(event *watchCacheEvent) {
watchEvent := c.convertToWatchEvent(event)
if watchEvent == nil {
// Watcher is not interested in that object.
return
}
// We need to ensure that if we put event X into c.result, all
// previous events have already been put into it, no matter whether
// c.done is closed or not.
// Thus we cannot simply select on both c.done and c.result, as that
// would give us non-determinism.
// At the same time, we don't want to block infinitely on putting
// to c.result when c.done is already closed.
// The check below ensures that with c.done already closed, we enter
// the next select at most once more. With that, no matter which
// statement we choose there, we will deliver only consecutive
// events.
select {
case <-c.done:
return
default:
}
select {
case c.result <- *watchEvent:
case <-c.done:
}
}
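// process sends the initial events to the watcher and then streams events from
// the input channel that are newer than resourceVersion, until the input
// channel is closed or the context is done.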
func (c *cacheWatcher) process(ctx context.Context, initEvents []*watchCacheEvent, resourceVersion uint64) {
defer utilruntime.HandleCrash()
// Check how long we are processing initEvents.
// As long as these are not processed, we are not processing
// any incoming events, so if it takes long, we may actually
// block all watchers for some time.
// TODO: The logs show processing times of even up to 1s, which is
// very long. However, this doesn't depend that much on the number
// of initEvents. E.g. from the 2000-node Kubemark run we have logs
// like this, e.g.:
// ... processing 13862 initEvents took 66.808689ms
// ... processing 14040 initEvents took 993.532539ms
// We should understand what is blocking us in those cases (e.g.
// lack of CPU, network, or something else) and potentially
// consider increasing the size of the result buffer in those cases.
const initProcessThreshold = 500 * time.Millisecond
startTime := time.Now()
for _, event := range initEvents {
c.sendWatchCacheEvent(event)
}
objType := c.objectType.String()
if len(initEvents) > 0 {
initCounter.WithLabelValues(objType).Add(float64(len(initEvents)))
}
processingTime := time.Since(startTime)
if processingTime > initProcessThreshold {
klog.V(2).Infof("processing %d initEvents of %s took %v", len(initEvents), objType, processingTime)
}
defer close(c.result)
defer c.Stop()
for {
select {
case event, ok := <-c.input:
if !ok {
return
}
// only send events newer than resourceVersion
if event.ResourceVersion > resourceVersion {
c.sendWatchCacheEvent(event)
}
case <-ctx.Done():
return
}
}
}
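// ready is a gate built on a condition variable: wait() blocks until the gate
// has been set to true, check() reports the current state without blocking,
// and set() updates the state and wakes all waiters.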
type ready struct {
ok bool
c *sync.Cond
}
func newReady() *ready {
return &ready{c: sync.NewCond(&sync.RWMutex{})}
}
func (r *ready) wait() {
r.c.L.Lock()
for !r.ok {
r.c.Wait()
}
r.c.L.Unlock()
}
// TODO: Make check() function more sophisticated, in particular
// allow it to behave as "waitWithTimeout".
func (r *ready) check() bool {
rwMutex := r.c.L.(*sync.RWMutex)
rwMutex.RLock()
defer rwMutex.RUnlock()
return r.ok
}
func (r *ready) set(ok bool) {
r.c.L.Lock()
defer r.c.L.Unlock()
r.ok = ok
r.c.Broadcast()
}
| staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go | 1 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.9987723231315613,
0.03194223716855049,
0.00016063617658801377,
0.0001701468718238175,
0.16151180863380432
] |
{
"id": 3,
"code_window": [
"// GuaranteedUpdate implements storage.Interface.GuaranteedUpdate.\n",
"func (s *store) GuaranteedUpdate(\n",
"\tctx context.Context, key string, out runtime.Object, ignoreNotFound bool,\n",
"\tpreconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, suggestion runtime.Object) error {\n",
"\ttrace := utiltrace.New(\"GuaranteedUpdate etcd3\", utiltrace.Field{\"type\", getTypeName(out)})\n",
"\tdefer trace.LogIfLong(500 * time.Millisecond)\n",
"\n",
"\tv, err := conversion.EnforcePtr(out)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tpreconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, cachedExistingObject runtime.Object) error {\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 287
} | /*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type ContainerCredentialGuardInstance struct {
Id string `json:"Id,omitempty"`
CredentialGuard *ContainerCredentialGuardState `json:"CredentialGuard,omitempty"`
HvSocketConfig *ContainerCredentialGuardHvSocketServiceConfig `json:"HvSocketConfig,omitempty"`
}
| vendor/github.com/Microsoft/hcsshim/internal/schema2/container_credential_guard_instance.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00017222716996911913,
0.00017007665883284062,
0.0001679261476965621,
0.00017007665883284062,
0.00000215051113627851
] |
{
"id": 3,
"code_window": [
"// GuaranteedUpdate implements storage.Interface.GuaranteedUpdate.\n",
"func (s *store) GuaranteedUpdate(\n",
"\tctx context.Context, key string, out runtime.Object, ignoreNotFound bool,\n",
"\tpreconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, suggestion runtime.Object) error {\n",
"\ttrace := utiltrace.New(\"GuaranteedUpdate etcd3\", utiltrace.Field{\"type\", getTypeName(out)})\n",
"\tdefer trace.LogIfLong(500 * time.Millisecond)\n",
"\n",
"\tv, err := conversion.EnforcePtr(out)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tpreconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, cachedExistingObject runtime.Object) error {\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 287
} | apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset
| cluster/addons/calico-policy-controller/globalnetworksets-crd.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00017538733663968742,
0.00017325038788840175,
0.00017111342458520085,
0.00017325038788840175,
0.0000021369560272432864
] |
{
"id": 3,
"code_window": [
"// GuaranteedUpdate implements storage.Interface.GuaranteedUpdate.\n",
"func (s *store) GuaranteedUpdate(\n",
"\tctx context.Context, key string, out runtime.Object, ignoreNotFound bool,\n",
"\tpreconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, suggestion runtime.Object) error {\n",
"\ttrace := utiltrace.New(\"GuaranteedUpdate etcd3\", utiltrace.Field{\"type\", getTypeName(out)})\n",
"\tdefer trace.LogIfLong(500 * time.Millisecond)\n",
"\n",
"\tv, err := conversion.EnforcePtr(out)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tpreconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, cachedExistingObject runtime.Object) error {\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 287
} | /*
Package restful, a lean package for creating REST-style WebServices without magic.
WebServices and Routes
A WebService has a collection of Route objects that dispatch incoming Http Requests to function calls.
Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes.
WebServices must be added to a container (see below) in order to handler Http requests from a server.
A Route is defined by a HTTP method, an URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept).
This package has the logic to find the best matching Route and if found, call its Function.
ws := new(restful.WebService)
ws.
Path("/users").
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON, restful.MIME_XML)
ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource
...
// GET http://localhost:8080/users/1
func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
id := request.PathParameter("user-id")
...
}
The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response.
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation.
Regular expression matching Routes
A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path.
For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters.
Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax)
This feature requires the use of a CurlyRouter.
Containers
A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests.
Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container.
The Default container of go-restful uses the http.DefaultServeMux.
You can create your own Container and create a new http.Server for that particular container.
container := restful.NewContainer()
server := &http.Server{Addr: ":8081", Handler: container}
Filters
A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses.
You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc.
In the restful package there are three hooks into the request,response flow where filters can be added.
Each filter must define a FilterFunction:
func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain)
Use the following statement to pass the request,response pair to the next filter or RouteFunction
chain.ProcessFilter(req, resp)
Container Filters
These are processed before any registered WebService.
// install a (global) filter for the default container (processed before any webservice)
restful.Filter(globalLogging)
WebService Filters
These are processed before any Route of a WebService.
// install a webservice filter (processed before any route)
ws.Filter(webserviceLogging).Filter(measureTime)
Route Filters
These are processed before calling the function associated with the Route.
// install 2 chained route filters (processed before calling findUser)
ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser))
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations.
Response Encoding
Two encodings are supported: gzip and deflate. To enable this for all responses:
restful.DefaultContainer.EnableContentEncoding(true)
If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding.
Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route.
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go
OPTIONS support
By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request.
Filter(OPTIONSFilter())
CORS
By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests.
cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer}
Filter(cors.Filter)
Error Handling
Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why.
For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation.
400: Bad Request
If path or query parameters are not valid (content or type) then use http.StatusBadRequest.
404: Not Found
Despite a valid URI, the resource requested may not be available
500: Internal Server Error
If the application logic could not process the request (or write the response) then use http.StatusInternalServerError.
405: Method Not Allowed
The request has a valid URL but the method (GET,PUT,POST,...) is not allowed.
406: Not Acceptable
The request does not have or has an unknown Accept Header set for this operation.
415: Unsupported Media Type
The request does not have or has an unknown Content-Type Header set for this operation.
ServiceError
In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response.
Performance options
This package has several options that affect the performance of your service. It is important to understand them and how you can change it.
restful.DefaultContainer.DoNotRecover(false)
DoNotRecover controls whether panics will be caught to return HTTP 500.
If set to false, the container will recover from panics.
Default value is true
restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20))
If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool.
Because writers are expensive structures, performance is even more improved when using a preloaded cache. You can also inject your own implementation.
Trouble shooting
This package has the means to produce detail logging of the complete Http request matching process and filter invocation.
Enabling this feature requires you to set an implementation of restful.StdLogger (e.g. log.Logger) instance such as:
restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
Logging
The restful.SetLogger() method allows you to override the logger used by the package. By default restful
uses the standard library `log` package and logs to stdout. Different logging packages are supported as
long as they conform to `StdLogger` interface defined in the `log` sub-package, writing an adapter for your
preferred package is simple.
Resources
[project]: https://github.com/emicklei/go-restful
[examples]: https://github.com/emicklei/go-restful/blob/master/examples
[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/
[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape
(c) 2012-2015, http://ernestmicklei.com. MIT License
*/
package restful
| vendor/github.com/emicklei/go-restful/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00022243968851398677,
0.00017067378212232143,
0.00015956556308083236,
0.00016865661018528044,
0.000013095984286337625
] |
{
"id": 4,
"code_window": [
"\t\t}\n",
"\t\treturn s.getState(getResp, key, v, ignoreNotFound)\n",
"\t}\n",
"\n",
"\tvar origState *objState\n",
"\tvar mustCheckData bool\n",
"\tif suggestion != nil {\n",
"\t\torigState, err = s.getStateFromObject(suggestion)\n",
"\t\tmustCheckData = true\n",
"\t} else {\n",
"\t\torigState, err = getCurrentState()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tvar origStateIsCurrent bool\n",
"\tif cachedExistingObject != nil {\n",
"\t\torigState, err = s.getStateFromObject(cachedExistingObject)\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 308
} | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cacher
import (
"context"
"fmt"
"net/http"
"reflect"
"sync"
"time"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/storage"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
utiltrace "k8s.io/utils/trace"
)
var (
emptyFunc = func() {}
)
const (
// storageWatchListPageSize is the cacher's request chunk size of
// initial and resync watch lists to storage.
storageWatchListPageSize = int64(10000)
// defaultBookmarkFrequency defines how frequently watch bookmarks should be send
// in addition to sending a bookmark right before watch deadline.
//
// NOTE: Update `eventFreshDuration` when changing this value.
defaultBookmarkFrequency = time.Minute
)
// Config contains the configuration for a given Cache.
type Config struct {
// An underlying storage.Interface.
Storage storage.Interface
// An underlying storage.Versioner.
Versioner storage.Versioner
// The Cache will be caching objects of a given Type and assumes that they
// are all stored under ResourcePrefix directory in the underlying database.
ResourcePrefix string
// KeyFunc is used to get a key in the underlying storage for a given object.
KeyFunc func(runtime.Object) (string, error)
// GetAttrsFunc is used to get object labels, fields
GetAttrsFunc func(runtime.Object) (label labels.Set, field fields.Set, err error)
// IndexerFuncs is used for optimizing amount of watchers that
// needs to process an incoming event.
IndexerFuncs storage.IndexerFuncs
// Indexers is used to accelerate the list operation, falls back to regular list
// operation if no indexer found.
Indexers *cache.Indexers
// NewFunc is a function that creates new empty object storing a object of type Type.
NewFunc func() runtime.Object
// NewList is a function that creates new empty object storing a list of
// objects of type Type.
NewListFunc func() runtime.Object
Codec runtime.Codec
Clock clock.Clock
}
type watchersMap map[int]*cacheWatcher
func (wm watchersMap) addWatcher(w *cacheWatcher, number int) {
wm[number] = w
}
func (wm watchersMap) deleteWatcher(number int, done func(*cacheWatcher)) {
if watcher, ok := wm[number]; ok {
delete(wm, number)
done(watcher)
}
}
func (wm watchersMap) terminateAll(done func(*cacheWatcher)) {
for key, watcher := range wm {
delete(wm, key)
done(watcher)
}
}
type indexedWatchers struct {
allWatchers watchersMap
valueWatchers map[string]watchersMap
}
func (i *indexedWatchers) addWatcher(w *cacheWatcher, number int, value string, supported bool) {
if supported {
if _, ok := i.valueWatchers[value]; !ok {
i.valueWatchers[value] = watchersMap{}
}
i.valueWatchers[value].addWatcher(w, number)
} else {
i.allWatchers.addWatcher(w, number)
}
}
func (i *indexedWatchers) deleteWatcher(number int, value string, supported bool, done func(*cacheWatcher)) {
if supported {
i.valueWatchers[value].deleteWatcher(number, done)
if len(i.valueWatchers[value]) == 0 {
delete(i.valueWatchers, value)
}
} else {
i.allWatchers.deleteWatcher(number, done)
}
}
func (i *indexedWatchers) terminateAll(objectType reflect.Type, done func(*cacheWatcher)) {
if len(i.allWatchers) > 0 || len(i.valueWatchers) > 0 {
klog.Warningf("Terminating all watchers from cacher %v", objectType)
}
i.allWatchers.terminateAll(done)
for _, watchers := range i.valueWatchers {
watchers.terminateAll(done)
}
i.valueWatchers = map[string]watchersMap{}
}
// As we don't need a high precision here, we keep all watchers timeout within a
// second in a bucket, and pop up them once at the timeout. To be more specific,
// if you set fire time at X, you can get the bookmark within (X-1,X+1) period.
type watcherBookmarkTimeBuckets struct {
lock sync.Mutex
// the key of watcherBuckets is the number of seconds since createTime
watchersBuckets map[int64][]*cacheWatcher
createTime time.Time
startBucketID int64
clock clock.Clock
bookmarkFrequency time.Duration
}
func newTimeBucketWatchers(clock clock.Clock, bookmarkFrequency time.Duration) *watcherBookmarkTimeBuckets {
return &watcherBookmarkTimeBuckets{
watchersBuckets: make(map[int64][]*cacheWatcher),
createTime: clock.Now(),
startBucketID: 0,
clock: clock,
bookmarkFrequency: bookmarkFrequency,
}
}
// adds a watcher to the bucket, if the deadline is before the start, it will be
// added to the first one.
func (t *watcherBookmarkTimeBuckets) addWatcher(w *cacheWatcher) bool {
nextTime, ok := w.nextBookmarkTime(t.clock.Now(), t.bookmarkFrequency)
if !ok {
return false
}
bucketID := int64(nextTime.Sub(t.createTime) / time.Second)
t.lock.Lock()
defer t.lock.Unlock()
if bucketID < t.startBucketID {
bucketID = t.startBucketID
}
watchers, _ := t.watchersBuckets[bucketID]
t.watchersBuckets[bucketID] = append(watchers, w)
return true
}
func (t *watcherBookmarkTimeBuckets) popExpiredWatchers() [][]*cacheWatcher {
currentBucketID := int64(t.clock.Since(t.createTime) / time.Second)
// There should be one or two elements in almost all cases
expiredWatchers := make([][]*cacheWatcher, 0, 2)
t.lock.Lock()
defer t.lock.Unlock()
for ; t.startBucketID <= currentBucketID; t.startBucketID++ {
if watchers, ok := t.watchersBuckets[t.startBucketID]; ok {
delete(t.watchersBuckets, t.startBucketID)
expiredWatchers = append(expiredWatchers, watchers)
}
}
return expiredWatchers
}
type filterWithAttrsFunc func(key string, l labels.Set, f fields.Set) bool
type indexedTriggerFunc struct {
indexName string
indexerFunc storage.IndexerFunc
}
// Cacher is responsible for serving WATCH and LIST requests for a given
// resource from its internal cache and updating its cache in the background
// based on the underlying storage contents.
// Cacher implements storage.Interface (although most of the calls are just
// delegated to the underlying storage).
type Cacher struct {
// HighWaterMarks for performance debugging.
// Important: Since HighWaterMark is using sync/atomic, it has to be at the top of the struct due to a bug on 32-bit platforms
// See: https://golang.org/pkg/sync/atomic/ for more information
incomingHWM storage.HighWaterMark
// Incoming events that should be dispatched to watchers.
incoming chan watchCacheEvent
sync.RWMutex
// Before accessing the cacher's cache, wait for the ready to be ok.
// This is necessary to prevent users from accessing structures that are
// uninitialized or are being repopulated right now.
// ready needs to be set to false when the cacher is paused or stopped.
// ready needs to be set to true when the cacher is ready to use after
// initialization.
ready *ready
// Underlying storage.Interface.
storage storage.Interface
// Expected type of objects in the underlying cache.
objectType reflect.Type
// "sliding window" of recent changes of objects and the current state.
watchCache *watchCache
reflector *cache.Reflector
// Versioner is used to handle resource versions.
versioner storage.Versioner
// newFunc is a function that creates new empty object storing a object of type Type.
newFunc func() runtime.Object
// indexedTrigger is used for optimizing amount of watchers that needs to process
// an incoming event.
indexedTrigger *indexedTriggerFunc
// watchers is mapping from the value of trigger function that a
// watcher is interested into the watchers
watcherIdx int
watchers indexedWatchers
// Defines a time budget that can be spend on waiting for not-ready watchers
// while dispatching event before shutting them down.
dispatchTimeoutBudget timeBudget
// Handling graceful termination.
stopLock sync.RWMutex
stopped bool
stopCh chan struct{}
stopWg sync.WaitGroup
clock clock.Clock
// timer is used to avoid unnecessary allocations in underlying watchers.
timer *time.Timer
// dispatching determines whether there is currently dispatching of
// any event in flight.
dispatching bool
// watchersBuffer is a list of watchers potentially interested in currently
// dispatched event.
watchersBuffer []*cacheWatcher
// blockedWatchers is a list of watchers whose buffer is currently full.
blockedWatchers []*cacheWatcher
// watchersToStop is a list of watchers that were supposed to be stopped
// during current dispatching, but stopping was deferred to the end of
// dispatching that event to avoid race with closing channels in watchers.
watchersToStop []*cacheWatcher
// Maintain a timeout queue to send the bookmark event before the watcher times out.
bookmarkWatchers *watcherBookmarkTimeBuckets
}
// NewCacherFromConfig creates a new Cacher responsible for servicing WATCH and LIST requests from
// its internal cache and updating its cache in the background based on the
// given configuration.
func NewCacherFromConfig(config Config) (*Cacher, error) {
stopCh := make(chan struct{})
obj := config.NewFunc()
// Give this error when it is constructed rather than when you get the
// first watch item, because it's much easier to track down that way.
if err := runtime.CheckCodec(config.Codec, obj); err != nil {
return nil, fmt.Errorf("storage codec doesn't seem to match given type: %v", err)
}
var indexedTrigger *indexedTriggerFunc
if config.IndexerFuncs != nil {
// For now, we don't support multiple trigger functions defined
// for a given resource.
if len(config.IndexerFuncs) > 1 {
return nil, fmt.Errorf("cacher %s doesn't support more than one IndexerFunc: ", reflect.TypeOf(obj).String())
}
for key, value := range config.IndexerFuncs {
if value != nil {
indexedTrigger = &indexedTriggerFunc{
indexName: key,
indexerFunc: value,
}
}
}
}
if config.Clock == nil {
config.Clock = clock.RealClock{}
}
objType := reflect.TypeOf(obj)
cacher := &Cacher{
ready: newReady(),
storage: config.Storage,
objectType: objType,
versioner: config.Versioner,
newFunc: config.NewFunc,
indexedTrigger: indexedTrigger,
watcherIdx: 0,
watchers: indexedWatchers{
allWatchers: make(map[int]*cacheWatcher),
valueWatchers: make(map[string]watchersMap),
},
// TODO: Figure out the correct value for the buffer size.
incoming: make(chan watchCacheEvent, 100),
dispatchTimeoutBudget: newTimeBudget(stopCh),
// We need to (potentially) stop both:
// - wait.Until go-routine
// - reflector.ListAndWatch
// and there are no guarantees on the order that they will stop.
// So we will be simply closing the channel, and synchronizing on the WaitGroup.
stopCh: stopCh,
clock: config.Clock,
timer: time.NewTimer(time.Duration(0)),
bookmarkWatchers: newTimeBucketWatchers(config.Clock, defaultBookmarkFrequency),
}
// Ensure that timer is stopped.
if !cacher.timer.Stop() {
// Consume triggered (but not yet received) timer event
// so that future reuse does not get a spurious timeout.
<-cacher.timer.C
}
watchCache := newWatchCache(
config.KeyFunc, cacher.processEvent, config.GetAttrsFunc, config.Versioner, config.Indexers, config.Clock, objType)
listerWatcher := NewCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)
reflectorName := "storage/cacher.go:" + config.ResourcePrefix
reflector := cache.NewNamedReflector(reflectorName, listerWatcher, obj, watchCache, 0)
// Configure reflector's pager to for an appropriate pagination chunk size for fetching data from
// storage. The pager falls back to full list if paginated list calls fail due to an "Expired" error.
reflector.WatchListPageSize = storageWatchListPageSize
cacher.watchCache = watchCache
cacher.reflector = reflector
go cacher.dispatchEvents()
cacher.stopWg.Add(1)
go func() {
defer cacher.stopWg.Done()
defer cacher.terminateAllWatchers()
wait.Until(
func() {
if !cacher.isStopped() {
cacher.startCaching(stopCh)
}
}, time.Second, stopCh,
)
}()
return cacher, nil
}
func (c *Cacher) startCaching(stopChannel <-chan struct{}) {
// The 'usable' lock is always 'RLock'able when it is safe to use the cache.
// It is safe to use the cache after a successful list until a disconnection.
// We start with usable (write) locked. The below OnReplace function will
// unlock it after a successful list. The below defer will then re-lock
// it when this function exits (always due to disconnection), only if
// we actually got a successful list. This cycle will repeat as needed.
successfulList := false
c.watchCache.SetOnReplace(func() {
successfulList = true
c.ready.set(true)
klog.V(1).Infof("cacher (%v): initialized", c.objectType.String())
})
defer func() {
if successfulList {
c.ready.set(false)
}
}()
c.terminateAllWatchers()
// Note that since onReplace may be not called due to errors, we explicitly
// need to retry it on errors under lock.
// Also note that startCaching is called in a loop, so there's no need
// to have another loop here.
if err := c.reflector.ListAndWatch(stopChannel); err != nil {
klog.Errorf("cacher (%v): unexpected ListAndWatch error: %v; reinitializing...", c.objectType.String(), err)
}
}
// Versioner implements storage.Interface.
func (c *Cacher) Versioner() storage.Versioner {
return c.storage.Versioner()
}
// Create implements storage.Interface.
func (c *Cacher) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error {
return c.storage.Create(ctx, key, obj, out, ttl)
}
// Delete implements storage.Interface.
func (c *Cacher) Delete(
ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions,
validateDeletion storage.ValidateObjectFunc, _ runtime.Object) error {
// Ignore the suggestion and try to pass down the current version of the object
// read from cache.
if elem, exists, err := c.watchCache.GetByKey(key); err != nil {
klog.Errorf("GetByKey returned error: %v", err)
} else if exists {
// DeepCopy the object since we modify resource version when serializing the
// current object.
currObj := elem.(*storeElement).Object.DeepCopyObject()
return c.storage.Delete(ctx, key, out, preconditions, validateDeletion, currObj)
}
// If we couldn't get the object, fallback to no-suggestion.
return c.storage.Delete(ctx, key, out, preconditions, validateDeletion, nil)
}
// Watch implements storage.Interface.
func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
pred := opts.Predicate
watchRV, err := c.versioner.ParseResourceVersion(opts.ResourceVersion)
if err != nil {
return nil, err
}
c.ready.wait()
triggerValue, triggerSupported := "", false
if c.indexedTrigger != nil {
for _, field := range pred.IndexFields {
if field == c.indexedTrigger.indexName {
if value, ok := pred.Field.RequiresExactMatch(field); ok {
triggerValue, triggerSupported = value, true
}
}
}
}
// If there is indexedTrigger defined, but triggerSupported is false,
// we can't narrow the amount of events significantly at this point.
//
// That said, currently indexedTrigger is defined only for couple resources:
// Pods, Nodes, Secrets and ConfigMaps and there is only a constant
// number of watchers for which triggerSupported is false (excluding those
// issued explicitly by users).
// Thus, to reduce the risk of those watchers blocking all watchers of a
// given resource in the system, we increase the sizes of buffers for them.
chanSize := 10
if c.indexedTrigger != nil && !triggerSupported {
// TODO: We should tune this value and ideally make it dependent on the
// number of objects of a given type and/or their churn.
chanSize = 1000
}
// Determine watch timeout('0' means deadline is not set, ignore checking)
deadline, _ := ctx.Deadline()
// Create a watcher here to reduce memory allocations under lock,
// given that memory allocation may trigger GC and block the thread.
// Also note that emptyFunc is a placeholder, until we will be able
// to compute watcher.forget function (which has to happen under lock).
watcher := newCacheWatcher(chanSize, filterWithAttrsFunction(key, pred), emptyFunc, c.versioner, deadline, pred.AllowWatchBookmarks, c.objectType)
// We explicitly use thread unsafe version and do locking ourself to ensure that
// no new events will be processed in the meantime. The watchCache will be unlocked
// on return from this function.
// Note that we cannot do it under Cacher lock, to avoid a deadlock, since the
// underlying watchCache is calling processEvent under its lock.
c.watchCache.RLock()
defer c.watchCache.RUnlock()
initEvents, err := c.watchCache.GetAllEventsSinceThreadUnsafe(watchRV)
if err != nil {
// To match the uncached watch implementation, once we have passed authn/authz/admission,
// and successfully parsed a resource version, other errors must fail with a watch event of type ERROR,
// rather than a directly returned error.
return newErrWatcher(err), nil
}
// With some events already sent, update resourceVersion so that
// events that were buffered and not yet processed won't be delivered
// to this watcher second time causing going back in time.
if len(initEvents) > 0 {
watchRV = initEvents[len(initEvents)-1].ResourceVersion
}
func() {
c.Lock()
defer c.Unlock()
// Update watcher.forget function once we can compute it.
watcher.forget = forgetWatcher(c, c.watcherIdx, triggerValue, triggerSupported)
c.watchers.addWatcher(watcher, c.watcherIdx, triggerValue, triggerSupported)
// Add it to the queue only when the client support watch bookmarks.
if watcher.allowWatchBookmarks {
c.bookmarkWatchers.addWatcher(watcher)
}
c.watcherIdx++
}()
go watcher.process(ctx, initEvents, watchRV)
return watcher, nil
}
// WatchList implements storage.Interface.
func (c *Cacher) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return c.Watch(ctx, key, opts)
}
// Get implements storage.Interface.
func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, objPtr runtime.Object) error {
if opts.ResourceVersion == "" {
// If resourceVersion is not specified, serve it from underlying
// storage (for backward compatibility).
return c.storage.Get(ctx, key, opts, objPtr)
}
// If resourceVersion is specified, serve it from cache.
// It's guaranteed that the returned value is at least that
// fresh as the given resourceVersion.
getRV, err := c.versioner.ParseResourceVersion(opts.ResourceVersion)
if err != nil {
return err
}
if getRV == 0 && !c.ready.check() {
// If Cacher is not yet initialized and we don't require any specific
// minimal resource version, simply forward the request to storage.
return c.storage.Get(ctx, key, opts, objPtr)
}
// Do not create a trace - it's not for free and there are tons
// of Get requests. We can add it if it will be really needed.
c.ready.wait()
objVal, err := conversion.EnforcePtr(objPtr)
if err != nil {
return err
}
obj, exists, readResourceVersion, err := c.watchCache.WaitUntilFreshAndGet(getRV, key, nil)
if err != nil {
return err
}
if exists {
elem, ok := obj.(*storeElement)
if !ok {
return fmt.Errorf("non *storeElement returned from storage: %v", obj)
}
objVal.Set(reflect.ValueOf(elem.Object).Elem())
} else {
objVal.Set(reflect.Zero(objVal.Type()))
if !opts.IgnoreNotFound {
return storage.NewKeyNotFoundError(key, int64(readResourceVersion))
}
}
return nil
}
// GetToList implements storage.Interface.
func (c *Cacher) GetToList(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := opts.ResourceVersion
pred := opts.Predicate
pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)
hasContinuation := pagingEnabled && len(pred.Continue) > 0
hasLimit := pagingEnabled && pred.Limit > 0 && resourceVersion != "0"
if resourceVersion == "" || hasContinuation || hasLimit || opts.ResourceVersionMatch == metav1.ResourceVersionMatchExact {
// If resourceVersion is not specified, serve it from underlying
// storage (for backward compatibility). If a continuation is
// requested, serve it from the underlying storage as well.
// Limits are only sent to storage when resourceVersion is non-zero
// since the watch cache isn't able to perform continuations, and
// limits are ignored when resource version is zero
return c.storage.GetToList(ctx, key, opts, listObj)
}
// If resourceVersion is specified, serve it from cache.
// It's guaranteed that the returned value is at least that
// fresh as the given resourceVersion.
listRV, err := c.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return err
}
if listRV == 0 && !c.ready.check() {
// If Cacher is not yet initialized and we don't require any specific
// minimal resource version, simply forward the request to storage.
return c.storage.GetToList(ctx, key, opts, listObj)
}
trace := utiltrace.New("cacher list", utiltrace.Field{"type", c.objectType.String()})
defer trace.LogIfLong(500 * time.Millisecond)
c.ready.wait()
trace.Step("Ready")
// List elements with at least 'listRV' from cache.
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
listVal, err := conversion.EnforcePtr(listPtr)
if err != nil {
return err
}
if listVal.Kind() != reflect.Slice {
return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind())
}
filter := filterWithAttrsFunction(key, pred)
obj, exists, readResourceVersion, err := c.watchCache.WaitUntilFreshAndGet(listRV, key, trace)
if err != nil {
return err
}
trace.Step("Got from cache")
if exists {
elem, ok := obj.(*storeElement)
if !ok {
return fmt.Errorf("non *storeElement returned from storage: %v", obj)
}
if filter(elem.Key, elem.Labels, elem.Fields) {
listVal.Set(reflect.Append(listVal, reflect.ValueOf(elem.Object).Elem()))
}
}
if c.versioner != nil {
if err := c.versioner.UpdateList(listObj, readResourceVersion, "", nil); err != nil {
return err
}
}
return nil
}
// List implements storage.Interface.
func (c *Cacher) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := opts.ResourceVersion
pred := opts.Predicate
pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)
hasContinuation := pagingEnabled && len(pred.Continue) > 0
hasLimit := pagingEnabled && pred.Limit > 0 && resourceVersion != "0"
if resourceVersion == "" || hasContinuation || hasLimit || opts.ResourceVersionMatch == metav1.ResourceVersionMatchExact {
// If resourceVersion is not specified, serve it from underlying
// storage (for backward compatibility). If a continuation is
// requested, serve it from the underlying storage as well.
// Limits are only sent to storage when resourceVersion is non-zero
// since the watch cache isn't able to perform continuations, and
// limits are ignored when resource version is zero.
return c.storage.List(ctx, key, opts, listObj)
}
// If resourceVersion is specified, serve it from cache.
// It's guaranteed that the returned value is at least that
// fresh as the given resourceVersion.
listRV, err := c.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return err
}
if listRV == 0 && !c.ready.check() {
// If Cacher is not yet initialized and we don't require any specific
// minimal resource version, simply forward the request to storage.
return c.storage.List(ctx, key, opts, listObj)
}
trace := utiltrace.New("cacher list", utiltrace.Field{"type", c.objectType.String()})
defer trace.LogIfLong(500 * time.Millisecond)
c.ready.wait()
trace.Step("Ready")
// List elements with at least 'listRV' from cache.
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
listVal, err := conversion.EnforcePtr(listPtr)
if err != nil {
return err
}
if listVal.Kind() != reflect.Slice {
return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind())
}
filter := filterWithAttrsFunction(key, pred)
objs, readResourceVersion, err := c.watchCache.WaitUntilFreshAndList(listRV, pred.MatcherIndex(), trace)
if err != nil {
return err
}
trace.Step("Listed items from cache", utiltrace.Field{"count", len(objs)})
if len(objs) > listVal.Cap() && pred.Label.Empty() && pred.Field.Empty() {
// Resize the slice appropriately, since we already know that none
// of the elements will be filtered out.
listVal.Set(reflect.MakeSlice(reflect.SliceOf(c.objectType.Elem()), 0, len(objs)))
trace.Step("Resized result")
}
for _, obj := range objs {
elem, ok := obj.(*storeElement)
if !ok {
return fmt.Errorf("non *storeElement returned from storage: %v", obj)
}
if filter(elem.Key, elem.Labels, elem.Fields) {
listVal.Set(reflect.Append(listVal, reflect.ValueOf(elem.Object).Elem()))
}
}
trace.Step("Filtered items", utiltrace.Field{"count", listVal.Len()})
if c.versioner != nil {
if err := c.versioner.UpdateList(listObj, readResourceVersion, "", nil); err != nil {
return err
}
}
return nil
}
// GuaranteedUpdate implements storage.Interface.
func (c *Cacher) GuaranteedUpdate(
ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool,
preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, _ runtime.Object) error {
// Ignore the suggestion and try to pass down the current version of the object
// read from cache.
if elem, exists, err := c.watchCache.GetByKey(key); err != nil {
klog.Errorf("GetByKey returned error: %v", err)
} else if exists {
currObj := elem.(*storeElement).Object.DeepCopyObject()
return c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, currObj)
}
// If we couldn't get the object, fallback to no-suggestion.
return c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, nil)
}
// Count implements storage.Interface.
func (c *Cacher) Count(pathPrefix string) (int64, error) {
return c.storage.Count(pathPrefix)
}
// baseObjectThreadUnsafe omits locking for cachingObject.
func baseObjectThreadUnsafe(object runtime.Object) runtime.Object {
if co, ok := object.(*cachingObject); ok {
return co.object
}
return object
}
func (c *Cacher) triggerValuesThreadUnsafe(event *watchCacheEvent) ([]string, bool) {
if c.indexedTrigger == nil {
return nil, false
}
result := make([]string, 0, 2)
result = append(result, c.indexedTrigger.indexerFunc(baseObjectThreadUnsafe(event.Object)))
if event.PrevObject == nil {
return result, true
}
prevTriggerValue := c.indexedTrigger.indexerFunc(baseObjectThreadUnsafe(event.PrevObject))
if result[0] != prevTriggerValue {
result = append(result, prevTriggerValue)
}
return result, true
}
func (c *Cacher) processEvent(event *watchCacheEvent) {
if curLen := int64(len(c.incoming)); c.incomingHWM.Update(curLen) {
// Monitor if this gets backed up, and how much.
klog.V(1).Infof("cacher (%v): %v objects queued in incoming channel.", c.objectType.String(), curLen)
}
c.incoming <- *event
}
func (c *Cacher) dispatchEvents() {
// Jitter to help level out any aggregate load.
bookmarkTimer := c.clock.NewTimer(wait.Jitter(time.Second, 0.25))
defer bookmarkTimer.Stop()
lastProcessedResourceVersion := uint64(0)
for {
select {
case event, ok := <-c.incoming:
if !ok {
return
}
// Don't dispatch bookmarks coming from the storage layer.
// They can be very frequent (even to the level of subseconds)
// to allow efficient watch resumption on kube-apiserver restarts,
// and propagating them down may overload the whole system.
//
// TODO: If at some point we decide the performance and scalability
// footprint is acceptable, this is the place to hook them in.
// However, we then need to check if this was called as a result
// of a bookmark event or regular Add/Update/Delete operation by
// checking if resourceVersion here has changed.
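			// Note (illustrative): a storage-layer bookmark still advances
			// lastProcessedResourceVersion below, so the cacher's own
			// bookmarks pick up the newer resourceVersion even though the
			// event itself is not fanned out to watchers.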
if event.Type != watch.Bookmark {
c.dispatchEvent(&event)
}
lastProcessedResourceVersion = event.ResourceVersion
case <-bookmarkTimer.C():
bookmarkTimer.Reset(wait.Jitter(time.Second, 0.25))
			// Never send a bookmark event if we did not see an event here; this is fine
// because we don't provide any guarantees on sending bookmarks.
if lastProcessedResourceVersion == 0 {
// pop expired watchers in case there has been no update
c.bookmarkWatchers.popExpiredWatchers()
continue
}
bookmarkEvent := &watchCacheEvent{
Type: watch.Bookmark,
Object: c.newFunc(),
ResourceVersion: lastProcessedResourceVersion,
}
if err := c.versioner.UpdateObject(bookmarkEvent.Object, bookmarkEvent.ResourceVersion); err != nil {
klog.Errorf("failure to set resourceVersion to %d on bookmark event %+v", bookmarkEvent.ResourceVersion, bookmarkEvent.Object)
continue
}
c.dispatchEvent(bookmarkEvent)
case <-c.stopCh:
return
}
}
}
func setCachingObjects(event *watchCacheEvent, versioner storage.Versioner) {
switch event.Type {
case watch.Added, watch.Modified:
if object, err := newCachingObject(event.Object); err == nil {
event.Object = object
} else {
klog.Errorf("couldn't create cachingObject from: %#v", event.Object)
}
// Don't wrap PrevObject for update event (for create events it is nil).
// We only encode those to deliver DELETE watch events, so if
// event.Object is not nil it can be used only for watchers for which
// selector was satisfied for its previous version and is no longer
// satisfied for the current version.
		// This is rare enough that it doesn't justify making a deep copy of the
// object (done by newCachingObject) every time.
case watch.Deleted:
// Don't wrap Object for delete events - these are not to deliver any
// events. Only wrap PrevObject.
if object, err := newCachingObject(event.PrevObject); err == nil {
// Update resource version of the underlying object.
// event.PrevObject is used to deliver DELETE watch events and
// for them, we set resourceVersion to <current> instead of
// the resourceVersion of the last modification of the object.
updateResourceVersionIfNeeded(object.object, versioner, event.ResourceVersion)
event.PrevObject = object
} else {
			klog.Errorf("couldn't create cachingObject from: %#v", event.PrevObject)
}
}
}
func (c *Cacher) dispatchEvent(event *watchCacheEvent) {
c.startDispatching(event)
defer c.finishDispatching()
	// Watchers stopped after startDispatching are delayed until finishDispatching;
	// since add() can block, we explicitly add while the cacher is unlocked.
	// Dispatch the event in a non-blocking way first, so that faster watchers
	// are not blocked by slower ones.
if event.Type == watch.Bookmark {
for _, watcher := range c.watchersBuffer {
watcher.nonblockingAdd(event)
}
} else {
// Set up caching of object serializations only for dispatching this event.
//
// Storing serializations in memory would result in increased memory usage,
// but it would help for caching encodings for watches started from old
// versions. However, we still don't have a convincing data that the gain
// from it justifies increased memory usage, so for now we drop the cached
// serializations after dispatching this event.
//
// Given the deep-copies that are done to create cachingObjects,
// we try to cache serializations only if there are at least 3 watchers.
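		// Rough illustration (numbers are hypothetical): with dozens of
		// watchers on the same resource, wrapping the event in a cachingObject
		// lets the serialization for a given encoding be computed once and
		// reused, instead of once per watcher.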
if len(c.watchersBuffer) >= 3 {
// Make a shallow copy to allow overwriting Object and PrevObject.
wcEvent := *event
setCachingObjects(&wcEvent, c.versioner)
event = &wcEvent
}
c.blockedWatchers = c.blockedWatchers[:0]
for _, watcher := range c.watchersBuffer {
if !watcher.nonblockingAdd(event) {
c.blockedWatchers = append(c.blockedWatchers, watcher)
}
}
if len(c.blockedWatchers) > 0 {
// dispatchEvent is called very often, so arrange
// to reuse timers instead of constantly allocating.
startTime := time.Now()
timeout := c.dispatchTimeoutBudget.takeAvailable()
c.timer.Reset(timeout)
// Make sure every watcher will try to send event without blocking first,
// even if the timer has already expired.
timer := c.timer
for _, watcher := range c.blockedWatchers {
if !watcher.add(event, timer) {
                // The timer has fired; clear it by setting it to nil.
timer = nil
}
}
// Stop the timer if it is not fired
if timer != nil && !timer.Stop() {
// Consume triggered (but not yet received) timer event
// so that future reuse does not get a spurious timeout.
<-timer.C
}
c.dispatchTimeoutBudget.returnUnused(timeout - time.Since(startTime))
}
}
}
func (c *Cacher) startDispatchingBookmarkEvents() {
	// Pop already expired watchers. However, explicitly ignore stopped ones,
	// as we don't delete a watcher from bookmarkWatchers when it is stopped.
for _, watchers := range c.bookmarkWatchers.popExpiredWatchers() {
for _, watcher := range watchers {
// c.Lock() is held here.
// watcher.stopThreadUnsafe() is protected by c.Lock()
if watcher.stopped {
continue
}
c.watchersBuffer = append(c.watchersBuffer, watcher)
// Requeue the watcher for the next bookmark if needed.
c.bookmarkWatchers.addWatcher(watcher)
}
}
}
// startDispatching chooses watchers potentially interested in a given event
// and marks dispatching as true.
func (c *Cacher) startDispatching(event *watchCacheEvent) {
// It is safe to call triggerValuesThreadUnsafe here, because at this
// point only this thread can access this event (we create a separate
// watchCacheEvent for every dispatch).
triggerValues, supported := c.triggerValuesThreadUnsafe(event)
c.Lock()
defer c.Unlock()
c.dispatching = true
// We are reusing the slice to avoid memory reallocations in every
// dispatchEvent() call. That may prevent Go GC from freeing items
// from previous phases that are sitting behind the current length
// of the slice, but there is only a limited number of those and the
// gain from avoiding memory allocations is much bigger.
c.watchersBuffer = c.watchersBuffer[:0]
if event.Type == watch.Bookmark {
c.startDispatchingBookmarkEvents()
// return here to reduce following code indentation and diff
return
}
// Iterate over "allWatchers" no matter what the trigger function is.
for _, watcher := range c.watchers.allWatchers {
c.watchersBuffer = append(c.watchersBuffer, watcher)
}
if supported {
// Iterate over watchers interested in the given values of the trigger.
for _, triggerValue := range triggerValues {
for _, watcher := range c.watchers.valueWatchers[triggerValue] {
c.watchersBuffer = append(c.watchersBuffer, watcher)
}
}
} else {
		// supported being false generally means that the trigger function
		// is not defined (or not aware of any indexes). In this case,
		// watcher filters should generally not generate any trigger
		// values either, but a misconfiguration could cause problems, so
		// we keep this branch as a defensive fallback.
// Iterate over watchers interested in exact values for all values.
for _, watchers := range c.watchers.valueWatchers {
for _, watcher := range watchers {
c.watchersBuffer = append(c.watchersBuffer, watcher)
}
}
}
}
// finishDispatching stops all the watchers that were supposed to be
// stopped in the meantime, but whose stopping was deferred to avoid closing
// the input channels of watchers, as add() may still be writing to them.
// It also marks dispatching as false.
func (c *Cacher) finishDispatching() {
c.Lock()
defer c.Unlock()
c.dispatching = false
for _, watcher := range c.watchersToStop {
watcher.stopThreadUnsafe()
}
c.watchersToStop = c.watchersToStop[:0]
}
func (c *Cacher) terminateAllWatchers() {
c.Lock()
defer c.Unlock()
c.watchers.terminateAll(c.objectType, c.stopWatcherThreadUnsafe)
}
func (c *Cacher) stopWatcherThreadUnsafe(watcher *cacheWatcher) {
if c.dispatching {
c.watchersToStop = append(c.watchersToStop, watcher)
} else {
watcher.stopThreadUnsafe()
}
}
func (c *Cacher) isStopped() bool {
c.stopLock.RLock()
defer c.stopLock.RUnlock()
return c.stopped
}
// Stop implements the graceful termination.
func (c *Cacher) Stop() {
c.stopLock.Lock()
if c.stopped {
// avoid stopping twice (note: cachers are shared with subresources)
c.stopLock.Unlock()
return
}
c.stopped = true
c.stopLock.Unlock()
close(c.stopCh)
c.stopWg.Wait()
}
func forgetWatcher(c *Cacher, index int, triggerValue string, triggerSupported bool) func() {
return func() {
c.Lock()
defer c.Unlock()
		// It's possible that the watcher is already not in the structure (e.g. in case of
		// simultaneous Stop() and terminateAllWatchers()), but it is safe to call stopThreadUnsafe()
		// on a watcher multiple times.
c.watchers.deleteWatcher(index, triggerValue, triggerSupported, c.stopWatcherThreadUnsafe)
}
}
func filterWithAttrsFunction(key string, p storage.SelectionPredicate) filterWithAttrsFunc {
filterFunc := func(objKey string, label labels.Set, field fields.Set) bool {
if !hasPathPrefix(objKey, key) {
return false
}
return p.MatchesObjectAttributes(label, field)
}
return filterFunc
}
// LastSyncResourceVersion returns the resource version to which the underlying cache is synced.
func (c *Cacher) LastSyncResourceVersion() (uint64, error) {
c.ready.wait()
resourceVersion := c.reflector.LastSyncResourceVersion()
return c.versioner.ParseResourceVersion(resourceVersion)
}
// cacherListerWatcher wraps storage.Interface to expose cache.ListerWatcher.
type cacherListerWatcher struct {
storage storage.Interface
resourcePrefix string
newListFunc func() runtime.Object
}
// NewCacherListerWatcher returns a storage.Interface backed ListerWatcher.
func NewCacherListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher {
return &cacherListerWatcher{
storage: storage,
resourcePrefix: resourcePrefix,
newListFunc: newListFunc,
}
}
// Implements cache.ListerWatcher interface.
func (lw *cacherListerWatcher) List(options metav1.ListOptions) (runtime.Object, error) {
list := lw.newListFunc()
pred := storage.SelectionPredicate{
Label: labels.Everything(),
Field: fields.Everything(),
Limit: options.Limit,
Continue: options.Continue,
}
if err := lw.storage.List(context.TODO(), lw.resourcePrefix, storage.ListOptions{ResourceVersionMatch: options.ResourceVersionMatch, Predicate: pred}, list); err != nil {
return nil, err
}
return list, nil
}
// Implements cache.ListerWatcher interface.
func (lw *cacherListerWatcher) Watch(options metav1.ListOptions) (watch.Interface, error) {
opts := storage.ListOptions{
ResourceVersion: options.ResourceVersion,
Predicate: storage.Everything,
}
if utilfeature.DefaultFeatureGate.Enabled(features.EfficientWatchResumption) {
opts.ProgressNotify = true
}
return lw.storage.WatchList(context.TODO(), lw.resourcePrefix, opts)
}
// errWatcher implements watch.Interface to return a single error
type errWatcher struct {
result chan watch.Event
}
func newErrWatcher(err error) *errWatcher {
// Create an error event
errEvent := watch.Event{Type: watch.Error}
switch err := err.(type) {
case runtime.Object:
errEvent.Object = err
case *errors.StatusError:
errEvent.Object = &err.ErrStatus
default:
errEvent.Object = &metav1.Status{
Status: metav1.StatusFailure,
Message: err.Error(),
Reason: metav1.StatusReasonInternalError,
Code: http.StatusInternalServerError,
}
}
// Create a watcher with room for a single event, populate it, and close the channel
watcher := &errWatcher{result: make(chan watch.Event, 1)}
watcher.result <- errEvent
close(watcher.result)
return watcher
}
// Implements watch.Interface.
func (c *errWatcher) ResultChan() <-chan watch.Event {
return c.result
}
// Implements watch.Interface.
func (c *errWatcher) Stop() {
// no-op
}
// cacheWatcher implements watch.Interface
// this is not thread-safe
type cacheWatcher struct {
input chan *watchCacheEvent
result chan watch.Event
done chan struct{}
filter filterWithAttrsFunc
stopped bool
forget func()
versioner storage.Versioner
	// The watcher will be closed by the server after the deadline,
	// so save it here to send bookmark events before that.
deadline time.Time
allowWatchBookmarks bool
	// Object type that the cache watcher is interested in
objectType reflect.Type
}
func newCacheWatcher(chanSize int, filter filterWithAttrsFunc, forget func(), versioner storage.Versioner, deadline time.Time, allowWatchBookmarks bool, objectType reflect.Type) *cacheWatcher {
return &cacheWatcher{
input: make(chan *watchCacheEvent, chanSize),
result: make(chan watch.Event, chanSize),
done: make(chan struct{}),
filter: filter,
stopped: false,
forget: forget,
versioner: versioner,
deadline: deadline,
allowWatchBookmarks: allowWatchBookmarks,
objectType: objectType,
}
}
// Implements watch.Interface.
func (c *cacheWatcher) ResultChan() <-chan watch.Event {
return c.result
}
// Implements watch.Interface.
func (c *cacheWatcher) Stop() {
c.forget()
}
// we rely on the fact that stopThreadUnsafe is actually protected by Cacher.Lock()
func (c *cacheWatcher) stopThreadUnsafe() {
if !c.stopped {
c.stopped = true
close(c.done)
close(c.input)
}
}
func (c *cacheWatcher) nonblockingAdd(event *watchCacheEvent) bool {
select {
case c.input <- event:
return true
default:
return false
}
}
// Nil timer means that add will not block (if it can't send event immediately, it will break the watcher)
func (c *cacheWatcher) add(event *watchCacheEvent, timer *time.Timer) bool {
// Try to send the event immediately, without blocking.
if c.nonblockingAdd(event) {
return true
}
closeFunc := func() {
// This means that we couldn't send event to that watcher.
// Since we don't want to block on it infinitely,
// we simply terminate it.
klog.V(1).Infof("Forcing watcher close due to unresponsiveness: %v", c.objectType.String())
c.forget()
}
if timer == nil {
closeFunc()
return false
}
// OK, block sending, but only until timer fires.
select {
case c.input <- event:
return true
case <-timer.C:
closeFunc()
return false
}
}
func (c *cacheWatcher) nextBookmarkTime(now time.Time, bookmarkFrequency time.Duration) (time.Time, bool) {
// We try to send bookmarks:
// (a) roughly every minute
// (b) right before the watcher timeout - for now we simply set it 2s before
// the deadline
	// The former gives us periodicity if the watch breaks due to unexpected
	// conditions; the latter ensures that on timeout the watcher is as close to
	// now as possible - this covers 99% of cases.
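	// Worked example (hypothetical values): with bookmarkFrequency=1m,
	// now=T and deadline=T+30s, the heartbeat moves from T+1m to T+28s
	// (2s before the deadline); if the deadline is less than 2s away
	// (or already passed), no bookmark time is returned.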
heartbeatTime := now.Add(bookmarkFrequency)
if c.deadline.IsZero() {
// Timeout is set by our client libraries (e.g. reflector) as well as defaulted by
		// apiserver if properly configured. So this shouldn't happen in practice.
return heartbeatTime, true
}
if pretimeoutTime := c.deadline.Add(-2 * time.Second); pretimeoutTime.Before(heartbeatTime) {
heartbeatTime = pretimeoutTime
}
if heartbeatTime.Before(now) {
return time.Time{}, false
}
return heartbeatTime, true
}
func getEventObject(object runtime.Object) runtime.Object {
if _, ok := object.(runtime.CacheableObject); ok {
// It is safe to return without deep-copy, because the underlying
// object was already deep-copied during construction.
return object
}
return object.DeepCopyObject()
}
func updateResourceVersionIfNeeded(object runtime.Object, versioner storage.Versioner, resourceVersion uint64) {
if _, ok := object.(*cachingObject); ok {
// We assume that for cachingObject resourceVersion was already propagated before.
return
}
if err := versioner.UpdateObject(object, resourceVersion); err != nil {
utilruntime.HandleError(fmt.Errorf("failure to version api object (%d) %#v: %v", resourceVersion, object, err))
}
}
func (c *cacheWatcher) convertToWatchEvent(event *watchCacheEvent) *watch.Event {
if event.Type == watch.Bookmark {
return &watch.Event{Type: watch.Bookmark, Object: event.Object.DeepCopyObject()}
}
curObjPasses := event.Type != watch.Deleted && c.filter(event.Key, event.ObjLabels, event.ObjFields)
oldObjPasses := false
if event.PrevObject != nil {
oldObjPasses = c.filter(event.Key, event.PrevObjLabels, event.PrevObjFields)
}
if !curObjPasses && !oldObjPasses {
// Watcher is not interested in that object.
return nil
}
switch {
case curObjPasses && !oldObjPasses:
return &watch.Event{Type: watch.Added, Object: getEventObject(event.Object)}
case curObjPasses && oldObjPasses:
return &watch.Event{Type: watch.Modified, Object: getEventObject(event.Object)}
case !curObjPasses && oldObjPasses:
// return a delete event with the previous object content, but with the event's resource version
oldObj := getEventObject(event.PrevObject)
updateResourceVersionIfNeeded(oldObj, c.versioner, event.ResourceVersion)
return &watch.Event{Type: watch.Deleted, Object: oldObj}
}
return nil
}
// NOTE: sendWatchCacheEvent is assumed to not modify <event> !!!
func (c *cacheWatcher) sendWatchCacheEvent(event *watchCacheEvent) {
watchEvent := c.convertToWatchEvent(event)
if watchEvent == nil {
// Watcher is not interested in that object.
return
}
	// We need to ensure that if we put event X into c.result, all
	// previous events have already been put into it, no matter whether
	// c.done is closed or not.
	// Thus we cannot simply select over c.done and c.result, as that
	// would give us non-determinism.
	// At the same time, we don't want to block infinitely on putting
	// to c.result when c.done is already closed.
	// The check below ensures that with c.done already closed, we go
	// into the next select at most once. With that, no matter which
	// statement we choose there, we will deliver only consecutive
	// events.
select {
case <-c.done:
return
default:
}
select {
case c.result <- *watchEvent:
case <-c.done:
}
}
func (c *cacheWatcher) process(ctx context.Context, initEvents []*watchCacheEvent, resourceVersion uint64) {
defer utilruntime.HandleCrash()
// Check how long we are processing initEvents.
// As long as these are not processed, we are not processing
// any incoming events, so if it takes long, we may actually
// block all watchers for some time.
	// TODO: From the logs it seems that processing times can reach
	// up to 1s, which is very long. However, this doesn't depend
	// that much on the number of initEvents. E.g. from a
	// 2000-node Kubemark run we have logs like:
	// ... processing 13862 initEvents took 66.808689ms
	// ... processing 14040 initEvents took 993.532539ms
	// We should understand what is blocking us in those cases (e.g.
	// is it lack of CPU, network, or something else) and potentially
	// consider increasing the size of the result buffer in those cases.
const initProcessThreshold = 500 * time.Millisecond
startTime := time.Now()
for _, event := range initEvents {
c.sendWatchCacheEvent(event)
}
objType := c.objectType.String()
if len(initEvents) > 0 {
initCounter.WithLabelValues(objType).Add(float64(len(initEvents)))
}
processingTime := time.Since(startTime)
if processingTime > initProcessThreshold {
klog.V(2).Infof("processing %d initEvents of %s took %v", len(initEvents), objType, processingTime)
}
defer close(c.result)
defer c.Stop()
for {
select {
case event, ok := <-c.input:
if !ok {
return
}
// only send events newer than resourceVersion
if event.ResourceVersion > resourceVersion {
c.sendWatchCacheEvent(event)
}
case <-ctx.Done():
return
}
}
}
type ready struct {
ok bool
c *sync.Cond
}
func newReady() *ready {
return &ready{c: sync.NewCond(&sync.RWMutex{})}
}
func (r *ready) wait() {
r.c.L.Lock()
for !r.ok {
r.c.Wait()
}
r.c.L.Unlock()
}
// TODO: Make check() function more sophisticated, in particular
// allow it to behave as "waitWithTimeout".
func (r *ready) check() bool {
rwMutex := r.c.L.(*sync.RWMutex)
rwMutex.RLock()
defer rwMutex.RUnlock()
return r.ok
}
func (r *ready) set(ok bool) {
r.c.L.Lock()
defer r.c.L.Unlock()
r.ok = ok
r.c.Broadcast()
}
| staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go | 1 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.029796086251735687,
0.0005797585472464561,
0.00016031175618991256,
0.00017259264132007957,
0.003221148857846856
] |
{
"id": 4,
"code_window": [
"\t\t}\n",
"\t\treturn s.getState(getResp, key, v, ignoreNotFound)\n",
"\t}\n",
"\n",
"\tvar origState *objState\n",
"\tvar mustCheckData bool\n",
"\tif suggestion != nil {\n",
"\t\torigState, err = s.getStateFromObject(suggestion)\n",
"\t\tmustCheckData = true\n",
"\t} else {\n",
"\t\torigState, err = getCurrentState()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tvar origStateIsCurrent bool\n",
"\tif cachedExistingObject != nil {\n",
"\t\torigState, err = s.getStateFromObject(cachedExistingObject)\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 308
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rest
import (
appsapiv1 "k8s.io/api/apps/v1"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/registry/rest"
genericapiserver "k8s.io/apiserver/pkg/server"
serverstorage "k8s.io/apiserver/pkg/server/storage"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/apps"
controllerrevisionsstore "k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage"
daemonsetstore "k8s.io/kubernetes/pkg/registry/apps/daemonset/storage"
deploymentstore "k8s.io/kubernetes/pkg/registry/apps/deployment/storage"
replicasetstore "k8s.io/kubernetes/pkg/registry/apps/replicaset/storage"
statefulsetstore "k8s.io/kubernetes/pkg/registry/apps/statefulset/storage"
)
// StorageProvider is a struct for apps REST storage.
type StorageProvider struct{}
// NewRESTStorage returns APIGroupInfo object.
func (p StorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool, error) {
apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(apps.GroupName, legacyscheme.Scheme, legacyscheme.ParameterCodec, legacyscheme.Codecs)
	// If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go` with specific priorities.
// TODO refactor the plumbing to provide the information in the APIGroupInfo
if apiResourceConfigSource.VersionEnabled(appsapiv1.SchemeGroupVersion) {
storageMap, err := p.v1Storage(apiResourceConfigSource, restOptionsGetter)
if err != nil {
return genericapiserver.APIGroupInfo{}, false, err
}
apiGroupInfo.VersionedResourcesStorageMap[appsapiv1.SchemeGroupVersion.Version] = storageMap
}
return apiGroupInfo, true, nil
}
func (p StorageProvider) v1Storage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (map[string]rest.Storage, error) {
storage := map[string]rest.Storage{}
// deployments
deploymentStorage, err := deploymentstore.NewStorage(restOptionsGetter)
if err != nil {
return storage, err
}
storage["deployments"] = deploymentStorage.Deployment
storage["deployments/status"] = deploymentStorage.Status
storage["deployments/scale"] = deploymentStorage.Scale
// statefulsets
statefulSetStorage, err := statefulsetstore.NewStorage(restOptionsGetter)
if err != nil {
return storage, err
}
storage["statefulsets"] = statefulSetStorage.StatefulSet
storage["statefulsets/status"] = statefulSetStorage.Status
storage["statefulsets/scale"] = statefulSetStorage.Scale
// daemonsets
daemonSetStorage, daemonSetStatusStorage, err := daemonsetstore.NewREST(restOptionsGetter)
if err != nil {
return storage, err
}
storage["daemonsets"] = daemonSetStorage
storage["daemonsets/status"] = daemonSetStatusStorage
// replicasets
replicaSetStorage, err := replicasetstore.NewStorage(restOptionsGetter)
if err != nil {
return storage, err
}
storage["replicasets"] = replicaSetStorage.ReplicaSet
storage["replicasets/status"] = replicaSetStorage.Status
storage["replicasets/scale"] = replicaSetStorage.Scale
// controllerrevisions
historyStorage, err := controllerrevisionsstore.NewREST(restOptionsGetter)
if err != nil {
return storage, err
}
storage["controllerrevisions"] = historyStorage
return storage, nil
}
// GroupName returns name of the group
func (p StorageProvider) GroupName() string {
return apps.GroupName
}
| pkg/registry/apps/rest/storage_apps.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00030112636159174144,
0.00018380001711193472,
0.00015911765513010323,
0.0001729057403281331,
0.00003745127105503343
] |
{
"id": 4,
"code_window": [
"\t\t}\n",
"\t\treturn s.getState(getResp, key, v, ignoreNotFound)\n",
"\t}\n",
"\n",
"\tvar origState *objState\n",
"\tvar mustCheckData bool\n",
"\tif suggestion != nil {\n",
"\t\torigState, err = s.getStateFromObject(suggestion)\n",
"\t\tmustCheckData = true\n",
"\t} else {\n",
"\t\torigState, err = getCurrentState()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tvar origStateIsCurrent bool\n",
"\tif cachedExistingObject != nil {\n",
"\t\torigState, err = s.getStateFromObject(cachedExistingObject)\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 308
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1beta1
import (
"context"
time "time"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
kubernetes "k8s.io/client-go/kubernetes"
v1beta1 "k8s.io/client-go/listers/extensions/v1beta1"
cache "k8s.io/client-go/tools/cache"
)
// NetworkPolicyInformer provides access to a shared informer and lister for
// NetworkPolicies.
type NetworkPolicyInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1beta1.NetworkPolicyLister
}
type networkPolicyInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewNetworkPolicyInformer constructs a new informer for NetworkPolicy type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewNetworkPolicyInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredNetworkPolicyInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredNetworkPolicyInformer constructs a new informer for NetworkPolicy type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.ExtensionsV1beta1().NetworkPolicies(namespace).List(context.TODO(), options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.ExtensionsV1beta1().NetworkPolicies(namespace).Watch(context.TODO(), options)
},
},
&extensionsv1beta1.NetworkPolicy{},
resyncPeriod,
indexers,
)
}
func (f *networkPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredNetworkPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *networkPolicyInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&extensionsv1beta1.NetworkPolicy{}, f.defaultInformer)
}
func (f *networkPolicyInformer) Lister() v1beta1.NetworkPolicyLister {
return v1beta1.NewNetworkPolicyLister(f.Informer().GetIndexer())
}
| staging/src/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00018762612307909876,
0.00017531556659378111,
0.00016912103455979377,
0.00017433418543078005,
0.000005169208179722773
] |
{
"id": 4,
"code_window": [
"\t\t}\n",
"\t\treturn s.getState(getResp, key, v, ignoreNotFound)\n",
"\t}\n",
"\n",
"\tvar origState *objState\n",
"\tvar mustCheckData bool\n",
"\tif suggestion != nil {\n",
"\t\torigState, err = s.getStateFromObject(suggestion)\n",
"\t\tmustCheckData = true\n",
"\t} else {\n",
"\t\torigState, err = getCurrentState()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tvar origStateIsCurrent bool\n",
"\tif cachedExistingObject != nil {\n",
"\t\torigState, err = s.getStateFromObject(cachedExistingObject)\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 308
} | = vendor/github.com/go-openapi/swag licensed under: =
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
= vendor/github.com/go-openapi/swag/LICENSE 3b83ef96387f14655fc854ddc3c6bd57
| LICENSES/vendor/github.com/go-openapi/swag/LICENSE | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00018060860747937113,
0.0001766009081620723,
0.00016785573097877204,
0.0001769783702911809,
0.0000028033343824063195
] |
{
"id": 5,
"code_window": [
"\t} else {\n",
"\t\torigState, err = getCurrentState()\n",
"\t}\n",
"\tif err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\torigStateIsCurrent = true\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "add",
"edit_start_line_idx": 314
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd3
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"path"
"reflect"
"strings"
"time"
"go.etcd.io/etcd/clientv3"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/etcd3/metrics"
"k8s.io/apiserver/pkg/storage/value"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
utiltrace "k8s.io/utils/trace"
)
// authenticatedDataString satisfies the value.Context interface. It uses the key to
// authenticate the stored data. This does not defend against reuse of previously
// encrypted values under the same key, but will prevent an attacker from using an
// encrypted value from a different key. A stronger authenticated data segment would
// include the etcd3 Version field (which is incremented on each write to a key and
// reset when the key is deleted), but an attacker with write access to etcd can
// force deletion and recreation of keys to weaken that angle.
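// Illustrative example (hypothetical keys): a value encrypted for
// "/registry/secrets/ns1/s1" uses that full key as authenticated data, so an
// attacker cannot copy the ciphertext verbatim to "/registry/secrets/ns2/s1"
// and have it decrypt there.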
type authenticatedDataString string
// AuthenticatedData implements the value.Context interface.
func (d authenticatedDataString) AuthenticatedData() []byte {
return []byte(string(d))
}
var _ value.Context = authenticatedDataString("")
type store struct {
client *clientv3.Client
codec runtime.Codec
versioner storage.Versioner
transformer value.Transformer
pathPrefix string
watcher *watcher
pagingEnabled bool
leaseManager *leaseManager
}
type objState struct {
obj runtime.Object
meta *storage.ResponseMeta
rev int64
data []byte
stale bool
}
// New returns an etcd3 implementation of storage.Interface.
func New(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, transformer value.Transformer, pagingEnabled bool, leaseReuseDurationSeconds int64) storage.Interface {
return newStore(c, newFunc, pagingEnabled, leaseReuseDurationSeconds, codec, prefix, transformer)
}
func newStore(c *clientv3.Client, newFunc func() runtime.Object, pagingEnabled bool, leaseReuseDurationSeconds int64, codec runtime.Codec, prefix string, transformer value.Transformer) *store {
versioner := APIObjectVersioner{}
result := &store{
client: c,
codec: codec,
versioner: versioner,
transformer: transformer,
pagingEnabled: pagingEnabled,
// for compatibility with etcd2 impl.
// no-op for default prefix of '/registry'.
// keeps compatibility with etcd2 impl for custom prefixes that don't start with '/'
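		// e.g. (illustrative) a custom prefix "kubernetes.io/registry" becomes
		// "/kubernetes.io/registry", while "/registry" is left unchanged.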
pathPrefix: path.Join("/", prefix),
watcher: newWatcher(c, codec, newFunc, versioner, transformer),
leaseManager: newDefaultLeaseManager(c, leaseReuseDurationSeconds),
}
return result
}
// Versioner implements storage.Interface.Versioner.
func (s *store) Versioner() storage.Versioner {
return s.versioner
}
// Get implements storage.Interface.Get.
func (s *store) Get(ctx context.Context, key string, opts storage.GetOptions, out runtime.Object) error {
key = path.Join(s.pathPrefix, key)
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return err
}
if err = s.validateMinimumResourceVersion(opts.ResourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
if len(getResp.Kvs) == 0 {
if opts.IgnoreNotFound {
return runtime.SetZeroValue(out)
}
return storage.NewKeyNotFoundError(key, 0)
}
kv := getResp.Kvs[0]
data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
return decode(s.codec, s.versioner, data, out, kv.ModRevision)
}
// Create implements storage.Interface.Create.
func (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error {
if version, err := s.versioner.ObjectResourceVersion(obj); err == nil && version != 0 {
return errors.New("resourceVersion should not be set on objects to be created")
}
if err := s.versioner.PrepareObjectForStorage(obj); err != nil {
return fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
data, err := runtime.Encode(s.codec, obj)
if err != nil {
return err
}
key = path.Join(s.pathPrefix, key)
opts, err := s.ttlOpts(ctx, int64(ttl))
if err != nil {
return err
}
newData, err := s.transformer.TransformToStorage(data, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
notFound(key),
).Then(
clientv3.OpPut(key, string(newData), opts...),
).Commit()
metrics.RecordEtcdRequestLatency("create", getTypeName(obj), startTime)
if err != nil {
return err
}
if !txnResp.Succeeded {
return storage.NewKeyExistsError(key, 0)
}
if out != nil {
putResp := txnResp.Responses[0].GetResponsePut()
return decode(s.codec, s.versioner, data, out, putResp.Header.Revision)
}
return nil
}
// Delete implements storage.Interface.Delete.
func (s *store) Delete(
ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions,
validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
v, err := conversion.EnforcePtr(out)
if err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
key = path.Join(s.pathPrefix, key)
return s.conditionalDelete(ctx, key, out, v, preconditions, validateDeletion, cachedExistingObject)
}
func (s *store) conditionalDelete(
ctx context.Context, key string, out runtime.Object, v reflect.Value, preconditions *storage.Preconditions,
validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
getCurrentState := func() (*objState, error) {
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return nil, err
}
return s.getState(getResp, key, v, false)
}
var origState *objState
var err error
var origStateIsCurrent bool
if cachedExistingObject != nil {
origState, err = s.getStateFromObject(cachedExistingObject)
} else {
origState, err = getCurrentState()
origStateIsCurrent = true
}
if err != nil {
return err
}
for {
if preconditions != nil {
if err := preconditions.Check(key, origState.obj); err != nil {
if origStateIsCurrent {
return err
}
// It's possible we're working with stale data.
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
origStateIsCurrent = true
// Retry
continue
}
}
if err := validateDeletion(ctx, origState.obj); err != nil {
if origStateIsCurrent {
return err
}
// It's possible we're working with stale data.
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
origStateIsCurrent = true
// Retry
continue
}
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev),
).Then(
clientv3.OpDelete(key),
).Else(
clientv3.OpGet(key),
).Commit()
metrics.RecordEtcdRequestLatency("delete", getTypeName(out), startTime)
if err != nil {
return err
}
if !txnResp.Succeeded {
getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
klog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key)
origState, err = s.getState(getResp, key, v, false)
if err != nil {
return err
}
origStateIsCurrent = true
continue
}
return decode(s.codec, s.versioner, origState.data, out, origState.rev)
}
}
// GuaranteedUpdate implements storage.Interface.GuaranteedUpdate.
func (s *store) GuaranteedUpdate(
ctx context.Context, key string, out runtime.Object, ignoreNotFound bool,
preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, suggestion runtime.Object) error {
trace := utiltrace.New("GuaranteedUpdate etcd3", utiltrace.Field{"type", getTypeName(out)})
defer trace.LogIfLong(500 * time.Millisecond)
v, err := conversion.EnforcePtr(out)
if err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
key = path.Join(s.pathPrefix, key)
getCurrentState := func() (*objState, error) {
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return nil, err
}
return s.getState(getResp, key, v, ignoreNotFound)
}
var origState *objState
var mustCheckData bool
if suggestion != nil {
origState, err = s.getStateFromObject(suggestion)
mustCheckData = true
} else {
origState, err = getCurrentState()
}
if err != nil {
return err
}
trace.Step("initial value restored")
transformContext := authenticatedDataString(key)
for {
if err := preconditions.Check(key, origState.obj); err != nil {
// If our data is already up to date, return the error
if !mustCheckData {
return err
}
// It's possible we were working with stale data
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
// Retry
continue
}
ret, ttl, err := s.updateState(origState, tryUpdate)
if err != nil {
// If our data is already up to date, return the error
if !mustCheckData {
return err
}
// It's possible we were working with stale data
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
// Retry
continue
}
data, err := runtime.Encode(s.codec, ret)
if err != nil {
return err
}
if !origState.stale && bytes.Equal(data, origState.data) {
// if we skipped the original Get in this loop, we must refresh from
// etcd in order to be sure the data in the store is equivalent to
// our desired serialization
if mustCheckData {
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
if !bytes.Equal(data, origState.data) {
// original data changed, restart loop
continue
}
}
// recheck that the data from etcd is not stale before short-circuiting a write
if !origState.stale {
return decode(s.codec, s.versioner, origState.data, out, origState.rev)
}
}
newData, err := s.transformer.TransformToStorage(data, transformContext)
if err != nil {
return storage.NewInternalError(err.Error())
}
opts, err := s.ttlOpts(ctx, int64(ttl))
if err != nil {
return err
}
trace.Step("Transaction prepared")
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev),
).Then(
clientv3.OpPut(key, string(newData), opts...),
).Else(
clientv3.OpGet(key),
).Commit()
metrics.RecordEtcdRequestLatency("update", getTypeName(out), startTime)
if err != nil {
return err
}
trace.Step("Transaction committed")
if !txnResp.Succeeded {
getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
klog.V(4).Infof("GuaranteedUpdate of %s failed because of a conflict, going to retry", key)
origState, err = s.getState(getResp, key, v, ignoreNotFound)
if err != nil {
return err
}
trace.Step("Retry value restored")
mustCheckData = false
continue
}
putResp := txnResp.Responses[0].GetResponsePut()
return decode(s.codec, s.versioner, data, out, putResp.Header.Revision)
}
}
// GetToList implements storage.Interface.GetToList.
func (s *store) GetToList(ctx context.Context, key string, listOpts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := listOpts.ResourceVersion
match := listOpts.ResourceVersionMatch
pred := listOpts.Predicate
trace := utiltrace.New("GetToList etcd3",
utiltrace.Field{"key", key},
utiltrace.Field{"resourceVersion", resourceVersion},
utiltrace.Field{"resourceVersionMatch", match},
utiltrace.Field{"limit", pred.Limit},
utiltrace.Field{"continue", pred.Continue})
defer trace.LogIfLong(500 * time.Millisecond)
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
v, err := conversion.EnforcePtr(listPtr)
if err != nil || v.Kind() != reflect.Slice {
return fmt.Errorf("need ptr to slice: %v", err)
}
newItemFunc := getNewItemFunc(listObj, v)
key = path.Join(s.pathPrefix, key)
startTime := time.Now()
var opts []clientv3.OpOption
if len(resourceVersion) > 0 && match == metav1.ResourceVersionMatchExact {
rv, err := s.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
opts = append(opts, clientv3.WithRev(int64(rv)))
}
getResp, err := s.client.KV.Get(ctx, key, opts...)
metrics.RecordEtcdRequestLatency("get", getTypeName(listPtr), startTime)
if err != nil {
return err
}
if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
if len(getResp.Kvs) > 0 {
data, _, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
if err := appendListItem(v, data, uint64(getResp.Kvs[0].ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {
return err
}
}
// update version with cluster level revision
return s.versioner.UpdateList(listObj, uint64(getResp.Header.Revision), "", nil)
}
func getNewItemFunc(listObj runtime.Object, v reflect.Value) func() runtime.Object {
// For unstructured lists with a target group/version, preserve the group/version in the instantiated list items
if unstructuredList, isUnstructured := listObj.(*unstructured.UnstructuredList); isUnstructured {
if apiVersion := unstructuredList.GetAPIVersion(); len(apiVersion) > 0 {
return func() runtime.Object {
return &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": apiVersion}}
}
}
}
// Otherwise just instantiate an empty item
elem := v.Type().Elem()
return func() runtime.Object {
return reflect.New(elem).Interface().(runtime.Object)
}
}
func (s *store) Count(key string) (int64, error) {
key = path.Join(s.pathPrefix, key)
// We need to make sure the key ends with "/" so that we only get children "directories".
// e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three,
// while with prefix "/a/" will return only "/a/b" which is the correct answer.
if !strings.HasSuffix(key, "/") {
key += "/"
}
startTime := time.Now()
getResp, err := s.client.KV.Get(context.Background(), key, clientv3.WithRange(clientv3.GetPrefixRangeEnd(key)), clientv3.WithCountOnly())
metrics.RecordEtcdRequestLatency("listWithCount", key, startTime)
if err != nil {
return 0, err
}
return getResp.Count, nil
}
// continueToken is a simple structured object for encoding the state of a continue token.
// TODO: if we change the version of the encoded form, we can't start encoding the new version
// until all other servers are upgraded (i.e. we need to support rolling schema)
// This is a public API struct and cannot change.
type continueToken struct {
APIVersion string `json:"v"`
ResourceVersion int64 `json:"rv"`
StartKey string `json:"start"`
}
// decodeContinue transforms an encoded continue token into its start key and resource version.
// TODO: return a typed error that instructs clients that they must relist
func decodeContinue(continueValue, keyPrefix string) (fromKey string, rv int64, err error) {
data, err := base64.RawURLEncoding.DecodeString(continueValue)
if err != nil {
return "", 0, fmt.Errorf("continue key is not valid: %v", err)
}
var c continueToken
if err := json.Unmarshal(data, &c); err != nil {
return "", 0, fmt.Errorf("continue key is not valid: %v", err)
}
switch c.APIVersion {
case "meta.k8s.io/v1":
if c.ResourceVersion == 0 {
return "", 0, fmt.Errorf("continue key is not valid: incorrect encoded start resourceVersion (version meta.k8s.io/v1)")
}
if len(c.StartKey) == 0 {
return "", 0, fmt.Errorf("continue key is not valid: encoded start key empty (version meta.k8s.io/v1)")
}
// defend against path traversal attacks by clients - path.Clean will ensure that startKey cannot
// be at a higher level of the hierarchy, and so when we append the key prefix we will end up with
// continue start key that is fully qualified and cannot range over anything less specific than
// keyPrefix.
key := c.StartKey
if !strings.HasPrefix(key, "/") {
key = "/" + key
}
cleaned := path.Clean(key)
if cleaned != key {
return "", 0, fmt.Errorf("continue key is not valid: %s", c.StartKey)
}
return keyPrefix + cleaned[1:], c.ResourceVersion, nil
default:
return "", 0, fmt.Errorf("continue key is not valid: server does not recognize this encoded version %q", c.APIVersion)
}
}
// encodeContinue returns a string representing the encoded continuation of the current query.
func encodeContinue(key, keyPrefix string, resourceVersion int64) (string, error) {
nextKey := strings.TrimPrefix(key, keyPrefix)
if nextKey == key {
return "", fmt.Errorf("unable to encode next field: the key and key prefix do not match")
}
out, err := json.Marshal(&continueToken{APIVersion: "meta.k8s.io/v1", ResourceVersion: resourceVersion, StartKey: nextKey})
if err != nil {
return "", err
}
return base64.RawURLEncoding.EncodeToString(out), nil
}
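// exampleContinueRoundTrip is an illustrative sketch only; nothing in this package calls it.
// It shows how a continue token produced by encodeContinue is consumed by decodeContinue.
// The key prefix, object key, and revision below are made-up example values.
func exampleContinueRoundTrip() (string, int64, error) {
	const keyPrefix = "/registry/pods/"
	// Encode the position immediately after the last returned key, observed at revision 1234.
	token, err := encodeContinue(keyPrefix+"ns1/pod-100\x00", keyPrefix, 1234)
	if err != nil {
		return "", 0, err
	}
	// Decoding with the same prefix recovers the fully qualified start key and the revision.
	return decodeContinue(token, keyPrefix)
}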
// List implements storage.Interface.List.
func (s *store) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := opts.ResourceVersion
match := opts.ResourceVersionMatch
pred := opts.Predicate
trace := utiltrace.New("List etcd3",
utiltrace.Field{"key", key},
utiltrace.Field{"resourceVersion", resourceVersion},
utiltrace.Field{"resourceVersionMatch", match},
utiltrace.Field{"limit", pred.Limit},
utiltrace.Field{"continue", pred.Continue})
defer trace.LogIfLong(500 * time.Millisecond)
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
v, err := conversion.EnforcePtr(listPtr)
if err != nil || v.Kind() != reflect.Slice {
return fmt.Errorf("need ptr to slice: %v", err)
}
if s.pathPrefix != "" {
key = path.Join(s.pathPrefix, key)
}
// We need to make sure the key ends with "/" so that we only get children "directories".
// e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three,
// while with prefix "/a/" will return only "/a/b" which is the correct answer.
if !strings.HasSuffix(key, "/") {
key += "/"
}
keyPrefix := key
// set the appropriate clientv3 options to filter the returned data set
var paging bool
options := make([]clientv3.OpOption, 0, 4)
if s.pagingEnabled && pred.Limit > 0 {
paging = true
options = append(options, clientv3.WithLimit(pred.Limit))
}
newItemFunc := getNewItemFunc(listObj, v)
var fromRV *uint64
if len(resourceVersion) > 0 {
parsedRV, err := s.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
fromRV = &parsedRV
}
var returnedRV, continueRV, withRev int64
var continueKey string
switch {
case s.pagingEnabled && len(pred.Continue) > 0:
continueKey, continueRV, err = decodeContinue(pred.Continue, keyPrefix)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid continue token: %v", err))
}
if len(resourceVersion) > 0 && resourceVersion != "0" {
return apierrors.NewBadRequest("specifying resource version is not allowed when using continue")
}
rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)
options = append(options, clientv3.WithRange(rangeEnd))
key = continueKey
// If continueRV > 0, the LIST request needs a specific resource version.
// continueRV==0 is invalid.
// If continueRV < 0, the request is for the latest resource version.
if continueRV > 0 {
withRev = continueRV
returnedRV = continueRV
}
case s.pagingEnabled && pred.Limit > 0:
if fromRV != nil {
switch match {
case metav1.ResourceVersionMatchNotOlderThan:
// The not older than constraint is checked after we get a response from etcd,
// and returnedRV is then set to the revision we get from the etcd response.
case metav1.ResourceVersionMatchExact:
returnedRV = int64(*fromRV)
withRev = returnedRV
case "": // legacy case
if *fromRV > 0 {
returnedRV = int64(*fromRV)
withRev = returnedRV
}
default:
return fmt.Errorf("unknown ResourceVersionMatch value: %v", match)
}
}
rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)
options = append(options, clientv3.WithRange(rangeEnd))
default:
if fromRV != nil {
switch match {
case metav1.ResourceVersionMatchNotOlderThan:
// The not older than constraint is checked after we get a response from etcd,
// and returnedRV is then set to the revision we get from the etcd response.
case metav1.ResourceVersionMatchExact:
returnedRV = int64(*fromRV)
withRev = returnedRV
case "": // legacy case
default:
return fmt.Errorf("unknown ResourceVersionMatch value: %v", match)
}
}
options = append(options, clientv3.WithPrefix())
}
if withRev != 0 {
options = append(options, clientv3.WithRev(withRev))
}
// loop until we have filled the requested limit from etcd or there are no more results
var lastKey []byte
var hasMore bool
var getResp *clientv3.GetResponse
for {
startTime := time.Now()
getResp, err = s.client.KV.Get(ctx, key, options...)
metrics.RecordEtcdRequestLatency("list", getTypeName(listPtr), startTime)
if err != nil {
return interpretListError(err, len(pred.Continue) > 0, continueKey, keyPrefix)
}
if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
hasMore = getResp.More
if len(getResp.Kvs) == 0 && getResp.More {
return fmt.Errorf("no results were found, but etcd indicated there were more values remaining")
}
// avoid small allocations for the result slice, since this can be called in many
// different contexts and we don't know how significantly the result will be filtered
if pred.Empty() {
growSlice(v, len(getResp.Kvs))
} else {
growSlice(v, 2048, len(getResp.Kvs))
}
// take items from the response until the bucket is full, filtering as we go
for _, kv := range getResp.Kvs {
if paging && int64(v.Len()) >= pred.Limit {
hasMore = true
break
}
lastKey = kv.Key
data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(kv.Key))
if err != nil {
return storage.NewInternalErrorf("unable to transform key %q: %v", kv.Key, err)
}
if err := appendListItem(v, data, uint64(kv.ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {
return err
}
}
// indicate to the client which resource version was returned
if returnedRV == 0 {
returnedRV = getResp.Header.Revision
}
// no more results remain or we didn't request paging
if !hasMore || !paging {
break
}
// we're paging but we have filled our bucket
if int64(v.Len()) >= pred.Limit {
break
}
key = string(lastKey) + "\x00"
if withRev == 0 {
withRev = returnedRV
options = append(options, clientv3.WithRev(withRev))
}
}
// instruct the client to begin querying from immediately after the last key we returned
// we never return a key that the client wouldn't be allowed to see
if hasMore {
// we want to start immediately after the last key
next, err := encodeContinue(string(lastKey)+"\x00", keyPrefix, returnedRV)
if err != nil {
return err
}
var remainingItemCount *int64
// getResp.Count includes objects that do not match pred.
// Instead of returning inaccurate count for non-empty selectors, we return nil.
// Only set remainingItemCount if the predicate is empty.
if utilfeature.DefaultFeatureGate.Enabled(features.RemainingItemCount) {
if pred.Empty() {
c := int64(getResp.Count - pred.Limit)
remainingItemCount = &c
}
}
return s.versioner.UpdateList(listObj, uint64(returnedRV), next, remainingItemCount)
}
// no continuation
return s.versioner.UpdateList(listObj, uint64(returnedRV), "", nil)
}
// growSlice takes a slice value and grows its capacity up
// to the maximum of the passed sizes, capped at maxCapacity.
// Above maxCapacity, decisions about allocation are left
// to the Go runtime on append. This allows a caller to make an
// educated guess about the potential size of the total list while
// still avoiding overly aggressive initial allocation. If sizes
// is empty, maxCapacity is used as the capacity to grow to.
func growSlice(v reflect.Value, maxCapacity int, sizes ...int) {
cap := v.Cap()
max := cap
for _, size := range sizes {
if size > max {
max = size
}
}
if len(sizes) == 0 || max > maxCapacity {
max = maxCapacity
}
if max <= cap {
return
}
if v.Len() > 0 {
extra := reflect.MakeSlice(v.Type(), 0, max)
reflect.Copy(extra, v)
v.Set(extra)
} else {
extra := reflect.MakeSlice(v.Type(), 0, max)
v.Set(extra)
}
}
// Watch implements storage.Interface.Watch.
func (s *store) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return s.watch(ctx, key, opts, false)
}
// WatchList implements storage.Interface.WatchList.
func (s *store) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return s.watch(ctx, key, opts, true)
}
func (s *store) watch(ctx context.Context, key string, opts storage.ListOptions, recursive bool) (watch.Interface, error) {
rev, err := s.versioner.ParseResourceVersion(opts.ResourceVersion)
if err != nil {
return nil, err
}
key = path.Join(s.pathPrefix, key)
return s.watcher.Watch(ctx, key, int64(rev), recursive, opts.ProgressNotify, opts.Predicate)
}
func (s *store) getState(getResp *clientv3.GetResponse, key string, v reflect.Value, ignoreNotFound bool) (*objState, error) {
state := &objState{
meta: &storage.ResponseMeta{},
}
if u, ok := v.Addr().Interface().(runtime.Unstructured); ok {
state.obj = u.NewEmptyInstance()
} else {
state.obj = reflect.New(v.Type()).Interface().(runtime.Object)
}
if len(getResp.Kvs) == 0 {
if !ignoreNotFound {
return nil, storage.NewKeyNotFoundError(key, 0)
}
if err := runtime.SetZeroValue(state.obj); err != nil {
return nil, err
}
} else {
data, stale, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))
if err != nil {
return nil, storage.NewInternalError(err.Error())
}
state.rev = getResp.Kvs[0].ModRevision
state.meta.ResourceVersion = uint64(state.rev)
state.data = data
state.stale = stale
if err := decode(s.codec, s.versioner, state.data, state.obj, state.rev); err != nil {
return nil, err
}
}
return state, nil
}
func (s *store) getStateFromObject(obj runtime.Object) (*objState, error) {
state := &objState{
obj: obj,
meta: &storage.ResponseMeta{},
}
rv, err := s.versioner.ObjectResourceVersion(obj)
if err != nil {
return nil, fmt.Errorf("couldn't get resource version: %v", err)
}
state.rev = int64(rv)
state.meta.ResourceVersion = uint64(state.rev)
// Compute the serialized form - for that we need to temporarily clean
// its resource version field (those are not stored in etcd).
if err := s.versioner.PrepareObjectForStorage(obj); err != nil {
return nil, fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
state.data, err = runtime.Encode(s.codec, obj)
if err != nil {
return nil, err
}
if err := s.versioner.UpdateObject(state.obj, uint64(rv)); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
return state, nil
}
func (s *store) updateState(st *objState, userUpdate storage.UpdateFunc) (runtime.Object, uint64, error) {
ret, ttlPtr, err := userUpdate(st.obj, *st.meta)
if err != nil {
return nil, 0, err
}
if err := s.versioner.PrepareObjectForStorage(ret); err != nil {
return nil, 0, fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
var ttl uint64
if ttlPtr != nil {
ttl = *ttlPtr
}
return ret, ttl, nil
}
// ttlOpts returns client options based on the given ttl.
// If ttl is non-zero, the returned options attach the key to a lease with a TTL of roughly the same length.
func (s *store) ttlOpts(ctx context.Context, ttl int64) ([]clientv3.OpOption, error) {
if ttl == 0 {
return nil, nil
}
id, err := s.leaseManager.GetLease(ctx, ttl)
if err != nil {
return nil, err
}
return []clientv3.OpOption{clientv3.WithLease(id)}, nil
}
// validateMinimumResourceVersion returns a 'too large resource version' error when the provided minimumResourceVersion is
// greater than the most recent actualRevision available from storage.
func (s *store) validateMinimumResourceVersion(minimumResourceVersion string, actualRevision uint64) error {
if minimumResourceVersion == "" {
return nil
}
minimumRV, err := s.versioner.ParseResourceVersion(minimumResourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
// Enforce the storage.Interface guarantee that the resource version of the returned data
// "will be at least 'resourceVersion'".
if minimumRV > actualRevision {
return storage.NewTooLargeResourceVersionError(minimumRV, actualRevision, 0)
}
return nil
}
// decode decodes the byte value into an object and sets the object's resource version to rev.
// On success, objPtr is set to the decoded object.
func decode(codec runtime.Codec, versioner storage.Versioner, value []byte, objPtr runtime.Object, rev int64) error {
if _, err := conversion.EnforcePtr(objPtr); err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
_, _, err := codec.Decode(value, nil, objPtr)
if err != nil {
return err
}
// being unable to set the version does not prevent the object from being extracted
if err := versioner.UpdateObject(objPtr, uint64(rev)); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
return nil
}
// appendListItem decodes and appends the object (if it passes the filter) to v, which must be a slice.
func appendListItem(v reflect.Value, data []byte, rev uint64, pred storage.SelectionPredicate, codec runtime.Codec, versioner storage.Versioner, newItemFunc func() runtime.Object) error {
obj, _, err := codec.Decode(data, nil, newItemFunc())
if err != nil {
return err
}
// being unable to set the version does not prevent the object from being extracted
if err := versioner.UpdateObject(obj, rev); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
if matched, err := pred.Matches(obj); err == nil && matched {
v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem()))
}
return nil
}
func notFound(key string) clientv3.Cmp {
return clientv3.Compare(clientv3.ModRevision(key), "=", 0)
}
// getTypeName returns the type name of an object for reporting purposes.
func getTypeName(obj interface{}) string {
return reflect.TypeOf(obj).String()
}
| staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go | 1 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.9986299276351929,
0.14571793377399445,
0.00016466820670757443,
0.00035331075196154416,
0.3433558940887451
] |
{
"id": 5,
"code_window": [
"\t} else {\n",
"\t\torigState, err = getCurrentState()\n",
"\t}\n",
"\tif err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\torigStateIsCurrent = true\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "add",
"edit_start_line_idx": 314
} | {% panel style="success", title="Providing Feedback" %}
**Provide feedback at the [survey](https://www.surveymonkey.com/r/JH35X82)**
{% endpanel %}
{% panel style="warning", title="Experimental" %}
**Content in this chapter is experimental and will evolve based on user feedback.**
Leave feedback on the conventions by creating an issue in the [kubectl](https://github.com/kubernetes/kubectl/issues)
GitHub repository.
Also provide feedback on new kubectl docs at the [survey](https://www.surveymonkey.com/r/JH35X82)
{% endpanel %}
{% panel style="info", title="TL;DR" %}
- Publish a White Box Application as a Base for other users to Kustomize
{% endpanel %}
# Publishing Bases
## Motivation
Users may want to run a common White Box Application without writing the Resource Config
for the Application from scratch. Instead they may want to consume ready-made Resource
Config published specifically for the White Box Application, and add customizations for
their specific needs.
- Run a White Box Application (e.g. Cassandra, MongoDB) instance from ready-made Resource Config
- Publish Resource Config to run an Application
## Publishing a White Box Base
{% method %}
White Box Applications may be published to a URL and consumed as Bases in a `kustomization.yaml`. They
can then be consumed in the following manner.
**Use Case:** Run a White Box Application published to GitHub.
{% sample lang="yaml" %}
**Input:** The kustomization.yaml file
```yaml
# kustomization.yaml
bases:
# GitHub URL
- github.com/kubernetes-sigs/kustomize/examples/multibases/dev/?ref=v1.0.6
```
**Applied:** The Resource that is Applied to the cluster
```yaml
# Resource comes from the Remote Base
apiVersion: v1
kind: Pod
metadata:
labels:
app: myapp
name: dev-myapp-pod
spec:
containers:
- image: nginx:1.7.9
name: nginx
```
{% endmethod %}
## Customizing White Box Bases
The White Box Application may be customized using the same techniques described in
[Bases and Variations](../app_customization/bases_and_variants.md).
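For example, a consumer may layer its own customizations on top of the remote Base in its
`kustomization.yaml`. The sketch below is illustrative only; the name prefix and label values
are made up and are not part of the published Base.
```yaml
# kustomization.yaml
bases:
# GitHub URL of the published Base
- github.com/kubernetes-sigs/kustomize/examples/multibases/dev/?ref=v1.0.6

# Illustrative customizations layered on top of the Base
namePrefix: staging-
commonLabels:
  environment: staging
```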
## Versioning White Box Bases
White Box Bases may be versioned using the well known versioning techniques provided by Git.
**Tag:**
Bases may be versioned by applying a tag to the repo and modifying the url to point to the tag:
`github.com/kubernetes-sigs/kustomize/examples/multibases?ref=v1.0.6`
**Branch:**
Bases may be versioned by creating a branch and modifying the url to point to the branch:
`github.com/Liujingfang1/kustomize/examples/helloWorld?ref=repoUrl2`
**Commit:**
If the White Box Base has not been explicitly versioned by the maintainer, users may pin the
base to a specific commit:
`github.com/Liujingfang1/kustomize/examples/helloWorld?ref=7050a45134e9848fca214ad7e7007e96e5042c03`
## Forking a White Box Base
Users may fork a White Box Base hosted on GitHub by forking the GitHub repo. This allows the user
complete control over changes to the Base. Users should periodically pull changes from the
upstream repo back into the fork to get bug fixes and optimizations.
| staging/src/k8s.io/kubectl/docs/book/pages/app_composition_and_deployment/publishing_bases.md | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00017353634757455438,
0.00016962856170721352,
0.00016350398072972894,
0.00016971610602922738,
0.000003190289817212033
] |
{
"id": 5,
"code_window": [
"\t} else {\n",
"\t\torigState, err = getCurrentState()\n",
"\t}\n",
"\tif err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\torigStateIsCurrent = true\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "add",
"edit_start_line_idx": 314
} | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"context"
"errors"
"testing"
policyv1beta1 "k8s.io/api/policy/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/client-go/kubernetes/fake"
podapi "k8s.io/kubernetes/pkg/api/pod"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/policy"
)
func TestEviction(t *testing.T) {
testcases := []struct {
name string
pdbs []runtime.Object
eviction *policy.Eviction
badNameInURL bool
expectError bool
expectDeleted bool
podPhase api.PodPhase
podName string
}{
{
name: "matching pdbs with no disruptions allowed, pod running",
pdbs: []runtime.Object{&policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: policyv1beta1.PodDisruptionBudgetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"a": "true"}}},
Status: policyv1beta1.PodDisruptionBudgetStatus{DisruptionsAllowed: 0},
}},
eviction: &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "t1", Namespace: "default"}, DeleteOptions: metav1.NewDeleteOptions(0)},
expectError: true,
podPhase: api.PodRunning,
podName: "t1",
},
{
name: "matching pdbs with no disruptions allowed, pod pending",
pdbs: []runtime.Object{&policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: policyv1beta1.PodDisruptionBudgetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"a": "true"}}},
Status: policyv1beta1.PodDisruptionBudgetStatus{DisruptionsAllowed: 0},
}},
eviction: &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "t2", Namespace: "default"}, DeleteOptions: metav1.NewDeleteOptions(0)},
expectError: false,
podPhase: api.PodPending,
expectDeleted: true,
podName: "t2",
},
{
name: "matching pdbs with no disruptions allowed, pod succeeded",
pdbs: []runtime.Object{&policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: policyv1beta1.PodDisruptionBudgetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"a": "true"}}},
Status: policyv1beta1.PodDisruptionBudgetStatus{DisruptionsAllowed: 0},
}},
eviction: &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "t3", Namespace: "default"}, DeleteOptions: metav1.NewDeleteOptions(0)},
expectError: false,
podPhase: api.PodSucceeded,
expectDeleted: true,
podName: "t3",
},
{
name: "matching pdbs with no disruptions allowed, pod failed",
pdbs: []runtime.Object{&policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: policyv1beta1.PodDisruptionBudgetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"a": "true"}}},
Status: policyv1beta1.PodDisruptionBudgetStatus{DisruptionsAllowed: 0},
}},
eviction: &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "t4", Namespace: "default"}, DeleteOptions: metav1.NewDeleteOptions(0)},
expectError: false,
podPhase: api.PodFailed,
expectDeleted: true,
podName: "t4",
},
{
name: "matching pdbs with disruptions allowed",
pdbs: []runtime.Object{&policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: policyv1beta1.PodDisruptionBudgetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"a": "true"}}},
Status: policyv1beta1.PodDisruptionBudgetStatus{DisruptionsAllowed: 1},
}},
eviction: &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "t5", Namespace: "default"}, DeleteOptions: metav1.NewDeleteOptions(0)},
expectDeleted: true,
podName: "t5",
},
{
name: "non-matching pdbs",
pdbs: []runtime.Object{&policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: policyv1beta1.PodDisruptionBudgetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"b": "true"}}},
Status: policyv1beta1.PodDisruptionBudgetStatus{DisruptionsAllowed: 0},
}},
eviction: &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "t6", Namespace: "default"}, DeleteOptions: metav1.NewDeleteOptions(0)},
expectDeleted: true,
podName: "t6",
},
{
name: "matching pdbs with disruptions allowed but bad name in Url",
pdbs: []runtime.Object{&policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: policyv1beta1.PodDisruptionBudgetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"a": "true"}}},
Status: policyv1beta1.PodDisruptionBudgetStatus{DisruptionsAllowed: 1},
}},
badNameInURL: true,
eviction: &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "t7", Namespace: "default"}, DeleteOptions: metav1.NewDeleteOptions(0)},
expectError: true,
podName: "t7",
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
testContext := genericapirequest.WithNamespace(genericapirequest.NewContext(), metav1.NamespaceDefault)
storage, _, statusStorage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
pod := validNewPod()
pod.Name = tc.podName
pod.Labels = map[string]string{"a": "true"}
pod.Spec.NodeName = "foo"
if _, err := storage.Create(testContext, pod, nil, &metav1.CreateOptions{}); err != nil {
t.Error(err)
}
if tc.podPhase != "" {
pod.Status.Phase = tc.podPhase
_, _, err := statusStorage.Update(testContext, pod.Name, rest.DefaultUpdatedObjectInfo(pod), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
}
client := fake.NewSimpleClientset(tc.pdbs...)
evictionRest := newEvictionStorage(storage.Store, client.PolicyV1beta1())
name := pod.Name
if tc.badNameInURL {
name += "bad-name"
}
_, err := evictionRest.Create(testContext, name, tc.eviction, nil, &metav1.CreateOptions{})
if (err != nil) != tc.expectError {
t.Errorf("expected error=%v, got %v; name %v", tc.expectError, err, pod.Name)
return
}
if tc.badNameInURL {
if err == nil {
t.Error("expected error here, but got nil")
return
}
if err.Error() != "name in URL does not match name in Eviction object" {
t.Errorf("got unexpected error: %v", err)
}
}
if tc.expectError {
return
}
existingPod, err := storage.Get(testContext, pod.Name, &metav1.GetOptions{})
if tc.expectDeleted {
if !apierrors.IsNotFound(err) {
t.Errorf("expected to be deleted, lookup returned %#v", existingPod)
}
return
} else if apierrors.IsNotFound(err) {
t.Errorf("expected graceful deletion, got %v", err)
return
}
if err != nil {
t.Errorf("%#v", err)
return
}
if existingPod.(*api.Pod).DeletionTimestamp == nil {
t.Errorf("expected gracefully deleted pod with deletionTimestamp set, got %#v", existingPod)
}
})
}
}
func TestEvictionIgnorePDB(t *testing.T) {
testcases := []struct {
name string
pdbs []runtime.Object
eviction *policy.Eviction
expectError bool
podPhase api.PodPhase
podName string
expectedDeleteCount int
podTerminating bool
prc *api.PodCondition
}{
{
name: "pdbs No disruptions allowed, pod pending, first delete conflict, pod still pending, pod deleted successfully",
pdbs: []runtime.Object{&policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: policyv1beta1.PodDisruptionBudgetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"a": "true"}}},
Status: policyv1beta1.PodDisruptionBudgetStatus{DisruptionsAllowed: 0},
}},
eviction: &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "t1", Namespace: "default"}, DeleteOptions: metav1.NewDeleteOptions(0)},
expectError: false,
podPhase: api.PodPending,
podName: "t1",
expectedDeleteCount: 3,
},
// This test case is critical. If it is removed or broken we may
// regress and allow a pod to be deleted without checking PDBs when the
// pod should not be deleted.
{
name: "pdbs No disruptions allowed, pod pending, first delete conflict, pod becomes running, continueToPDBs",
pdbs: []runtime.Object{&policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: policyv1beta1.PodDisruptionBudgetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"a": "true"}}},
Status: policyv1beta1.PodDisruptionBudgetStatus{DisruptionsAllowed: 0},
}},
eviction: &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "t2", Namespace: "default"}, DeleteOptions: metav1.NewDeleteOptions(0)},
expectError: true,
podPhase: api.PodPending,
podName: "t2",
expectedDeleteCount: 1,
},
{
name: "pdbs disruptions allowed, pod pending, first delete conflict, pod becomes running, continueToPDBs",
pdbs: []runtime.Object{&policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: policyv1beta1.PodDisruptionBudgetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"a": "true"}}},
Status: policyv1beta1.PodDisruptionBudgetStatus{DisruptionsAllowed: 1},
}},
eviction: &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "t3", Namespace: "default"}, DeleteOptions: metav1.NewDeleteOptions(0)},
expectError: false,
podPhase: api.PodPending,
podName: "t3",
expectedDeleteCount: 2,
},
{
name: "pod pending, always conflict on delete",
pdbs: []runtime.Object{&policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: policyv1beta1.PodDisruptionBudgetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"a": "true"}}},
Status: policyv1beta1.PodDisruptionBudgetStatus{DisruptionsAllowed: 0},
}},
eviction: &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "t4", Namespace: "default"}, DeleteOptions: metav1.NewDeleteOptions(0)},
expectError: true,
podPhase: api.PodPending,
podName: "t4",
expectedDeleteCount: EvictionsRetry.Steps,
},
{
name: "pod pending, always conflict on delete, user provided ResourceVersion constraint",
pdbs: []runtime.Object{&policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: policyv1beta1.PodDisruptionBudgetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"a": "true"}}},
Status: policyv1beta1.PodDisruptionBudgetStatus{DisruptionsAllowed: 0},
}},
eviction: &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "t5", Namespace: "default"}, DeleteOptions: metav1.NewRVDeletionPrecondition("userProvided")},
expectError: true,
podPhase: api.PodPending,
podName: "t5",
expectedDeleteCount: 1,
},
{
name: "matching pdbs with no disruptions allowed, pod terminating",
pdbs: []runtime.Object{&policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: policyv1beta1.PodDisruptionBudgetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"a": "true"}}},
Status: policyv1beta1.PodDisruptionBudgetStatus{DisruptionsAllowed: 0},
}},
eviction: &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "t6", Namespace: "default"}, DeleteOptions: metav1.NewDeleteOptions(300)},
expectError: false,
podName: "t6",
expectedDeleteCount: 1,
podTerminating: true,
},
{
name: "matching pdbs with no disruptions allowed, pod running, pod healthy, unhealthy pod not ours",
pdbs: []runtime.Object{&policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: policyv1beta1.PodDisruptionBudgetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"a": "true"}}},
Status: policyv1beta1.PodDisruptionBudgetStatus{
// This simulates 3 pods desired, our pod healthy, unhealthy pod is not ours.
DisruptionsAllowed: 0,
CurrentHealthy: 2,
DesiredHealthy: 2,
},
}},
eviction: &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "t7", Namespace: "default"}, DeleteOptions: metav1.NewDeleteOptions(0)},
expectError: true,
podName: "t7",
expectedDeleteCount: 0,
podTerminating: false,
podPhase: api.PodRunning,
prc: &api.PodCondition{
Type: api.PodReady,
Status: api.ConditionTrue,
},
},
{
name: "matching pdbs with no disruptions allowed, pod running, pod unhealthy, unhealthy pod ours",
pdbs: []runtime.Object{&policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: policyv1beta1.PodDisruptionBudgetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"a": "true"}}},
Status: policyv1beta1.PodDisruptionBudgetStatus{
// This simulates 3 pods desired, our pod unhealthy
DisruptionsAllowed: 0,
CurrentHealthy: 2,
DesiredHealthy: 2,
},
}},
eviction: &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "t8", Namespace: "default"}, DeleteOptions: metav1.NewDeleteOptions(0)},
expectError: false,
podName: "t8",
expectedDeleteCount: 1,
podTerminating: false,
podPhase: api.PodRunning,
prc: &api.PodCondition{
Type: api.PodReady,
Status: api.ConditionFalse,
},
},
{
// This case should return the 529 retry error.
name: "matching pdbs with no disruptions allowed, pod running, pod unhealthy, unhealthy pod ours, resource version conflict",
pdbs: []runtime.Object{&policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: policyv1beta1.PodDisruptionBudgetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"a": "true"}}},
Status: policyv1beta1.PodDisruptionBudgetStatus{
// This simulates 3 pods desired, our pod unhealthy
DisruptionsAllowed: 0,
CurrentHealthy: 2,
DesiredHealthy: 2,
},
}},
eviction: &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "t9", Namespace: "default"}, DeleteOptions: metav1.NewDeleteOptions(0)},
expectError: true,
podName: "t9",
expectedDeleteCount: 1,
podTerminating: false,
podPhase: api.PodRunning,
prc: &api.PodCondition{
Type: api.PodReady,
Status: api.ConditionFalse,
},
},
{
// This case should return the 529 retry error.
name: "matching pdbs with no disruptions allowed, pod running, pod unhealthy, unhealthy pod ours, other error on delete",
pdbs: []runtime.Object{&policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: policyv1beta1.PodDisruptionBudgetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"a": "true"}}},
Status: policyv1beta1.PodDisruptionBudgetStatus{
// This simulates 3 pods desired, our pod unhealthy
DisruptionsAllowed: 0,
CurrentHealthy: 2,
DesiredHealthy: 2,
},
}},
eviction: &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "t10", Namespace: "default"}, DeleteOptions: metav1.NewDeleteOptions(0)},
expectError: true,
podName: "t10",
expectedDeleteCount: 1,
podTerminating: false,
podPhase: api.PodRunning,
prc: &api.PodCondition{
Type: api.PodReady,
Status: api.ConditionFalse,
},
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
testContext := genericapirequest.WithNamespace(genericapirequest.NewContext(), metav1.NamespaceDefault)
ms := &mockStore{
deleteCount: 0,
}
pod := validNewPod()
pod.Name = tc.podName
pod.Labels = map[string]string{"a": "true"}
pod.Spec.NodeName = "foo"
if tc.podPhase != "" {
pod.Status.Phase = tc.podPhase
}
if tc.podTerminating {
currentTime := metav1.Now()
pod.ObjectMeta.DeletionTimestamp = ¤tTime
}
// Setup pod condition
if tc.prc != nil {
if !podapi.UpdatePodCondition(&pod.Status, tc.prc) {
t.Fatalf("Unable to update pod ready condition")
}
}
client := fake.NewSimpleClientset(tc.pdbs...)
evictionRest := newEvictionStorage(ms, client.PolicyV1beta1())
name := pod.Name
ms.pod = pod
_, err := evictionRest.Create(testContext, name, tc.eviction, nil, &metav1.CreateOptions{})
if (err != nil) != tc.expectError {
t.Errorf("expected error=%v, got %v; name %v", tc.expectError, err, pod.Name)
return
}
if tc.expectedDeleteCount != ms.deleteCount {
t.Errorf("expected delete count=%v, got %v; name %v", tc.expectedDeleteCount, ms.deleteCount, pod.Name)
}
})
}
}
func TestEvictionDryRun(t *testing.T) {
testcases := []struct {
name string
evictionOptions *metav1.DeleteOptions
requestOptions *metav1.CreateOptions
pdbs []runtime.Object
}{
{
name: "just request-options",
requestOptions: &metav1.CreateOptions{DryRun: []string{"All"}},
evictionOptions: &metav1.DeleteOptions{},
},
{
name: "just eviction-options",
requestOptions: &metav1.CreateOptions{},
evictionOptions: &metav1.DeleteOptions{DryRun: []string{"All"}},
},
{
name: "both options",
evictionOptions: &metav1.DeleteOptions{DryRun: []string{"All"}},
requestOptions: &metav1.CreateOptions{DryRun: []string{"All"}},
},
{
name: "with pdbs",
evictionOptions: &metav1.DeleteOptions{DryRun: []string{"All"}},
requestOptions: &metav1.CreateOptions{DryRun: []string{"All"}},
pdbs: []runtime.Object{&policyv1beta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: policyv1beta1.PodDisruptionBudgetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"a": "true"}}},
Status: policyv1beta1.PodDisruptionBudgetStatus{DisruptionsAllowed: 1},
}},
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
testContext := genericapirequest.WithNamespace(genericapirequest.NewContext(), metav1.NamespaceDefault)
storage, _, _, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
pod := validNewPod()
pod.Labels = map[string]string{"a": "true"}
pod.Spec.NodeName = "foo"
if _, err := storage.Create(testContext, pod, nil, &metav1.CreateOptions{}); err != nil {
t.Error(err)
}
client := fake.NewSimpleClientset(tc.pdbs...)
evictionRest := newEvictionStorage(storage.Store, client.PolicyV1beta1())
eviction := &policy.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, DeleteOptions: tc.evictionOptions}
_, err := evictionRest.Create(testContext, pod.Name, eviction, nil, tc.requestOptions)
if err != nil {
t.Fatalf("Failed to run eviction: %v", err)
}
})
}
}
func resource(resource string) schema.GroupResource {
return schema.GroupResource{Group: "", Resource: resource}
}
type mockStore struct {
deleteCount int
pod *api.Pod
}
func (ms *mockStore) mutatorDeleteFunc(count int, options *metav1.DeleteOptions) (runtime.Object, bool, error) {
if ms.pod.Name == "t4" {
// Always return error for this pod
return nil, false, apierrors.NewConflict(resource("tests"), "2", errors.New("message"))
}
if ms.pod.Name == "t6" || ms.pod.Name == "t8" {
// t6: This pod has a deletionTimestamp and should not raise conflict on delete
// t8: This pod should not have a resource conflict.
return nil, true, nil
}
if ms.pod.Name == "t10" {
return nil, false, apierrors.NewBadRequest("test designed to error")
}
if count == 1 {
// This is a hack to ensure that some test pods don't change phase
// but do change resource version
if ms.pod.Name != "t1" && ms.pod.Name != "t5" {
ms.pod.Status.Phase = api.PodRunning
}
ms.pod.ResourceVersion = "999"
// Always return conflict on the first attempt
return nil, false, apierrors.NewConflict(resource("tests"), "2", errors.New("message"))
}
// Compare and enforce the deleteOptions' ResourceVersion precondition
if options == nil || options.Preconditions == nil || options.Preconditions.ResourceVersion == nil {
return nil, true, nil
} else if *options.Preconditions.ResourceVersion != "1000" {
// Here we're simulating that the pod has changed resource version again.
// Pod "t4" should make it here; this validates that we get the latest
// resourceVersion of the pod and can successfully delete on the next deletion
// attempt after this one.
ms.pod.ResourceVersion = "1000"
return nil, false, apierrors.NewConflict(resource("tests"), "2", errors.New("message"))
}
return nil, true, nil
}
func (ms *mockStore) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) {
ms.deleteCount++
return ms.mutatorDeleteFunc(ms.deleteCount, options)
}
func (ms *mockStore) Watch(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error) {
return nil, nil
}
func (ms *mockStore) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) {
return nil, false, nil
}
func (ms *mockStore) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
return ms.pod, nil
}
func (ms *mockStore) New() runtime.Object {
return nil
}
func (ms *mockStore) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {
return nil, nil
}
func (ms *mockStore) DeleteCollection(ctx context.Context, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error) {
return nil, nil
}
func (ms *mockStore) List(ctx context.Context, options *metainternalversion.ListOptions) (runtime.Object, error) {
return nil, nil
}
func (ms *mockStore) NewList() runtime.Object {
return nil
}
func (ms *mockStore) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) {
return nil, nil
}
| pkg/registry/core/pod/storage/eviction_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.05894804000854492,
0.0015231958823278546,
0.0001654524530749768,
0.00016888391110114753,
0.007665134500712156
] |
{
"id": 5,
"code_window": [
"\t} else {\n",
"\t\torigState, err = getCurrentState()\n",
"\t}\n",
"\tif err != nil {\n",
"\t\treturn err\n"
],
"labels": [
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\torigStateIsCurrent = true\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "add",
"edit_start_line_idx": 314
} | package godirwalk
// ReadDirents returns a sortable slice of pointers to Dirent structures, each
// representing the file system name and mode type for one of the immediate
// descendant of the specified directory. If the specified directory is a
// symbolic link, it will be resolved.
//
// If an optional scratch buffer is provided that is at least one page of
// memory, it will be used when reading directory entries from the file
// system. If you plan on calling this function in a loop, you will have
// significantly better performance if you allocate a scratch buffer and use it
// each time you call this function.
//
// children, err := godirwalk.ReadDirents(osDirname, nil)
// if err != nil {
// return nil, errors.Wrap(err, "cannot get list of directory children")
// }
// sort.Sort(children)
// for _, child := range children {
// fmt.Printf("%s %s\n", child.ModeType, child.Name)
// }
func ReadDirents(osDirname string, scratchBuffer []byte) (Dirents, error) {
return readDirents(osDirname, scratchBuffer)
}
// ReadDirnames returns a slice of strings, representing the immediate
// descendants of the specified directory. If the specified directory is a
// symbolic link, it will be resolved.
//
// If an optional scratch buffer is provided that is at least one page of
// memory, it will be used when reading directory entries from the file
// system. If you plan on calling this function in a loop, you will have
// significantly better performance if you allocate a scratch buffer and use it
// each time you call this function.
//
// Note that this function, depending on operating system, may or may not invoke
// the ReadDirents function, in order to prepare the list of immediate
// descendants. Therefore, if your program needs both the names and the file
// system mode types of descendants, it will always be faster to invoke
// ReadDirents directly, rather than calling this function, then looping over
// the results and calling os.Stat or os.LStat for each entry.
//
// children, err := godirwalk.ReadDirnames(osDirname, nil)
// if err != nil {
// return nil, errors.Wrap(err, "cannot get list of directory children")
// }
// sort.Strings(children)
// for _, child := range children {
// fmt.Printf("%s\n", child)
// }
func ReadDirnames(osDirname string, scratchBuffer []byte) ([]string, error) {
return readDirnames(osDirname, scratchBuffer)
}
| vendor/github.com/karrick/godirwalk/readdir.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.0003243365790694952,
0.00021505076438188553,
0.00016492226859554648,
0.00018079084111377597,
0.00006199338531587273
] |
{
"id": 6,
"code_window": [
"\ttransformContext := authenticatedDataString(key)\n",
"\tfor {\n",
"\t\tif err := preconditions.Check(key, origState.obj); err != nil {\n",
"\t\t\t// If our data is already up to date, return the error\n",
"\t\t\tif !mustCheckData {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\n",
"\t\t\t// It's possible we were working with stale data\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif origStateIsCurrent {\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 324
} | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
)
// Versioner abstracts setting and retrieving metadata fields from database response
// onto the object ot list. It is required to maintain storage invariants - updating an
// object twice with the same data except for the ResourceVersion and SelfLink must be
// a no-op. A resourceVersion of type uint64 is a 'raw' resourceVersion,
// intended to be sent directly to or from the backend. A resourceVersion of
// type string is a 'safe' resourceVersion, intended for consumption by users.
type Versioner interface {
// UpdateObject sets storage metadata into an API object. Returns an error if the object
// cannot be updated correctly. May return nil if the requested object does not need metadata
// from database.
UpdateObject(obj runtime.Object, resourceVersion uint64) error
// UpdateList sets the resource version into an API list object. Returns an error if the object
// cannot be updated correctly. May return nil if the requested object does not need metadata from
// database. continueValue is optional and indicates that more results are available if the client
// passes that value to the server in a subsequent call. remainingItemCount indicates the number
// of remaining objects if the list is partial. The remainingItemCount field is omitted during
// serialization if it is set to nil.
UpdateList(obj runtime.Object, resourceVersion uint64, continueValue string, remainingItemCount *int64) error
// PrepareObjectForStorage should set SelfLink and ResourceVersion to the empty value. Should
// return an error if the specified object cannot be updated.
PrepareObjectForStorage(obj runtime.Object) error
// ObjectResourceVersion returns the resource version (for persistence) of the specified object.
// Should return an error if the specified object does not have a persistable version.
ObjectResourceVersion(obj runtime.Object) (uint64, error)
// ParseResourceVersion takes a resource version argument and
// converts it to the form used by the storage backend; the result is what should be passed to helper.Watch().
// Because resourceVersion is an opaque value, the default watch
// behavior for non-zero watch is to watch the next value (if you pass
// "1", you will see updates from "2" onwards).
ParseResourceVersion(resourceVersion string) (uint64, error)
}
// ResponseMeta contains information about the database metadata that is associated with
// an object. It abstracts the actual underlying objects to prevent coupling with concrete
// database and to improve testability.
type ResponseMeta struct {
// TTL is the time to live of the node that contained the returned object. It may be
// zero or negative in some cases (objects may be expired after the requested
// expiration time due to server lag).
TTL int64
// The resource version of the node that contained the returned object.
ResourceVersion uint64
}
// IndexerFunc is a function that for a given object computes
// <value of an index> for a particular <index>.
type IndexerFunc func(obj runtime.Object) string
// IndexerFuncs is a mapping from <index name> to function that
// for a given object computes <value for that index>.
type IndexerFuncs map[string]IndexerFunc
// Everything accepts all objects.
var Everything = SelectionPredicate{
Label: labels.Everything(),
Field: fields.Everything(),
}
// MatchValue defines a pair (<index name>, <value for that index>).
type MatchValue struct {
IndexName string
Value string
}
// Pass an UpdateFunc to Interface.GuaranteedUpdate to make an update
// that is guaranteed to succeed.
// See the comment for GuaranteedUpdate for more details.
type UpdateFunc func(input runtime.Object, res ResponseMeta) (output runtime.Object, ttl *uint64, err error)
// ValidateObjectFunc is a function to act on a given object. An error may be returned
// if the hook cannot be completed. The function may NOT transform the provided
// object.
type ValidateObjectFunc func(ctx context.Context, obj runtime.Object) error
// ValidateAllObjectFunc is a "admit everything" instance of ValidateObjectFunc.
func ValidateAllObjectFunc(ctx context.Context, obj runtime.Object) error {
return nil
}
// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
type Preconditions struct {
// Specifies the target UID.
// +optional
UID *types.UID `json:"uid,omitempty"`
// Specifies the target ResourceVersion
// +optional
ResourceVersion *string `json:"resourceVersion,omitempty"`
}
// NewUIDPreconditions returns a Preconditions with UID set.
func NewUIDPreconditions(uid string) *Preconditions {
u := types.UID(uid)
return &Preconditions{UID: &u}
}
func (p *Preconditions) Check(key string, obj runtime.Object) error {
if p == nil {
return nil
}
objMeta, err := meta.Accessor(obj)
if err != nil {
return NewInternalErrorf(
"can't enforce preconditions %v on un-introspectable object %v, got error: %v",
*p,
obj,
err)
}
if p.UID != nil && *p.UID != objMeta.GetUID() {
err := fmt.Sprintf(
"Precondition failed: UID in precondition: %v, UID in object meta: %v",
*p.UID,
objMeta.GetUID())
return NewInvalidObjError(key, err)
}
if p.ResourceVersion != nil && *p.ResourceVersion != objMeta.GetResourceVersion() {
err := fmt.Sprintf(
"Precondition failed: ResourceVersion in precondition: %v, ResourceVersion in object meta: %v",
*p.ResourceVersion,
objMeta.GetResourceVersion())
return NewInvalidObjError(key, err)
}
return nil
}
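// The sketch below is illustrative only (nothing in this package uses it); it shows how a
// caller might guard an operation with a UID precondition. The observedUID, key, and
// currentObj variables are assumed to come from the caller's earlier read of the object.
//
//	pre := NewUIDPreconditions(string(observedUID))
//	if err := pre.Check(key, currentObj); err != nil {
//		// The stored object no longer matches what the caller observed.
//		return err
//	}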
// Interface offers a common interface for object marshaling/unmarshaling operations and
// hides all the storage-related operations behind it.
type Interface interface {
// Returns Versioner associated with this interface.
Versioner() Versioner
// Create adds a new object at a key unless it already exists. 'ttl' is time-to-live
// in seconds (0 means forever). If no error is returned and out is not nil, out will be
// set to the read value from database.
Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error
// Delete removes the specified key and returns the value that existed at that spot.
// If key didn't exist, it will return NotFound storage error.
// If 'cachedExistingObject' is non-nil, it can be used as a suggestion about the
// current version of the object to avoid a read operation from storage to get it.
// However, implementations have to retry in case the suggestion is stale.
Delete(
ctx context.Context, key string, out runtime.Object, preconditions *Preconditions,
validateDeletion ValidateObjectFunc, cachedExistingObject runtime.Object) error
// Watch begins watching the specified key. Events are decoded into API objects,
// and any items selected by 'p' are sent down to the returned watch.Interface.
// resourceVersion may be used to specify what version to begin watching,
// which should be the current resourceVersion, and no longer rv+1
// (e.g. reconnecting without missing any updates).
// If resource version is "0", this interface will get current object at given key
// and send it in an "ADDED" event, before watch starts.
Watch(ctx context.Context, key string, opts ListOptions) (watch.Interface, error)
// WatchList begins watching the specified key's items. Items are decoded into API
// objects and any items selected by 'p' are sent down to the returned watch.Interface.
// resourceVersion may be used to specify what version to begin watching,
// which should be the current resourceVersion, and no longer rv+1
// (e.g. reconnecting without missing any updates).
// If resource version is "0", this interface will list current objects directory defined by key
// and send them in "ADDED" events, before watch starts.
WatchList(ctx context.Context, key string, opts ListOptions) (watch.Interface, error)
// Get unmarshals the JSON found at key into objPtr. On a not found error, it will either
// return a zero object of the requested type, or an error, depending on 'opts.ignoreNotFound'.
// Treats empty responses and nil response nodes exactly like a not found error.
// The returned contents may be delayed, but it is guaranteed that they will
// match 'opts.ResourceVersion' according 'opts.ResourceVersionMatch'.
Get(ctx context.Context, key string, opts GetOptions, objPtr runtime.Object) error
// GetToList unmarshals the JSON found at key into a *List API object
// (an object that satisfies the runtime.IsList definition).
// The returned contents may be delayed, but it is guaranteed that they will
// match 'opts.ResourceVersion' according 'opts.ResourceVersionMatch'.
GetToList(ctx context.Context, key string, opts ListOptions, listObj runtime.Object) error
// List unmarshals the JSON objects found under the directory defined by key
// into a *List API object (an object that satisfies the runtime.IsList definition).
// The returned contents may be delayed, but it is guaranteed that they will
// match 'opts.ResourceVersion' according 'opts.ResourceVersionMatch'.
List(ctx context.Context, key string, opts ListOptions, listObj runtime.Object) error
// GuaranteedUpdate keeps calling 'tryUpdate()' to update key 'key' (of type 'ptrToType')
	// retrying the update until it succeeds if there is an index conflict.
// Note that object passed to tryUpdate may change across invocations of tryUpdate() if
// other writers are simultaneously updating it, so tryUpdate() needs to take into account
// the current contents of the object when deciding how the update object should look.
// If the key doesn't exist, it will return NotFound storage error if ignoreNotFound=false
// or zero value in 'ptrToType' parameter otherwise.
// If the object to update has the same value as previous, it won't do any update
// but will return the object in 'ptrToType' parameter.
// If 'suggestion' is non-nil, it can be used as a suggestion about the current version
// of the object to avoid read operation from storage to get it. However, the
// implementations have to retry in case suggestion is stale.
//
// Example:
//
// s := /* implementation of Interface */
// err := s.GuaranteedUpdate(
// "myKey", &MyType{}, true,
// func(input runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) {
// // Before each invocation of the user defined function, "input" is reset to
// // current contents for "myKey" in database.
// curr := input.(*MyType) // Guaranteed to succeed.
//
// // Make the modification
// curr.Counter++
//
// // Return the modified object - return an error to stop iterating. Return
// // a uint64 to alter the TTL on the object, or nil to keep it the same value.
	//     return curr, nil, nil
// },
// )
GuaranteedUpdate(
ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool,
		preconditions *Preconditions, tryUpdate UpdateFunc, suggestion runtime.Object) error
	// Count returns the number of different entries under the key (generally being a path prefix).
Count(key string) (int64, error)
}
// GetOptions provides the options that may be provided for storage get operations.
type GetOptions struct {
// IgnoreNotFound determines what is returned if the requested object is not found. If
// true, a zero object is returned. If false, an error is returned.
IgnoreNotFound bool
// ResourceVersion provides a resource version constraint to apply to the get operation
// as a "not older than" constraint: the result contains data at least as new as the provided
// ResourceVersion. The newest available data is preferred, but any data not older than this
// ResourceVersion may be served.
ResourceVersion string
}
// ListOptions provides the options that may be provided for storage list operations.
type ListOptions struct {
// ResourceVersion provides a resource version constraint to apply to the list operation
// as a "not older than" constraint: the result contains data at least as new as the provided
// ResourceVersion. The newest available data is preferred, but any data not older than this
// ResourceVersion may be served.
ResourceVersion string
// ResourceVersionMatch provides the rule for how the resource version constraint applies. If set
	// to the default value "", the legacy resource version semantics apply.
ResourceVersionMatch metav1.ResourceVersionMatch
// Predicate provides the selection rules for the list operation.
Predicate SelectionPredicate
// ProgressNotify determines whether storage-originated bookmark (progress notify) events should
// be delivered to the users. The option is ignored for non-watch requests.
ProgressNotify bool
}
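// Editor's sketch (not part of the original file): one hedged illustration of
// how a caller holding an Interface implementation might combine GetOptions
// and ListOptions. The names "s", "ctx", the keys, and the example.Pod types
// are assumptions; Everything is the package-level match-all predicate.
//
//	// Tolerate a missing key and accept data no older than revision "100".
//	var pod example.Pod
//	err := s.Get(ctx, "/pods/ns1/pod-1", GetOptions{
//		IgnoreNotFound:  true,
//		ResourceVersion: "100",
//	}, &pod)
//
//	// Make the "not older than" list semantics explicit.
//	var pods example.PodList
//	err = s.List(ctx, "/pods/ns1", ListOptions{
//		ResourceVersion:      "100",
//		ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
//		Predicate:            Everything,
//	}, &pods)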
| staging/src/k8s.io/apiserver/pkg/storage/interfaces.go | 1 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.006535582710057497,
0.0006754786008968949,
0.0001630826445762068,
0.00017840240616351366,
0.0012344014830887318
] |
{
"id": 6,
"code_window": [
"\ttransformContext := authenticatedDataString(key)\n",
"\tfor {\n",
"\t\tif err := preconditions.Check(key, origState.obj); err != nil {\n",
"\t\t\t// If our data is already up to date, return the error\n",
"\t\t\tif !mustCheckData {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\n",
"\t\t\t// It's possible we were working with stale data\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif origStateIsCurrent {\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 324
} | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package iscsi contains the internal representation of Internet Small
// Computer System Interface (iSCSI) volumes.
package iscsi // import "k8s.io/kubernetes/pkg/volume/iscsi"
| pkg/volume/iscsi/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00017985899467021227,
0.00017771084094420075,
0.000175562672666274,
0.00017771084094420075,
0.000002148161001969129
] |
{
"id": 6,
"code_window": [
"\ttransformContext := authenticatedDataString(key)\n",
"\tfor {\n",
"\t\tif err := preconditions.Check(key, origState.obj); err != nil {\n",
"\t\t\t// If our data is already up to date, return the error\n",
"\t\t\tif !mustCheckData {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\n",
"\t\t\t// It's possible we were working with stale data\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif origStateIsCurrent {\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 324
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["kubectl_utils.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/kubectl",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
| test/e2e/framework/kubectl/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.0001772720570443198,
0.0001757079007802531,
0.00017448609287384897,
0.00017553672660142183,
0.0000010916596693277825
] |
{
"id": 6,
"code_window": [
"\ttransformContext := authenticatedDataString(key)\n",
"\tfor {\n",
"\t\tif err := preconditions.Check(key, origState.obj); err != nil {\n",
"\t\t\t// If our data is already up to date, return the error\n",
"\t\t\tif !mustCheckData {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\n",
"\t\t\t// It's possible we were working with stale data\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif origStateIsCurrent {\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 324
} | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
// +build mips mipsle
package unix
import (
"syscall"
"unsafe"
)
func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
//sys dup2(oldfd int, newfd int) (err error)
//sysnb EpollCreate(size int) (fd int, err error)
//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
//sys Fchown(fd int, uid int, gid int) (err error)
//sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64
//sysnb Getegid() (egid int)
//sysnb Geteuid() (euid int)
//sysnb Getgid() (gid int)
//sysnb Getuid() (uid int)
//sys Lchown(path string, uid int, gid int) (err error)
//sys Listen(s int, n int) (err error)
//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
//sysnb Setregid(rgid int, egid int) (err error)
//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
//sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64
//sys Ustat(dev int, ubuf *Ustat_t) (err error)
//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
//sysnb getgroups(n int, list *_Gid_t) (nn int, err error)
//sysnb setgroups(n int, list *_Gid_t) (err error)
//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
//sysnb socket(domain int, typ int, proto int) (fd int, err error)
//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
//sysnb InotifyInit() (fd int, err error)
//sys Ioperm(from int, num int, on int) (err error)
//sys Iopl(level int) (err error)
//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error)
//sysnb Gettimeofday(tv *Timeval) (err error)
//sysnb Time(t *Time_t) (tt Time_t, err error)
//sys Utime(path string, buf *Utimbuf) (err error)
//sys utimes(path string, times *[2]Timeval) (err error)
//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64
//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64
//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64
//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
//sys Pause() (err error)
func Fstatfs(fd int, buf *Statfs_t) (err error) {
_, _, e := Syscall(SYS_FSTATFS64, uintptr(fd), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf)))
if e != 0 {
err = errnoErr(e)
}
return
}
func Statfs(path string, buf *Statfs_t) (err error) {
p, err := BytePtrFromString(path)
if err != nil {
return err
}
_, _, e := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(p)), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf)))
if e != 0 {
err = errnoErr(e)
}
return
}
func Seek(fd int, offset int64, whence int) (off int64, err error) {
_, _, e := Syscall6(SYS__LLSEEK, uintptr(fd), uintptr(offset>>32), uintptr(offset), uintptr(unsafe.Pointer(&off)), uintptr(whence), 0)
if e != 0 {
err = errnoErr(e)
}
return
}
func setTimespec(sec, nsec int64) Timespec {
return Timespec{Sec: int32(sec), Nsec: int32(nsec)}
}
func setTimeval(sec, usec int64) Timeval {
return Timeval{Sec: int32(sec), Usec: int32(usec)}
}
//sysnb pipe2(p *[2]_C_int, flags int) (err error)
func Pipe2(p []int, flags int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe2(&pp, flags)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
//sysnb pipe() (p1 int, p2 int, err error)
func Pipe(p []int) (err error) {
if len(p) != 2 {
return EINVAL
}
p[0], p[1], err = pipe()
return
}
//sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error)
func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) {
page := uintptr(offset / 4096)
if offset != int64(page)*4096 {
return 0, EINVAL
}
return mmap2(addr, length, prot, flags, fd, page)
}
const rlimInf32 = ^uint32(0)
const rlimInf64 = ^uint64(0)
type rlimit32 struct {
Cur uint32
Max uint32
}
//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT
func Getrlimit(resource int, rlim *Rlimit) (err error) {
err = prlimit(0, resource, nil, rlim)
if err != ENOSYS {
return err
}
rl := rlimit32{}
err = getrlimit(resource, &rl)
if err != nil {
return
}
if rl.Cur == rlimInf32 {
rlim.Cur = rlimInf64
} else {
rlim.Cur = uint64(rl.Cur)
}
if rl.Max == rlimInf32 {
rlim.Max = rlimInf64
} else {
rlim.Max = uint64(rl.Max)
}
return
}
//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT
func Setrlimit(resource int, rlim *Rlimit) (err error) {
err = prlimit(0, resource, rlim, nil)
if err != ENOSYS {
return err
}
rl := rlimit32{}
if rlim.Cur == rlimInf64 {
rl.Cur = rlimInf32
} else if rlim.Cur < uint64(rlimInf32) {
rl.Cur = uint32(rlim.Cur)
} else {
return EINVAL
}
if rlim.Max == rlimInf64 {
rl.Max = rlimInf32
} else if rlim.Max < uint64(rlimInf32) {
rl.Max = uint32(rlim.Max)
} else {
return EINVAL
}
return setrlimit(resource, &rl)
}
func (r *PtraceRegs) PC() uint64 { return r.Epc }
func (r *PtraceRegs) SetPC(pc uint64) { r.Epc = pc }
func (iov *Iovec) SetLen(length int) {
iov.Len = uint32(length)
}
func (msghdr *Msghdr) SetControllen(length int) {
msghdr.Controllen = uint32(length)
}
func (msghdr *Msghdr) SetIovlen(length int) {
msghdr.Iovlen = uint32(length)
}
func (cmsg *Cmsghdr) SetLen(length int) {
cmsg.Len = uint32(length)
}
//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error)
func Poll(fds []PollFd, timeout int) (n int, err error) {
if len(fds) == 0 {
return poll(nil, 0, timeout)
}
return poll(&fds[0], len(fds), timeout)
}
| vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00020404627139214426,
0.0001724958565318957,
0.00016489838890265673,
0.00017019610095303506,
0.000007863399332563858
] |
{
"id": 7,
"code_window": [
"\t\t\tif err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t\tmustCheckData = false\n",
"\t\t\t// Retry\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\torigStateIsCurrent = true\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 334
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd3
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"path"
"reflect"
"strings"
"time"
"go.etcd.io/etcd/clientv3"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/etcd3/metrics"
"k8s.io/apiserver/pkg/storage/value"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
utiltrace "k8s.io/utils/trace"
)
// authenticatedDataString satisfies the value.Context interface. It uses the key to
// authenticate the stored data. This does not defend against reuse of previously
// encrypted values under the same key, but will prevent an attacker from using an
// encrypted value from a different key. A stronger authenticated data segment would
// include the etcd3 Version field (which is incremented on each write to a key and
// reset when the key is deleted), but an attacker with write access to etcd can
// force deletion and recreation of keys to weaken that angle.
type authenticatedDataString string
// AuthenticatedData implements the value.Context interface.
func (d authenticatedDataString) AuthenticatedData() []byte {
return []byte(string(d))
}
var _ value.Context = authenticatedDataString("")
type store struct {
client *clientv3.Client
codec runtime.Codec
versioner storage.Versioner
transformer value.Transformer
pathPrefix string
watcher *watcher
pagingEnabled bool
leaseManager *leaseManager
}
type objState struct {
obj runtime.Object
meta *storage.ResponseMeta
rev int64
data []byte
stale bool
}
// New returns an etcd3 implementation of storage.Interface.
func New(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, transformer value.Transformer, pagingEnabled bool, leaseReuseDurationSeconds int64) storage.Interface {
return newStore(c, newFunc, pagingEnabled, leaseReuseDurationSeconds, codec, prefix, transformer)
}
func newStore(c *clientv3.Client, newFunc func() runtime.Object, pagingEnabled bool, leaseReuseDurationSeconds int64, codec runtime.Codec, prefix string, transformer value.Transformer) *store {
versioner := APIObjectVersioner{}
result := &store{
client: c,
codec: codec,
versioner: versioner,
transformer: transformer,
pagingEnabled: pagingEnabled,
// for compatibility with etcd2 impl.
// no-op for default prefix of '/registry'.
// keeps compatibility with etcd2 impl for custom prefixes that don't start with '/'
pathPrefix: path.Join("/", prefix),
watcher: newWatcher(c, codec, newFunc, versioner, transformer),
leaseManager: newDefaultLeaseManager(c, leaseReuseDurationSeconds),
}
return result
}
// Versioner implements storage.Interface.Versioner.
func (s *store) Versioner() storage.Versioner {
return s.versioner
}
// Get implements storage.Interface.Get.
func (s *store) Get(ctx context.Context, key string, opts storage.GetOptions, out runtime.Object) error {
key = path.Join(s.pathPrefix, key)
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return err
}
if err = s.validateMinimumResourceVersion(opts.ResourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
if len(getResp.Kvs) == 0 {
if opts.IgnoreNotFound {
return runtime.SetZeroValue(out)
}
return storage.NewKeyNotFoundError(key, 0)
}
kv := getResp.Kvs[0]
data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
return decode(s.codec, s.versioner, data, out, kv.ModRevision)
}
// Create implements storage.Interface.Create.
func (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error {
if version, err := s.versioner.ObjectResourceVersion(obj); err == nil && version != 0 {
return errors.New("resourceVersion should not be set on objects to be created")
}
if err := s.versioner.PrepareObjectForStorage(obj); err != nil {
return fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
data, err := runtime.Encode(s.codec, obj)
if err != nil {
return err
}
key = path.Join(s.pathPrefix, key)
opts, err := s.ttlOpts(ctx, int64(ttl))
if err != nil {
return err
}
newData, err := s.transformer.TransformToStorage(data, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
notFound(key),
).Then(
clientv3.OpPut(key, string(newData), opts...),
).Commit()
metrics.RecordEtcdRequestLatency("create", getTypeName(obj), startTime)
if err != nil {
return err
}
if !txnResp.Succeeded {
return storage.NewKeyExistsError(key, 0)
}
if out != nil {
putResp := txnResp.Responses[0].GetResponsePut()
return decode(s.codec, s.versioner, data, out, putResp.Header.Revision)
}
return nil
}
// Delete implements storage.Interface.Delete.
func (s *store) Delete(
ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions,
validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
v, err := conversion.EnforcePtr(out)
if err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
key = path.Join(s.pathPrefix, key)
return s.conditionalDelete(ctx, key, out, v, preconditions, validateDeletion, cachedExistingObject)
}
func (s *store) conditionalDelete(
ctx context.Context, key string, out runtime.Object, v reflect.Value, preconditions *storage.Preconditions,
validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
getCurrentState := func() (*objState, error) {
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return nil, err
}
return s.getState(getResp, key, v, false)
}
var origState *objState
var err error
var origStateIsCurrent bool
if cachedExistingObject != nil {
origState, err = s.getStateFromObject(cachedExistingObject)
} else {
origState, err = getCurrentState()
origStateIsCurrent = true
}
if err != nil {
return err
}
for {
if preconditions != nil {
if err := preconditions.Check(key, origState.obj); err != nil {
if origStateIsCurrent {
return err
}
// It's possible we're working with stale data.
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
origStateIsCurrent = true
// Retry
continue
}
}
if err := validateDeletion(ctx, origState.obj); err != nil {
if origStateIsCurrent {
return err
}
// It's possible we're working with stale data.
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
origStateIsCurrent = true
// Retry
continue
}
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev),
).Then(
clientv3.OpDelete(key),
).Else(
clientv3.OpGet(key),
).Commit()
metrics.RecordEtcdRequestLatency("delete", getTypeName(out), startTime)
if err != nil {
return err
}
if !txnResp.Succeeded {
getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
klog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key)
origState, err = s.getState(getResp, key, v, false)
if err != nil {
return err
}
origStateIsCurrent = true
continue
}
return decode(s.codec, s.versioner, origState.data, out, origState.rev)
}
}
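// Editorial note on the flow above: conditionalDelete deliberately starts from
// cachedExistingObject (which may be stale) to skip an extra etcd read; only
// when a precondition check, the deletion validation, or the ModRevision
// transaction compare fails does it fall back to getCurrentState, mark
// origStateIsCurrent, and retry against fresh data.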
// GuaranteedUpdate implements storage.Interface.GuaranteedUpdate.
func (s *store) GuaranteedUpdate(
ctx context.Context, key string, out runtime.Object, ignoreNotFound bool,
preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, suggestion runtime.Object) error {
trace := utiltrace.New("GuaranteedUpdate etcd3", utiltrace.Field{"type", getTypeName(out)})
defer trace.LogIfLong(500 * time.Millisecond)
v, err := conversion.EnforcePtr(out)
if err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
key = path.Join(s.pathPrefix, key)
getCurrentState := func() (*objState, error) {
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return nil, err
}
return s.getState(getResp, key, v, ignoreNotFound)
}
var origState *objState
var mustCheckData bool
if suggestion != nil {
origState, err = s.getStateFromObject(suggestion)
mustCheckData = true
} else {
origState, err = getCurrentState()
}
if err != nil {
return err
}
trace.Step("initial value restored")
transformContext := authenticatedDataString(key)
for {
if err := preconditions.Check(key, origState.obj); err != nil {
// If our data is already up to date, return the error
if !mustCheckData {
return err
}
// It's possible we were working with stale data
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
// Retry
continue
}
ret, ttl, err := s.updateState(origState, tryUpdate)
if err != nil {
// If our data is already up to date, return the error
if !mustCheckData {
return err
}
// It's possible we were working with stale data
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
// Retry
continue
}
data, err := runtime.Encode(s.codec, ret)
if err != nil {
return err
}
if !origState.stale && bytes.Equal(data, origState.data) {
// if we skipped the original Get in this loop, we must refresh from
// etcd in order to be sure the data in the store is equivalent to
// our desired serialization
if mustCheckData {
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
if !bytes.Equal(data, origState.data) {
// original data changed, restart loop
continue
}
}
// recheck that the data from etcd is not stale before short-circuiting a write
if !origState.stale {
return decode(s.codec, s.versioner, origState.data, out, origState.rev)
}
}
newData, err := s.transformer.TransformToStorage(data, transformContext)
if err != nil {
return storage.NewInternalError(err.Error())
}
opts, err := s.ttlOpts(ctx, int64(ttl))
if err != nil {
return err
}
trace.Step("Transaction prepared")
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev),
).Then(
clientv3.OpPut(key, string(newData), opts...),
).Else(
clientv3.OpGet(key),
).Commit()
metrics.RecordEtcdRequestLatency("update", getTypeName(out), startTime)
if err != nil {
return err
}
trace.Step("Transaction committed")
if !txnResp.Succeeded {
getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
klog.V(4).Infof("GuaranteedUpdate of %s failed because of a conflict, going to retry", key)
origState, err = s.getState(getResp, key, v, ignoreNotFound)
if err != nil {
return err
}
trace.Step("Retry value restored")
mustCheckData = false
continue
}
putResp := txnResp.Responses[0].GetResponsePut()
return decode(s.codec, s.versioner, data, out, putResp.Header.Revision)
}
}
// GetToList implements storage.Interface.GetToList.
func (s *store) GetToList(ctx context.Context, key string, listOpts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := listOpts.ResourceVersion
match := listOpts.ResourceVersionMatch
pred := listOpts.Predicate
trace := utiltrace.New("GetToList etcd3",
utiltrace.Field{"key", key},
utiltrace.Field{"resourceVersion", resourceVersion},
utiltrace.Field{"resourceVersionMatch", match},
utiltrace.Field{"limit", pred.Limit},
utiltrace.Field{"continue", pred.Continue})
defer trace.LogIfLong(500 * time.Millisecond)
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
v, err := conversion.EnforcePtr(listPtr)
if err != nil || v.Kind() != reflect.Slice {
return fmt.Errorf("need ptr to slice: %v", err)
}
newItemFunc := getNewItemFunc(listObj, v)
key = path.Join(s.pathPrefix, key)
startTime := time.Now()
var opts []clientv3.OpOption
if len(resourceVersion) > 0 && match == metav1.ResourceVersionMatchExact {
rv, err := s.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
opts = append(opts, clientv3.WithRev(int64(rv)))
}
getResp, err := s.client.KV.Get(ctx, key, opts...)
metrics.RecordEtcdRequestLatency("get", getTypeName(listPtr), startTime)
if err != nil {
return err
}
if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
if len(getResp.Kvs) > 0 {
data, _, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
if err := appendListItem(v, data, uint64(getResp.Kvs[0].ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {
return err
}
}
// update version with cluster level revision
return s.versioner.UpdateList(listObj, uint64(getResp.Header.Revision), "", nil)
}
func getNewItemFunc(listObj runtime.Object, v reflect.Value) func() runtime.Object {
// For unstructured lists with a target group/version, preserve the group/version in the instantiated list items
if unstructuredList, isUnstructured := listObj.(*unstructured.UnstructuredList); isUnstructured {
if apiVersion := unstructuredList.GetAPIVersion(); len(apiVersion) > 0 {
return func() runtime.Object {
return &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": apiVersion}}
}
}
}
// Otherwise just instantiate an empty item
elem := v.Type().Elem()
return func() runtime.Object {
return reflect.New(elem).Interface().(runtime.Object)
}
}
func (s *store) Count(key string) (int64, error) {
key = path.Join(s.pathPrefix, key)
// We need to make sure the key ended with "/" so that we only get children "directories".
// e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three,
// while with prefix "/a/" will return only "/a/b" which is the correct answer.
if !strings.HasSuffix(key, "/") {
key += "/"
}
startTime := time.Now()
getResp, err := s.client.KV.Get(context.Background(), key, clientv3.WithRange(clientv3.GetPrefixRangeEnd(key)), clientv3.WithCountOnly())
metrics.RecordEtcdRequestLatency("listWithCount", key, startTime)
if err != nil {
return 0, err
}
return getResp.Count, nil
}
// continueToken is a simple structured object for encoding the state of a continue token.
// TODO: if we change the version of the encoded from, we can't start encoding the new version
// until all other servers are upgraded (i.e. we need to support rolling schema)
// This is a public API struct and cannot change.
type continueToken struct {
APIVersion string `json:"v"`
ResourceVersion int64 `json:"rv"`
StartKey string `json:"start"`
}
// decodeContinue transforms an encoded continue token into a start key and resource version.
// TODO: return a typed error that instructs clients that they must relist
func decodeContinue(continueValue, keyPrefix string) (fromKey string, rv int64, err error) {
data, err := base64.RawURLEncoding.DecodeString(continueValue)
if err != nil {
return "", 0, fmt.Errorf("continue key is not valid: %v", err)
}
var c continueToken
if err := json.Unmarshal(data, &c); err != nil {
return "", 0, fmt.Errorf("continue key is not valid: %v", err)
}
switch c.APIVersion {
case "meta.k8s.io/v1":
if c.ResourceVersion == 0 {
return "", 0, fmt.Errorf("continue key is not valid: incorrect encoded start resourceVersion (version meta.k8s.io/v1)")
}
if len(c.StartKey) == 0 {
return "", 0, fmt.Errorf("continue key is not valid: encoded start key empty (version meta.k8s.io/v1)")
}
// defend against path traversal attacks by clients - path.Clean will ensure that startKey cannot
// be at a higher level of the hierarchy, and so when we append the key prefix we will end up with
// continue start key that is fully qualified and cannot range over anything less specific than
// keyPrefix.
key := c.StartKey
if !strings.HasPrefix(key, "/") {
key = "/" + key
}
cleaned := path.Clean(key)
if cleaned != key {
return "", 0, fmt.Errorf("continue key is not valid: %s", c.StartKey)
}
return keyPrefix + cleaned[1:], c.ResourceVersion, nil
default:
return "", 0, fmt.Errorf("continue key is not valid: server does not recognize this encoded version %q", c.APIVersion)
}
}
// encodeContinue returns a string representing the encoded continuation of the current query.
func encodeContinue(key, keyPrefix string, resourceVersion int64) (string, error) {
nextKey := strings.TrimPrefix(key, keyPrefix)
if nextKey == key {
return "", fmt.Errorf("unable to encode next field: the key and key prefix do not match")
}
out, err := json.Marshal(&continueToken{APIVersion: "meta.k8s.io/v1", ResourceVersion: resourceVersion, StartKey: nextKey})
if err != nil {
return "", err
}
return base64.RawURLEncoding.EncodeToString(out), nil
}
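// Editorial sketch (not in the original source): a continue-token round trip
// under assumed inputs. With keyPrefix "/registry/pods/" and key
// "/registry/pods/ns1/pod-9\x00", encodeContinue marshals
// {"v":"meta.k8s.io/v1","rv":<revision>,"start":"ns1/pod-9\x00"} and encodes it
// with base64.RawURLEncoding; decodeContinue reverses this, path.Clean-checks
// the start key, and re-joins it with keyPrefix so a token can never range
// outside the prefix it was issued for.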
// List implements storage.Interface.List.
func (s *store) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := opts.ResourceVersion
match := opts.ResourceVersionMatch
pred := opts.Predicate
trace := utiltrace.New("List etcd3",
utiltrace.Field{"key", key},
utiltrace.Field{"resourceVersion", resourceVersion},
utiltrace.Field{"resourceVersionMatch", match},
utiltrace.Field{"limit", pred.Limit},
utiltrace.Field{"continue", pred.Continue})
defer trace.LogIfLong(500 * time.Millisecond)
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
v, err := conversion.EnforcePtr(listPtr)
if err != nil || v.Kind() != reflect.Slice {
return fmt.Errorf("need ptr to slice: %v", err)
}
if s.pathPrefix != "" {
key = path.Join(s.pathPrefix, key)
}
// We need to make sure the key ended with "/" so that we only get children "directories".
// e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three,
// while with prefix "/a/" will return only "/a/b" which is the correct answer.
if !strings.HasSuffix(key, "/") {
key += "/"
}
keyPrefix := key
// set the appropriate clientv3 options to filter the returned data set
var paging bool
options := make([]clientv3.OpOption, 0, 4)
if s.pagingEnabled && pred.Limit > 0 {
paging = true
options = append(options, clientv3.WithLimit(pred.Limit))
}
newItemFunc := getNewItemFunc(listObj, v)
var fromRV *uint64
if len(resourceVersion) > 0 {
parsedRV, err := s.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
fromRV = &parsedRV
}
var returnedRV, continueRV, withRev int64
var continueKey string
switch {
case s.pagingEnabled && len(pred.Continue) > 0:
continueKey, continueRV, err = decodeContinue(pred.Continue, keyPrefix)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid continue token: %v", err))
}
if len(resourceVersion) > 0 && resourceVersion != "0" {
return apierrors.NewBadRequest("specifying resource version is not allowed when using continue")
}
rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)
options = append(options, clientv3.WithRange(rangeEnd))
key = continueKey
// If continueRV > 0, the LIST request needs a specific resource version.
// continueRV==0 is invalid.
// If continueRV < 0, the request is for the latest resource version.
if continueRV > 0 {
withRev = continueRV
returnedRV = continueRV
}
case s.pagingEnabled && pred.Limit > 0:
if fromRV != nil {
switch match {
case metav1.ResourceVersionMatchNotOlderThan:
// The not older than constraint is checked after we get a response from etcd,
// and returnedRV is then set to the revision we get from the etcd response.
case metav1.ResourceVersionMatchExact:
returnedRV = int64(*fromRV)
withRev = returnedRV
case "": // legacy case
if *fromRV > 0 {
returnedRV = int64(*fromRV)
withRev = returnedRV
}
default:
return fmt.Errorf("unknown ResourceVersionMatch value: %v", match)
}
}
rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)
options = append(options, clientv3.WithRange(rangeEnd))
default:
if fromRV != nil {
switch match {
case metav1.ResourceVersionMatchNotOlderThan:
// The not older than constraint is checked after we get a response from etcd,
// and returnedRV is then set to the revision we get from the etcd response.
case metav1.ResourceVersionMatchExact:
returnedRV = int64(*fromRV)
withRev = returnedRV
case "": // legacy case
default:
return fmt.Errorf("unknown ResourceVersionMatch value: %v", match)
}
}
options = append(options, clientv3.WithPrefix())
}
if withRev != 0 {
options = append(options, clientv3.WithRev(withRev))
}
// loop until we have filled the requested limit from etcd or there are no more results
var lastKey []byte
var hasMore bool
var getResp *clientv3.GetResponse
for {
startTime := time.Now()
getResp, err = s.client.KV.Get(ctx, key, options...)
metrics.RecordEtcdRequestLatency("list", getTypeName(listPtr), startTime)
if err != nil {
return interpretListError(err, len(pred.Continue) > 0, continueKey, keyPrefix)
}
if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
hasMore = getResp.More
if len(getResp.Kvs) == 0 && getResp.More {
return fmt.Errorf("no results were found, but etcd indicated there were more values remaining")
}
// avoid small allocations for the result slice, since this can be called in many
// different contexts and we don't know how significantly the result will be filtered
if pred.Empty() {
growSlice(v, len(getResp.Kvs))
} else {
growSlice(v, 2048, len(getResp.Kvs))
}
// take items from the response until the bucket is full, filtering as we go
for _, kv := range getResp.Kvs {
if paging && int64(v.Len()) >= pred.Limit {
hasMore = true
break
}
lastKey = kv.Key
data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(kv.Key))
if err != nil {
return storage.NewInternalErrorf("unable to transform key %q: %v", kv.Key, err)
}
if err := appendListItem(v, data, uint64(kv.ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {
return err
}
}
// indicate to the client which resource version was returned
if returnedRV == 0 {
returnedRV = getResp.Header.Revision
}
// no more results remain or we didn't request paging
if !hasMore || !paging {
break
}
// we're paging but we have filled our bucket
if int64(v.Len()) >= pred.Limit {
break
}
key = string(lastKey) + "\x00"
if withRev == 0 {
withRev = returnedRV
options = append(options, clientv3.WithRev(withRev))
}
}
// instruct the client to begin querying from immediately after the last key we returned
// we never return a key that the client wouldn't be allowed to see
if hasMore {
// we want to start immediately after the last key
next, err := encodeContinue(string(lastKey)+"\x00", keyPrefix, returnedRV)
if err != nil {
return err
}
var remainingItemCount *int64
		// getResp.Count includes objects that do not match the pred.
// Instead of returning inaccurate count for non-empty selectors, we return nil.
// Only set remainingItemCount if the predicate is empty.
if utilfeature.DefaultFeatureGate.Enabled(features.RemainingItemCount) {
if pred.Empty() {
c := int64(getResp.Count - pred.Limit)
remainingItemCount = &c
}
}
return s.versioner.UpdateList(listObj, uint64(returnedRV), next, remainingItemCount)
}
// no continuation
return s.versioner.UpdateList(listObj, uint64(returnedRV), "", nil)
}
// growSlice takes a slice value and grows its capacity up
// to the maximum of the passed sizes or maxCapacity, whichever
// is smaller. Above maxCapacity decisions about allocation are left
// to the Go runtime on append. This allows a caller to make an
// educated guess about the potential size of the total list while
// still avoiding overly aggressive initial allocation. If sizes
// is empty maxCapacity will be used as the size to grow.
func growSlice(v reflect.Value, maxCapacity int, sizes ...int) {
cap := v.Cap()
max := cap
for _, size := range sizes {
if size > max {
max = size
}
}
if len(sizes) == 0 || max > maxCapacity {
max = maxCapacity
}
if max <= cap {
return
}
if v.Len() > 0 {
extra := reflect.MakeSlice(v.Type(), 0, max)
reflect.Copy(extra, v)
v.Set(extra)
} else {
extra := reflect.MakeSlice(v.Type(), 0, max)
v.Set(extra)
}
}
// Watch implements storage.Interface.Watch.
func (s *store) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return s.watch(ctx, key, opts, false)
}
// WatchList implements storage.Interface.WatchList.
func (s *store) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return s.watch(ctx, key, opts, true)
}
func (s *store) watch(ctx context.Context, key string, opts storage.ListOptions, recursive bool) (watch.Interface, error) {
rev, err := s.versioner.ParseResourceVersion(opts.ResourceVersion)
if err != nil {
return nil, err
}
key = path.Join(s.pathPrefix, key)
return s.watcher.Watch(ctx, key, int64(rev), recursive, opts.ProgressNotify, opts.Predicate)
}
func (s *store) getState(getResp *clientv3.GetResponse, key string, v reflect.Value, ignoreNotFound bool) (*objState, error) {
state := &objState{
meta: &storage.ResponseMeta{},
}
if u, ok := v.Addr().Interface().(runtime.Unstructured); ok {
state.obj = u.NewEmptyInstance()
} else {
state.obj = reflect.New(v.Type()).Interface().(runtime.Object)
}
if len(getResp.Kvs) == 0 {
if !ignoreNotFound {
return nil, storage.NewKeyNotFoundError(key, 0)
}
if err := runtime.SetZeroValue(state.obj); err != nil {
return nil, err
}
} else {
data, stale, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))
if err != nil {
return nil, storage.NewInternalError(err.Error())
}
state.rev = getResp.Kvs[0].ModRevision
state.meta.ResourceVersion = uint64(state.rev)
state.data = data
state.stale = stale
if err := decode(s.codec, s.versioner, state.data, state.obj, state.rev); err != nil {
return nil, err
}
}
return state, nil
}
func (s *store) getStateFromObject(obj runtime.Object) (*objState, error) {
state := &objState{
obj: obj,
meta: &storage.ResponseMeta{},
}
rv, err := s.versioner.ObjectResourceVersion(obj)
if err != nil {
return nil, fmt.Errorf("couldn't get resource version: %v", err)
}
state.rev = int64(rv)
state.meta.ResourceVersion = uint64(state.rev)
// Compute the serialized form - for that we need to temporarily clean
// its resource version field (those are not stored in etcd).
if err := s.versioner.PrepareObjectForStorage(obj); err != nil {
return nil, fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
state.data, err = runtime.Encode(s.codec, obj)
if err != nil {
return nil, err
}
if err := s.versioner.UpdateObject(state.obj, uint64(rv)); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
return state, nil
}
func (s *store) updateState(st *objState, userUpdate storage.UpdateFunc) (runtime.Object, uint64, error) {
ret, ttlPtr, err := userUpdate(st.obj, *st.meta)
if err != nil {
return nil, 0, err
}
if err := s.versioner.PrepareObjectForStorage(ret); err != nil {
return nil, 0, fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
var ttl uint64
if ttlPtr != nil {
ttl = *ttlPtr
}
return ret, ttl, nil
}
// ttlOpts returns client options based on given ttl.
// ttl: if ttl is non-zero, it will attach the key to a lease with ttl of roughly the same length
func (s *store) ttlOpts(ctx context.Context, ttl int64) ([]clientv3.OpOption, error) {
if ttl == 0 {
return nil, nil
}
id, err := s.leaseManager.GetLease(ctx, ttl)
if err != nil {
return nil, err
}
return []clientv3.OpOption{clientv3.WithLease(id)}, nil
}
// validateMinimumResourceVersion returns a 'too large resource' version error when the provided minimumResourceVersion is
// greater than the most recent actualRevision available from storage.
func (s *store) validateMinimumResourceVersion(minimumResourceVersion string, actualRevision uint64) error {
if minimumResourceVersion == "" {
return nil
}
minimumRV, err := s.versioner.ParseResourceVersion(minimumResourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
// Enforce the storage.Interface guarantee that the resource version of the returned data
// "will be at least 'resourceVersion'".
if minimumRV > actualRevision {
return storage.NewTooLargeResourceVersionError(minimumRV, actualRevision, 0)
}
return nil
}
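// Editorial example of the guarantee above (assumed numbers): if a client asks
// for ResourceVersion "500" while the etcd header revision seen by this call
// is only 400, the request fails with a "too large resource version" error
// instead of silently serving older data; an empty minimumResourceVersion
// skips the check entirely.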
// decode decodes value of bytes into object. It will also set the object resource version to rev.
// On success, objPtr would be set to the object.
func decode(codec runtime.Codec, versioner storage.Versioner, value []byte, objPtr runtime.Object, rev int64) error {
if _, err := conversion.EnforcePtr(objPtr); err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
_, _, err := codec.Decode(value, nil, objPtr)
if err != nil {
return err
}
// being unable to set the version does not prevent the object from being extracted
if err := versioner.UpdateObject(objPtr, uint64(rev)); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
return nil
}
// appendListItem decodes and appends the object (if it passes filter) to v, which must be a slice.
func appendListItem(v reflect.Value, data []byte, rev uint64, pred storage.SelectionPredicate, codec runtime.Codec, versioner storage.Versioner, newItemFunc func() runtime.Object) error {
obj, _, err := codec.Decode(data, nil, newItemFunc())
if err != nil {
return err
}
// being unable to set the version does not prevent the object from being extracted
if err := versioner.UpdateObject(obj, rev); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
if matched, err := pred.Matches(obj); err == nil && matched {
v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem()))
}
return nil
}
func notFound(key string) clientv3.Cmp {
return clientv3.Compare(clientv3.ModRevision(key), "=", 0)
}
// getTypeName returns type name of an object for reporting purposes.
func getTypeName(obj interface{}) string {
return reflect.TypeOf(obj).String()
}
| staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go | 1 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.981941819190979,
0.02166721597313881,
0.00016181713726837188,
0.00017134868539869785,
0.13724325597286224
] |
{
"id": 7,
"code_window": [
"\t\t\tif err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t\tmustCheckData = false\n",
"\t\t\t// Retry\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\torigStateIsCurrent = true\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 334
} | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdserver
import (
"fmt"
"reflect"
"strings"
"time"
"github.com/golang/protobuf/proto"
"go.etcd.io/etcd/etcdserver/api/membership"
"go.etcd.io/etcd/etcdserver/api/rafthttp"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.etcd.io/etcd/pkg/types"
"go.uber.org/zap"
)
// isConnectedToQuorumSince checks whether the local member is connected to the
// quorum of the cluster since the given time.
func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
return numConnectedSince(transport, since, self, members) >= (len(members)/2)+1
}
// isConnectedSince checks whether the local member is connected to the
// remote member since the given time.
func isConnectedSince(transport rafthttp.Transporter, since time.Time, remote types.ID) bool {
t := transport.ActiveSince(remote)
return !t.IsZero() && t.Before(since)
}
// isConnectedFullySince checks whether the local member is connected to all
// members in the cluster since the given time.
func isConnectedFullySince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
return numConnectedSince(transport, since, self, members) == len(members)
}
// numConnectedSince counts how many members are connected to the local member
// since the given time.
func numConnectedSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) int {
connectedNum := 0
for _, m := range members {
if m.ID == self || isConnectedSince(transport, since, m.ID) {
connectedNum++
}
}
return connectedNum
}
// longestConnected chooses the member with longest active-since-time.
// It returns false, if nothing is active.
func longestConnected(tp rafthttp.Transporter, membs []types.ID) (types.ID, bool) {
var longest types.ID
var oldest time.Time
for _, id := range membs {
tm := tp.ActiveSince(id)
if tm.IsZero() { // inactive
continue
}
if oldest.IsZero() { // first longest candidate
oldest = tm
longest = id
}
if tm.Before(oldest) {
oldest = tm
longest = id
}
}
if uint64(longest) == 0 {
return longest, false
}
return longest, true
}
type notifier struct {
c chan struct{}
err error
}
func newNotifier() *notifier {
return ¬ifier{
c: make(chan struct{}),
}
}
func (nc *notifier) notify(err error) {
nc.err = err
close(nc.c)
}
func warnOfExpensiveRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) {
var resp string
if !isNil(respMsg) {
resp = fmt.Sprintf("size:%d", proto.Size(respMsg))
}
warnOfExpensiveGenericRequest(lg, now, reqStringer, "", resp, err)
}
func warnOfFailedRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) {
var resp string
if !isNil(respMsg) {
resp = fmt.Sprintf("size:%d", proto.Size(respMsg))
}
d := time.Since(now)
if lg != nil {
lg.Warn(
"failed to apply request",
zap.Duration("took", d),
zap.String("request", reqStringer.String()),
zap.String("response", resp),
zap.Error(err),
)
} else {
plog.Warningf("failed to apply request %q with response %q took (%v) to execute, err is %v", reqStringer.String(), resp, d, err)
}
}
func warnOfExpensiveReadOnlyTxnRequest(lg *zap.Logger, now time.Time, r *pb.TxnRequest, txnResponse *pb.TxnResponse, err error) {
reqStringer := pb.NewLoggableTxnRequest(r)
var resp string
if !isNil(txnResponse) {
var resps []string
for _, r := range txnResponse.Responses {
switch op := r.Response.(type) {
case *pb.ResponseOp_ResponseRange:
resps = append(resps, fmt.Sprintf("range_response_count:%d", len(op.ResponseRange.Kvs)))
default:
// only range responses should be in a read only txn request
}
}
resp = fmt.Sprintf("responses:<%s> size:%d", strings.Join(resps, " "), proto.Size(txnResponse))
}
warnOfExpensiveGenericRequest(lg, now, reqStringer, "read-only range ", resp, err)
}
func warnOfExpensiveReadOnlyRangeRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, rangeResponse *pb.RangeResponse, err error) {
var resp string
if !isNil(rangeResponse) {
resp = fmt.Sprintf("range_response_count:%d size:%d", len(rangeResponse.Kvs), proto.Size(rangeResponse))
}
warnOfExpensiveGenericRequest(lg, now, reqStringer, "read-only range ", resp, err)
}
func warnOfExpensiveGenericRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, prefix string, resp string, err error) {
d := time.Since(now)
if d > warnApplyDuration {
if lg != nil {
lg.Warn(
"apply request took too long",
zap.Duration("took", d),
zap.Duration("expected-duration", warnApplyDuration),
zap.String("prefix", prefix),
zap.String("request", reqStringer.String()),
zap.String("response", resp),
zap.Error(err),
)
} else {
var result string
if err != nil {
result = fmt.Sprintf("error:%v", err)
} else {
result = resp
}
plog.Warningf("%srequest %q with result %q took too long (%v) to execute", prefix, reqStringer.String(), result, d)
}
slowApplies.Inc()
}
}
func isNil(msg proto.Message) bool {
return msg == nil || reflect.ValueOf(msg).IsNil()
}
// panicAlternativeStringer wraps a fmt.Stringer, and if calling String() panics, calls the alternative instead.
// This is needed to ensure logging slow v2 requests does not panic, which occurs when running integration tests
// with the embedded server with github.com/golang/protobuf v1.4.0+. See https://github.com/etcd-io/etcd/issues/12197.
type panicAlternativeStringer struct {
stringer fmt.Stringer
alternative func() string
}
func (n panicAlternativeStringer) String() (s string) {
defer func() {
if err := recover(); err != nil {
s = n.alternative()
}
}()
s = n.stringer.String()
return s
}
| vendor/go.etcd.io/etcd/etcdserver/util.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.002099506091326475,
0.00033175176940858364,
0.00016686711751390249,
0.00017296230362262577,
0.0004258572880644351
] |
{
"id": 7,
"code_window": [
"\t\t\tif err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t\tmustCheckData = false\n",
"\t\t\t// Retry\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\torigStateIsCurrent = true\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 334
} | // Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux,!appengine
package util
import (
"bytes"
"os"
"syscall"
)
// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
// https://github.com/prometheus/node_exporter/pull/728/files
//
// Note that this function will not read files larger than 128 bytes.
func SysReadFile(file string) (string, error) {
f, err := os.Open(file)
if err != nil {
return "", err
}
defer f.Close()
// On some machines, hwmon drivers are broken and return EAGAIN. This causes
// Go's ioutil.ReadFile implementation to poll forever.
//
// Since we either want to read data or bail immediately, do the simplest
// possible read using syscall directly.
const sysFileBufferSize = 128
b := make([]byte, sysFileBufferSize)
n, err := syscall.Read(int(f.Fd()), b)
if err != nil {
return "", err
}
return string(bytes.TrimSpace(b[:n])), nil
}
| vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.0004619703395292163,
0.00028594073955900967,
0.00016606553981546313,
0.00017723269411362708,
0.00013815853162668645
] |
{
"id": 7,
"code_window": [
"\t\t\tif err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t\tmustCheckData = false\n",
"\t\t\t// Retry\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\torigStateIsCurrent = true\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 334
} | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
"context"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
)
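// metadataAccessor reads and writes standard object metadata
// (such as resourceVersion) on arbitrary runtime.Objects.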
var metadataAccessor = meta.NewAccessor()
// Helper provides methods for retrieving or mutating a RESTful
// resource.
type Helper struct {
// The name of this resource as the server would recognize it
Resource string
// A RESTClient capable of mutating this resource.
RESTClient RESTClient
// True if the resource type is scoped to namespaces
NamespaceScoped bool
// If true, then use server-side dry-run to not persist changes to storage
// for verbs and resources that support server-side dry-run.
//
// Note this should only be used against an apiserver with dry-run enabled,
// and on resources that support dry-run. If the apiserver or the resource
// does not support dry-run, then the change will be persisted to storage.
ServerDryRun bool
// FieldManager is the name associated with the actor or entity that is making
// changes.
FieldManager string
}
// NewHelper creates a Helper from a ResourceMapping
func NewHelper(client RESTClient, mapping *meta.RESTMapping) *Helper {
return &Helper{
Resource: mapping.Resource.Resource,
RESTClient: client,
NamespaceScoped: mapping.Scope.Name() == meta.RESTScopeNameNamespace,
}
}
// DryRun, if true, will use server-side dry-run to not persist changes to storage.
// Otherwise, changes will be persisted to storage.
func (m *Helper) DryRun(dryRun bool) *Helper {
m.ServerDryRun = dryRun
return m
}
// WithFieldManager sets the field manager option to indicate the actor or entity
// that is making changes in a create or update operation.
func (m *Helper) WithFieldManager(fieldManager string) *Helper {
m.FieldManager = fieldManager
return m
}
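// Get retrieves the object identified by namespace and name from the server.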
func (m *Helper) Get(namespace, name string) (runtime.Object, error) {
req := m.RESTClient.Get().
NamespaceIfScoped(namespace, m.NamespaceScoped).
Resource(m.Resource).
Name(name)
return req.Do(context.TODO()).Get()
}
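// List retrieves the objects in the given namespace that match the provided list options.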
func (m *Helper) List(namespace, apiVersion string, options *metav1.ListOptions) (runtime.Object, error) {
req := m.RESTClient.Get().
NamespaceIfScoped(namespace, m.NamespaceScoped).
Resource(m.Resource).
VersionedParams(options, metav1.ParameterCodec)
return req.Do(context.TODO()).Get()
}
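// Watch begins watching the resource in the given namespace using the provided list options.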
func (m *Helper) Watch(namespace, apiVersion string, options *metav1.ListOptions) (watch.Interface, error) {
options.Watch = true
return m.RESTClient.Get().
NamespaceIfScoped(namespace, m.NamespaceScoped).
Resource(m.Resource).
VersionedParams(options, metav1.ParameterCodec).
Watch(context.TODO())
}
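// WatchSingle watches a single object by name, starting at the supplied resource version.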
func (m *Helper) WatchSingle(namespace, name, resourceVersion string) (watch.Interface, error) {
return m.RESTClient.Get().
NamespaceIfScoped(namespace, m.NamespaceScoped).
Resource(m.Resource).
VersionedParams(&metav1.ListOptions{
ResourceVersion: resourceVersion,
Watch: true,
FieldSelector: fields.OneTermEqualSelector("metadata.name", name).String(),
}, metav1.ParameterCodec).
Watch(context.TODO())
}
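// Delete removes the named object using default delete options.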
func (m *Helper) Delete(namespace, name string) (runtime.Object, error) {
return m.DeleteWithOptions(namespace, name, nil)
}
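// DeleteWithOptions removes the named object using the provided delete options,
// honoring the Helper's server-side dry-run setting.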
func (m *Helper) DeleteWithOptions(namespace, name string, options *metav1.DeleteOptions) (runtime.Object, error) {
if options == nil {
options = &metav1.DeleteOptions{}
}
if m.ServerDryRun {
options.DryRun = []string{metav1.DryRunAll}
}
return m.RESTClient.Delete().
NamespaceIfScoped(namespace, m.NamespaceScoped).
Resource(m.Resource).
Name(name).
Body(options).
Do(context.TODO()).
Get()
}
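// Create submits a new object to the server; if modify is true, any client-set
// resourceVersion is cleared before sending.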
func (m *Helper) Create(namespace string, modify bool, obj runtime.Object) (runtime.Object, error) {
return m.CreateWithOptions(namespace, modify, obj, nil)
}
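// CreateWithOptions is like Create but lets the caller supply create options.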
func (m *Helper) CreateWithOptions(namespace string, modify bool, obj runtime.Object, options *metav1.CreateOptions) (runtime.Object, error) {
if options == nil {
options = &metav1.CreateOptions{}
}
if m.ServerDryRun {
options.DryRun = []string{metav1.DryRunAll}
}
if m.FieldManager != "" {
options.FieldManager = m.FieldManager
}
if modify {
// Attempt to version the object based on client logic.
version, err := metadataAccessor.ResourceVersion(obj)
if err != nil {
// We don't know how to clear the version on this object, so send it to the server as is
return m.createResource(m.RESTClient, m.Resource, namespace, obj, options)
}
if version != "" {
if err := metadataAccessor.SetResourceVersion(obj, ""); err != nil {
return nil, err
}
}
}
return m.createResource(m.RESTClient, m.Resource, namespace, obj, options)
}
func (m *Helper) createResource(c RESTClient, resource, namespace string, obj runtime.Object, options *metav1.CreateOptions) (runtime.Object, error) {
return c.Post().
NamespaceIfScoped(namespace, m.NamespaceScoped).
Resource(resource).
VersionedParams(options, metav1.ParameterCodec).
Body(obj).
Do(context.TODO()).
Get()
}
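// Patch applies a patch of the given type to the named object, honoring the
// Helper's dry-run and field manager settings.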
func (m *Helper) Patch(namespace, name string, pt types.PatchType, data []byte, options *metav1.PatchOptions) (runtime.Object, error) {
if options == nil {
options = &metav1.PatchOptions{}
}
if m.ServerDryRun {
options.DryRun = []string{metav1.DryRunAll}
}
if m.FieldManager != "" {
options.FieldManager = m.FieldManager
}
return m.RESTClient.Patch(pt).
NamespaceIfScoped(namespace, m.NamespaceScoped).
Resource(m.Resource).
Name(name).
VersionedParams(options, metav1.ParameterCodec).
Body(data).
Do(context.TODO()).
Get()
}
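// Replace updates the named object via a PUT; when overwrite is true and the
// object carries no resourceVersion, the server's current version is copied in
// so the update replaces the existing object.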
func (m *Helper) Replace(namespace, name string, overwrite bool, obj runtime.Object) (runtime.Object, error) {
c := m.RESTClient
var options = &metav1.UpdateOptions{}
if m.ServerDryRun {
options.DryRun = []string{metav1.DryRunAll}
}
if m.FieldManager != "" {
options.FieldManager = m.FieldManager
}
// Attempt to version the object based on client logic.
version, err := metadataAccessor.ResourceVersion(obj)
if err != nil {
// We don't know how to version this object, so send it to the server as is
return m.replaceResource(c, m.Resource, namespace, name, obj, options)
}
if version == "" && overwrite {
// Retrieve the current version of the object to overwrite the server object
serverObj, err := c.Get().NamespaceIfScoped(namespace, m.NamespaceScoped).Resource(m.Resource).Name(name).Do(context.TODO()).Get()
if err != nil {
// The object does not exist, but we want it to be created
return m.replaceResource(c, m.Resource, namespace, name, obj, options)
}
serverVersion, err := metadataAccessor.ResourceVersion(serverObj)
if err != nil {
return nil, err
}
if err := metadataAccessor.SetResourceVersion(obj, serverVersion); err != nil {
return nil, err
}
}
return m.replaceResource(c, m.Resource, namespace, name, obj, options)
}
func (m *Helper) replaceResource(c RESTClient, resource, namespace, name string, obj runtime.Object, options *metav1.UpdateOptions) (runtime.Object, error) {
return c.Put().
NamespaceIfScoped(namespace, m.NamespaceScoped).
Resource(resource).
Name(name).
VersionedParams(options, metav1.ParameterCodec).
Body(obj).
Do(context.TODO()).
Get()
}
| staging/src/k8s.io/cli-runtime/pkg/resource/helper.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00019211904145777225,
0.0001697769621387124,
0.00016169219452422112,
0.0001675979292485863,
0.0000062427834564005025
] |