# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Related configuration of etcd, used to store Milvus metadata & service discovery.
etcd:
  # Endpoints used to access etcd service. You can change this parameter as the endpoints of your own etcd cluster.
  # Environment variable: ETCD_ENDPOINTS
  # etcd preferentially acquires valid address from environment variable ETCD_ENDPOINTS when Milvus is started.
  endpoints: localhost:2379
  # Root prefix of the key to where Milvus stores data in etcd.
  # It is recommended to change this parameter before starting Milvus for the first time.
  # To share an etcd instance among multiple Milvus instances, consider changing this to a different value for each Milvus instance before you start them.
  # Set an easy-to-identify root path for Milvus if etcd service already exists.
  # Changing this for an already running Milvus instance may result in failures to read legacy data.
  rootPath: by-dev
  # Sub-prefix of the key to where Milvus stores metadata-related information in etcd.
  # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data.
  # It is recommended to change this parameter before starting Milvus for the first time.
  metaSubPath: meta
  # Sub-prefix of the key to where Milvus stores timestamps in etcd.
  # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data.
  # It is recommended not to change this parameter if there is no specific reason.
  kvSubPath: kv
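  # For illustration: as documented for tikv below, the effective paths compose as
  # rootPath + '/' + subPath, so with the defaults above metadata lives under
  # "by-dev/meta" and timestamps under "by-dev/kv".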
  log:
    level: info # Only supports debug, info, warn, error, panic, or fatal. Default 'info'.
    # path is one of:
    #  - "default" as os.Stderr,
    #  - "stderr" as os.Stderr,
    #  - "stdout" as os.Stdout,
    #  - file path to append server logs to.
    # please adjust in embedded Milvus: /tmp/milvus/logs/etcd.log
    path: stdout
  ssl:
    enabled: false # Whether to support ETCD secure connection mode
    tlsCert: /path/to/etcd-client.pem # path to your cert file
    tlsKey: /path/to/etcd-client-key.pem # path to your key file
    tlsCACert: /path/to/ca.pem # path to your CACert file
    # TLS min version
    # Optional values: 1.0, 1.1, 1.2, 1.3.
    # We recommend using version 1.2 and above.
    tlsMinVersion: 1.3
  requestTimeout: 10000 # Etcd operation timeout in milliseconds
  use:
    embed: false # Whether to enable embedded Etcd (an in-process EtcdServer).
  data:
    dir: default.etcd # Embedded Etcd only. please adjust in embedded Milvus: /tmp/milvus/etcdData/
  auth:
    enabled: false # Whether to enable authentication
    userName:  # username for etcd authentication
    password:  # password for etcd authentication

metastore:
  type: etcd # Default value: etcd, Valid values: [etcd, tikv]
  snapshot:
    ttl: 86400 # snapshot ttl in seconds
    reserveTime: 3600 # snapshot reserve time in seconds
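# For example (a sketch, not enabled by default): to keep metadata in TiKV while etcd
# still handles service discovery, you could set:
# metastore:
#   type: tikv
# and point tikv.endpoints below at your PD endpoints (note PD's default port 2379
# conflicts with etcd's).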

# Related configuration of tikv, used to store Milvus metadata.
# Notice that when TiKV is enabled for metastore, you still need to have etcd for service discovery.
# TiKV is a good option when the metadata size requires better horizontal scalability.
tikv:
  endpoints: 127.0.0.1:2389 # Note that the default pd port of tikv is 2379, which conflicts with etcd.
  rootPath: by-dev # The root path where data is stored in tikv
  metaSubPath: meta # metaRootPath = rootPath + '/' + metaSubPath
  kvSubPath: kv # kvRootPath = rootPath + '/' + kvSubPath
  requestTimeout: 10000 # ms, tikv request timeout
  snapshotScanSize: 256 # batch size of tikv snapshot scan
  ssl:
    enabled: false # Whether to support TiKV secure connection mode
    tlsCert:  # path to your cert file
    tlsKey:  # path to your key file
    tlsCACert:  # path to your CACert file

localStorage:
  # Local path to where vector data are stored during a search or a query to avoid repetitive access to MinIO or S3 service.
  # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data.
  # It is recommended to change this parameter before starting Milvus for the first time.
  path: /var/lib/milvus/data/

# Related configuration of MinIO/S3/GCS or any other service that supports the S3 API, which is responsible for data persistence for Milvus.
# We refer to the storage service as MinIO/S3 in the following description for simplicity.
minio:
  # IP address of MinIO or S3 service.
  # Environment variable: MINIO_ADDRESS
  # minio.address and minio.port together generate the valid access to MinIO or S3 service.
  # MinIO preferentially acquires the valid IP address from the environment variable MINIO_ADDRESS when Milvus is started.
  # Default value applies when MinIO or S3 is running on the same network with Milvus.
  address: localhost
  port: 9000 # Port of MinIO or S3 service.
  # Access key ID that MinIO or S3 issues to user for authorized access.
  # Environment variable: MINIO_ACCESS_KEY_ID or minio.accessKeyID
  # minio.accessKeyID and minio.secretAccessKey together are used for identity authentication to access the MinIO or S3 service.
  # This configuration must be set identical to the environment variable MINIO_ACCESS_KEY_ID, which is necessary for starting MinIO or S3.
  # The default value applies to MinIO or S3 service that started with the default docker-compose.yml file.
  accessKeyID: minioadmin
  # Secret key used to encrypt the signature string and verify the signature string on server. It must be kept strictly confidential and accessible only to the MinIO or S3 server and users.
  # Environment variable: MINIO_SECRET_ACCESS_KEY or minio.secretAccessKey
  # minio.accessKeyID and minio.secretAccessKey together are used for identity authentication to access the MinIO or S3 service.
  # This configuration must be set identical to the environment variable MINIO_SECRET_ACCESS_KEY, which is necessary for starting MinIO or S3.
  # The default value applies to MinIO or S3 service that started with the default docker-compose.yml file.
  secretAccessKey: minioadmin
  useSSL: false # Switch value to control whether to access the MinIO or S3 service through SSL.
  ssl:
    tlsCACert: /path/to/public.crt # path to your CACert file
  # Name of the bucket where Milvus stores data in MinIO or S3.
  # Milvus 2.0.0 does not support storing data in multiple buckets.
  # A bucket with this name will be created if it does not exist. If the bucket already exists and is accessible, it will be used directly. Otherwise, there will be an error.
  # To share a MinIO instance among multiple Milvus instances, consider changing this to a different value for each Milvus instance before you start them. For details, see Operation FAQs.
  # The data will be stored in the local Docker if Docker is used to start the MinIO service locally. Ensure that there is sufficient storage space.
  # A bucket name is globally unique in one MinIO or S3 instance.
  bucketName: a-bucket
  # Root prefix of the key to where Milvus stores data in MinIO or S3.
  # It is recommended to change this parameter before starting Milvus for the first time.
  # To share a MinIO instance among multiple Milvus instances, consider changing this to a different value for each Milvus instance before you start them. For details, see Operation FAQs.
  # Set an easy-to-identify root key prefix for Milvus if the MinIO or S3 service already exists.
  # Changing this for an already running Milvus instance may result in failures to read legacy data.
  rootPath: files
  # Whether to use an IAM role to access S3/GCS instead of access/secret keys
  # For more information, refer to
  # aws: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html
  # gcp: https://cloud.google.com/storage/docs/access-control/iam
  # aliyun (ack): https://www.alibabacloud.com/help/en/container-service-for-kubernetes/latest/use-rrsa-to-enforce-access-control
  # aliyun (ecs): https://www.alibabacloud.com/help/en/elastic-compute-service/latest/attach-an-instance-ram-role
  useIAM: false
  # Cloud Provider of S3. Supports: "aws", "gcp", "aliyun".
  # Cloud Provider of Google Cloud Storage. Supports: "gcpnative".
  # You can use "aws" for any other cloud provider that supports the S3 API with signature v4, e.g. MinIO.
  # You can use "gcp" for any other cloud provider that supports the S3 API with signature v2.
  # You can use "aliyun" for any other cloud provider that uses virtual-host-style buckets.
  # You can use "gcpnative" for the Google Cloud Platform provider; it uses service account credentials
  # for authentication.
  # When useIAM is enabled, only "aws", "gcp", and "aliyun" are supported for now.
  cloudProvider: aws
  # The JSON content contains the gcs service account credentials.
  # Used only for the "gcpnative" cloud provider.
  gcpCredentialJSON: 
  # Custom endpoint for fetching IAM role credentials, used when useIAM is true and cloudProvider is "aws".
  # Leave it empty if you want to use the AWS default endpoint.
  iamEndpoint: 
  logLevel: fatal # Log level for aws sdk log. Supported level:  off, fatal, error, warn, info, debug, trace
  region:  # Specify minio storage system location region
  useVirtualHost: false # Whether use virtual host mode for bucket
  requestTimeoutMs: 10000 # minio timeout for request time in milliseconds
  # The maximum number of objects requested per batch in the minio ListObjects rpc;
  # 0 means using the oss client default. Decrease this configuration if ListObjects times out.
  listObjectsMaxKeys: 0
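# Example (a hedged sketch; the endpoint, bucket, and region are illustrative, not defaults):
# pointing Milvus at AWS S3 with an IAM role instead of the local MinIO defaults above
# might look like:
# minio:
#   address: s3.us-east-1.amazonaws.com
#   port: 443
#   useSSL: true
#   bucketName: my-milvus-bucket
#   rootPath: files
#   useIAM: true
#   cloudProvider: aws
#   region: us-east-1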

# Milvus supports four MQs: rocksmq (based on RocksDB), natsmq (embedded nats-server), Pulsar, and Kafka.
# You can change your MQ by setting the mq.type field.
# If you keep mq.type as default and multiple MQs are configured in this file, one is enabled by the following priority:
# 1. standalone (local) mode: rocksmq (default) > natsmq > Pulsar > Kafka
# 2. cluster mode: Pulsar (default) > Kafka (rocksmq and natsmq are unsupported in cluster mode)
mq:
  # Default value: "default"
  # Valid values: [default, pulsar, kafka, rocksmq, natsmq]
  type: default
  enablePursuitMode: true # Default value: "true"
  pursuitLag: 10 # time tick lag threshold to enter pursuit mode, in seconds
  pursuitBufferSize: 8388608 # pursuit mode buffer size in bytes
  pursuitBufferTime: 60 # pursuit mode buffer time in seconds
  mqBufSize: 16 # MQ client consumer buffer length
  dispatcher:
    mergeCheckInterval: 1 # the interval time(in seconds) for dispatcher to check whether to merge
    targetBufSize: 16 # the length of the channel buffer for the target
    maxTolerantLag: 3 # Default value: "3", the timeout (in seconds) for the target to send a msgPack
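# For example (a sketch): to pin the MQ explicitly instead of relying on the priority
# described above, set mq.type to one of the valid values:
# mq:
#   type: pulsar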

# Related configuration of pulsar, used to manage Milvus logs of recent mutation operations, output streaming log, and provide log publish-subscribe services.
pulsar:
  # IP address of Pulsar service.
  # Environment variable: PULSAR_ADDRESS
  # pulsar.address and pulsar.port together generate the valid access to Pulsar.
  # Pulsar preferentially acquires the valid IP address from the environment variable PULSAR_ADDRESS when Milvus is started.
  # Default value applies when Pulsar is running on the same network with Milvus.
  address: localhost
  port: 6650 # Port of Pulsar service.
  webport: 80 # Web port of Pulsar service. If you connect directly without a proxy, use 8080.
  # The maximum size of each message in Pulsar. Unit: Byte.
  # By default, Pulsar can transmit at most 2MB of data in a single message. When the size of inserted data is greater than this value, proxy fragments the data into multiple messages to ensure that they can be transmitted correctly.
  # If the corresponding parameter in Pulsar remains unchanged, increasing this configuration will cause Milvus to fail, and reducing it produces no advantage.
  maxMessageSize: 2097152
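  # For instance, with the default 2097152 (2 MB), a 5 MB insert would be fragmented
  # by the proxy into roughly ceil(5 / 2) = 3 messages.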
  # Pulsar can be provisioned for specific tenants with appropriate capacity allocated to the tenant.
  # To share a Pulsar instance among multiple Milvus instances, you can change this to a Pulsar tenant rather than the default one for each Milvus instance before you start them. However, if you do not want Pulsar multi-tenancy, you are advised to change msgChannel.chanNamePrefix.cluster to a different value.
  tenant: public
  namespace: default # A Pulsar namespace is the administrative unit nomenclature within a tenant.
  requestTimeout: 60 # pulsar client global request timeout in seconds
  enableClientMetrics: false # Whether to register pulsar client metrics into milvus metrics path.

# If you want to enable kafka, you need to comment out the pulsar configs
# kafka:
#   brokerList: localhost:9092
#   saslUsername: 
#   saslPassword: 
#   saslMechanisms: 
#   securityProtocol: 
#   ssl:
#     enabled: false # whether to enable ssl mode
#     tlsCert:  # path to client's public key (PEM) used for authentication
#     tlsKey:  # path to client's private key (PEM) used for authentication
#     tlsCaCert:  # file or directory path to CA certificate(s) for verifying the broker's key
#     tlsKeyPassword:  # private key passphrase for use with ssl.key.location and set_ssl_cert(), if any
#   readTimeout: 10

rocksmq:
  # Prefix of the key to where Milvus stores data in RocksMQ.
  # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data.
  # It is recommended to change this parameter before starting Milvus for the first time.
  # Set an easy-to-identify data path for Milvus.
  path: /var/lib/milvus/rdb_data
  lrucacheratio: 0.06 # rocksdb cache memory ratio
  rocksmqPageSize: 67108864 # The maximum size of messages in each page in RocksMQ. Messages in RocksMQ are checked and cleared (when expired) in batch based on this parameter. Unit: Byte.
  retentionTimeInMinutes: 4320 # The maximum retention time of acked messages in RocksMQ. Acked messages in RocksMQ are retained for the specified period of time and then cleared. Unit: Minute.
  retentionSizeInMB: 8192 # The maximum retention size of acked messages of each topic in RocksMQ. Acked messages in each topic are cleared if their size exceeds this parameter. Unit: MB.
  compactionInterval: 86400 # Time interval to trigger rocksdb compaction to remove deleted data. Unit: Second
  compressionTypes: 0,0,7,7,7 # compaction compression types; only 0 and 7 are supported. 0 means no compression, 7 means zstd. The number of types equals the number of rocksdb levels.
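  # Worked example with the defaults above: an acked message may be retained for up to
  # 4320 minutes (3 days), and each topic's acked backlog is also capped at 8192 MB,
  # so either limit can trigger clearing of the oldest acked messages.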

# natsmq configuration.
# more detail: https://docs.nats.io/running-a-nats-service/configuration
natsmq:
  server:
    port: 4222 # Listening port of the NATS server.
    storeDir: /var/lib/milvus/nats # Directory to use for JetStream storage of nats
    maxFileStore: 17179869184 # Maximum size of the 'file' storage
    maxPayload: 8388608 # Maximum number of bytes in a message payload
    maxPending: 67108864 # Maximum number of bytes buffered for a connection Applies to client connections
    initializeTimeout: 4000 # Timeout in milliseconds to wait for natsmq initialization to finish
    monitor:
      trace: false # If true enable protocol trace log messages
      debug: false # If true enable debug log messages
      logTime: true # If set to false, log without timestamps.
      logFile: /tmp/milvus/logs/nats.log # Log file path; a relative path is resolved relative to the parent directory of the milvus binary
      logSizeLimit: 536870912 # Size in bytes after the log file rolls over to a new one
    retention:
      maxAge: 4320 # Maximum age of any message in the P-channel
      maxBytes:  # Maximum number of bytes a single P-channel may contain. The oldest messages are removed if the P-channel exceeds this size
      maxMsgs:  # Maximum number of messages a single P-channel may contain. The oldest messages are removed if the P-channel exceeds this limit

# Related configuration of rootCoord, used to handle data definition language (DDL) and data control language (DCL) requests
rootCoord:
  dmlChannelNum: 16 # The number of DML-Channels to create at the root coord startup.
  # The maximum number of partitions in each collection.
  # New partitions cannot be created if this parameter is set as 0 or 1.
  # Range: [0, INT64MAX]
  maxPartitionNum: 1024
  # The minimum row count of a segment required for creating index.
  # Segments with smaller size than this parameter will not be indexed, and will be searched with brute force.
  minSegmentSizeToEnableIndex: 1024
  enableActiveStandby: false
  maxDatabaseNum: 64 # Maximum number of database
  maxGeneralCapacity: 65536 # upper limit for the sum of the products of partitionNumber and shardNumber across all collections
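  # Worked example: a collection with 16 shards and 1024 partitions contributes
  # 16 * 1024 = 16384 to the general capacity, so the default 65536 admits at most
  # four such collections.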
  gracefulStopTimeout: 5 # seconds. Force stop the node if graceful stop exceeds this time
  ip:  # TCP/IP address of rootCoord. If not specified, use the first unicastable address
  port: 53100 # TCP port of rootCoord
  grpc:
    serverMaxSendSize: 536870912 # The maximum size of each RPC request that the rootCoord can send, unit: byte
    serverMaxRecvSize: 268435456 # The maximum size of each RPC request that the rootCoord can receive, unit: byte
    clientMaxSendSize: 268435456 # The maximum size of each RPC request that the clients on rootCoord can send, unit: byte
    clientMaxRecvSize: 536870912 # The maximum size of each RPC request that the clients on rootCoord can receive, unit: byte

# Related configuration of proxy, used to validate client requests and reduce the returned results.
proxy:
  timeTickInterval: 200 # The interval at which proxy synchronizes the time tick, unit: ms.
  healthCheckTimeout: 3000 # ms, the interval at which to do component health checks
  msgStream:
    timeTick:
      bufSize: 512 # The maximum number of messages can be buffered in the timeTick message stream of the proxy when producing messages.
  maxNameLength: 255 # The maximum length of the name or alias that can be created in Milvus, including the collection name, collection alias, partition name, and field name.
  maxFieldNum: 64 # The maximum number of fields that can be created in a collection. It is strongly DISCOURAGED to set maxFieldNum >= 64.
  maxVectorFieldNum: 4 # The maximum number of vector fields that can be specified in a collection. Value range: [1, 10].
  maxShardNum: 16 # The maximum number of shards that can be created in a collection.
  maxDimension: 32768 # The maximum number of dimensions a vector can have in a collection.
  # Whether to produce gin logs.
  # please adjust in embedded Milvus: false
  ginLogging: true
  ginLogSkipPaths: / # skip url path for gin log
  maxTaskNum: 1024 # The maximum number of tasks in the task queue of the proxy.
  mustUsePartitionKey: false # switch for whether proxy must use partition key for the collection
  accessLog:
    enable: false # Whether to enable the access log feature.
    minioEnable: false # Whether to upload local access log files to MinIO. This parameter can be specified when proxy.accessLog.filename is not empty.
    localPath: /tmp/milvus_access # The local folder path where the access log file is stored. This parameter can be specified when proxy.accessLog.filename is not empty.
    filename:  # The name of the access log file. If you leave this parameter empty, access logs will be printed to stdout.
    maxSize: 64 # The maximum size allowed for a single access log file. If the log file size reaches this limit, a rotation process will be triggered. This process seals the current access log file, creates a new log file, and clears the contents of the original log file. Unit: MB.
    rotatedTime: 0 # The maximum time interval allowed for rotating a single access log file. Upon reaching the specified time interval, a rotation process is triggered, resulting in the creation of a new access log file and sealing of the previous one. Unit: seconds
    remotePath: access_log/ # The path of the object storage for uploading access log files.
    remoteMaxTime: 0 # The time interval allowed for uploading access log files. If the upload time of a log file exceeds this interval, the file will be deleted. Setting the value to 0 disables this feature.
    formatters:
      base:
        format: "[$time_now] [ACCESS] <$user_name: $user_addr> $method_name [status: $method_status] [code: $error_code] [sdk: $sdk_version] [msg: $error_msg] [traceID: $trace_id] [timeCost: $time_cost]"
      query:
        format: "[$time_now] [ACCESS] <$user_name: $user_addr> $method_name [status: $method_status] [code: $error_code] [sdk: $sdk_version] [msg: $error_msg] [traceID: $trace_id] [timeCost: $time_cost] [database: $database_name] [collection: $collection_name] [partitions: $partition_name] [expr: $method_expr]"
        methods: "Query,Search,Delete"
    cacheSize: 0 # Size of the write cache for access logs, in bytes. (A size of 0 disables the write cache)
    cacheFlushInterval: 3 # time interval for auto-flushing the write cache, in seconds. (An interval of 0 disables auto flush)
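    # A minimal sketch (illustrative values, not defaults): enabling file-based access
    # logs uploaded to MinIO would set, under proxy.accessLog:
    #   enable: true
    #   filename: milvus_access.log
    #   minioEnable: true
    #   remotePath: access_log/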
  connectionCheckIntervalSeconds: 120 # the interval time(in seconds) for connection manager to scan inactive client info
  connectionClientInfoTTLSeconds: 86400 # inactive client info TTL duration, in seconds
  maxConnectionNum: 10000 # the max client info numbers that proxy should manage, avoid too many client infos
  gracefulStopTimeout: 30 # seconds. Force stop the node if graceful stop exceeds this time
  slowQuerySpanInSeconds: 5 # a query whose execution time exceeds `slowQuerySpanInSeconds` is considered slow, in seconds.
  queryNodePooling:
    size: 10 # the size for shardleader(querynode) client pool
  http:
    enabled: true # Whether to enable the http server
    debug_mode: false # Whether to enable http server debug mode
    port:  # high-level restful api
    acceptTypeAllowInt64: true # high-level restful api, whether http client can deal with int64
    enablePprof: true # Whether to enable pprof middleware on the metrics port
  ip:  # TCP/IP address of proxy. If not specified, use the first unicastable address
  port: 19530 # TCP port of proxy
  internalPort: 19529
  grpc:
    serverMaxSendSize: 268435456 # The maximum size of each RPC request that the proxy can send, unit: byte
    serverMaxRecvSize: 67108864 # The maximum size of each RPC request that the proxy can receive, unit: byte
    clientMaxSendSize: 268435456 # The maximum size of each RPC request that the clients on proxy can send, unit: byte
    clientMaxRecvSize: 67108864 # The maximum size of each RPC request that the clients on proxy can receive, unit: byte

# Related configuration of queryCoord, used to manage topology and load balancing for the query nodes, and handoff from growing segments to sealed segments.
queryCoord:
  taskMergeCap: 1
  taskExecutionCap: 256
  # Switch value to control whether to automatically replace a growing segment with the corresponding indexed sealed segment when the growing segment reaches the sealing threshold.
  # If this parameter is set false, Milvus simply searches the growing segments with brute force.
  autoHandoff: true
  autoBalance: true # Switch value to control whether to automatically balance the memory usage among query nodes by distributing segment loading and releasing operations evenly.
  autoBalanceChannel: true # Enable auto balance channel
  balancer: ScoreBasedBalancer # auto balancer used for segments on queryNodes
  globalRowCountFactor: 0.1 # the weight used when balancing segments among queryNodes
  scoreUnbalanceTolerationFactor: 0.05 # the least value for unbalanced extent between from and to nodes when doing balance
  reverseUnBalanceTolerationFactor: 1.3 # the largest value for unbalanced extent between from and to nodes after doing balance
  overloadedMemoryThresholdPercentage: 90 # The threshold of memory usage (in percentage) in a query node to trigger the sealed segment balancing.
  balanceIntervalSeconds: 60 # The interval at which query coord balances the memory usage among query nodes.
  memoryUsageMaxDifferencePercentage: 30 # The threshold of memory usage difference (in percentage) between any two query nodes to trigger the sealed segment balancing.
  rowCountFactor: 0.4 # the row count weight used when balancing segments among queryNodes
  segmentCountFactor: 0.4 # the segment count weight used when balancing segments among queryNodes
  globalSegmentCountFactor: 0.1 # the segment count weight used when balancing segments among queryNodes
  segmentCountMaxSteps: 50 # segment count based plan generator max steps
  rowCountMaxSteps: 50 # row count based plan generator max steps
  randomMaxSteps: 10 # random based plan generator max steps
  growingRowCountWeight: 4 # the memory weight of growing segment row count
  delegatorMemoryOverloadFactor: 0.1 # the factor of delegator overloaded memory
  balanceCostThreshold: 0.001 # the threshold of balance cost, if the difference of cluster's cost after executing the balance plan is less than this value, the plan will not be executed
  checkSegmentInterval: 1000
  checkChannelInterval: 1000
  checkBalanceInterval: 10000
  checkIndexInterval: 10000
  channelTaskTimeout: 60000 # 1 minute
  segmentTaskTimeout: 120000 # 2 minute
  distPullInterval: 500
  heartbeatAvailableInterval: 10000 # 10s, Only QueryNodes which fetched heartbeats within the duration are available
  loadTimeoutSeconds: 600
  distRequestTimeout: 5000 # the request timeout for querycoord fetching data distribution from querynodes, in milliseconds
  heatbeatWarningLag: 5000 # the lag value at which querycoord reports a warning when the last heartbeat is too old, in milliseconds
  checkHandoffInterval: 5000
  enableActiveStandby: false
  checkInterval: 1000
  checkHealthInterval: 3000 # 3s, the interval when query coord try to check health of query node
  checkHealthRPCTimeout: 2000 # 2000ms, the timeout of the check-health rpc to query node
  brokerTimeout: 5000 # 5000ms, querycoord broker rpc timeout
  collectionRecoverTimes: 3 # if collection recover times reach the limit during loading state, release it
  observerTaskParallel: 16 # the parallel observer dispatcher task number
  checkAutoBalanceConfigInterval: 10 # the interval of check auto balance config
  checkNodeSessionInterval: 60 # the interval(in seconds) of check querynode cluster session
  gracefulStopTimeout: 5 # seconds. Force stop the node if graceful stop exceeds this time
  enableStoppingBalance: true # whether enable stopping balance
  channelExclusiveNodeFactor: 4 # the minimum node number for enabling channel's exclusive mode
  collectionObserverInterval: 200 # the interval of collection observer
  checkExecutedFlagInterval: 100 # the interval of check executed flag to force to pull dist
  updateCollectionLoadStatusInterval: 5 # 5m, max interval of updating collection loaded status for check health
  cleanExcludeSegmentInterval: 60 # the time duration, in seconds, for cleaning the pipeline's excluded segments, which are used to filter invalid data
  ip:  # TCP/IP address of queryCoord. If not specified, use the first unicastable address
  port: 19531 # TCP port of queryCoord
  grpc:
    serverMaxSendSize: 536870912 # The maximum size of each RPC request that the queryCoord can send, unit: byte
    serverMaxRecvSize: 268435456 # The maximum size of each RPC request that the queryCoord can receive, unit: byte
    clientMaxSendSize: 268435456 # The maximum size of each RPC request that the clients on queryCoord can send, unit: byte
    clientMaxRecvSize: 536870912 # The maximum size of each RPC request that the clients on queryCoord can receive, unit: byte

# Related configuration of queryNode, used to run hybrid search between vector and scalar data.
queryNode:
  stats:
    publishInterval: 1000 # The interval that query node publishes the node statistics information, including segment status, cpu usage, memory usage, health status, etc. Unit: ms.
  segcore:
    knowhereThreadPoolNumRatio: 4 # The number of threads in knowhere's thread pool. If disk is enabled, the pool size will be multiplied by knowhereThreadPoolNumRatio ([1, 32]).
    chunkRows: 128 # Row count by which Segcore divides a segment into chunks.
    interimIndex:
      # Whether to create a temporary index for growing segments and sealed segments not yet indexed, improving search performance.
      # Milvus eventually seals and indexes all segments, but enabling this optimizes search performance for queries immediately following data insertion.
      # This defaults to true, indicating that Milvus creates a temporary index for growing segments and for sealed segments that are not yet indexed upon searches.
      enableIndex: true
      nlist: 128 # temp index nlist; recommended to set to sqrt(chunkRows); must be smaller than chunkRows/8
      nprobe: 16 # nprobe for searching the small index, based on your accuracy requirement; must be smaller than nlist
      memExpansionRate: 1.15 # extra memory needed by building interim index
      buildParallelRate: 0.5 # the ratio of building interim index parallel matched with cpu num
    multipleChunkedEnable: true # Enable multiple chunked search
    knowhereScoreConsistency: false # Enable knowhere strong consistency score computation logic
  loadMemoryUsageFactor: 1 # The multiplication factor for calculating the memory usage while loading segments
  enableDisk: false # enable querynode load disk index, and search on disk index
  maxDiskUsagePercentage: 95
  cache:
    memoryLimit: 2147483648 # 2 GB, 2 * 1024 *1024 *1024
    readAheadPolicy: willneed # The read ahead policy of chunk cache, options: `normal, random, sequential, willneed, dontneed`
    # options: async, sync, disable.
    # Specifies the necessity for warming up the chunk cache.
    # 1. If set to "sync" or "async", the original vector data will be synchronously/asynchronously loaded into the
    # chunk cache during the load process. This approach has the potential to substantially reduce query/search latency
    # for a specific duration post-load, albeit accompanied by a concurrent increase in disk usage;
    # 2. If set to "disable", original vector data will only be loaded into the chunk cache during search/query.
    warmup: disable
  mmap:
    vectorField: false # Enable mmap for loading vector data
    vectorIndex: false # Enable mmap for loading vector index
    scalarField: false # Enable mmap for loading scalar data
    scalarIndex: false # Enable mmap for loading scalar index
    chunkCache: true # Enable mmap for chunk cache (raw vector retrieving).
    # Enable memory mapping (mmap) to optimize the handling of growing raw data. 
    # By activating this feature, the memory overhead associated with newly added or modified data will be significantly minimized. 
    # However, this optimization may come at the cost of a slight decrease in query latency for the affected data segments.
    growingMmapEnabled: false
    fixedFileSizeForMmapAlloc: 1 # tmp file size for mmap chunk manager
    maxDiskUsagePercentageForMmapAlloc: 50 # disk percentage used in mmap chunk manager
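    # A sketch (not the defaults): to trade some query latency for lower resident
    # memory, mmap can be enabled for the large artifacts under queryNode.mmap:
    #   vectorField: true
    #   vectorIndex: true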
  lazyload:
    enabled: false # Enable lazyload for loading data
    waitTimeout: 30000 # max wait timeout duration in milliseconds before start to do lazyload search and retrieve
    requestResourceTimeout: 5000 # max timeout in milliseconds for waiting request resource for lazy load, 5s by default
    requestResourceRetryInterval: 2000 # retry interval in milliseconds for waiting request resource for lazy load, 2s by default
    maxRetryTimes: 1 # max retry times for lazy load, 1 by default
    maxEvictPerRetry: 1 # max evict count for lazy load, 1 by default
  indexOffsetCacheEnabled: false # enable index offset cache for some scalar indexes, now is just for bitmap index, enable this param can improve performance for retrieving raw data from index
  grouping:
    enabled: true
    maxNQ: 1000
    topKMergeRatio: 20
  scheduler:
    receiveChanSize: 10240
    unsolvedQueueSize: 10240
    # maxReadConcurrentRatio is the concurrency ratio of read task (search task and query task).
    # Max read concurrency would be the value of hardware.GetCPUNum * maxReadConcurrentRatio.
    # It defaults to 2.0, which means max read concurrency would be the value of hardware.GetCPUNum * 2.
    # Max read concurrency must be greater than or equal to 1, and less than or equal to hardware.GetCPUNum * 100.
    # (0, 100]
    maxReadConcurrentRatio: 1
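    # Worked example: on a 16-core node, the value 1 above yields a max read
    # concurrency of hardware.GetCPUNum * 1 = 16 concurrent search/query tasks;
    # a ratio of 2 would allow 32.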
    cpuRatio: 10 # ratio used to estimate read task cpu usage.
    maxTimestampLag: 86400
    scheduleReadPolicy:
      # fifo: A FIFO queue supports the schedule.
      # user-task-polling:
      # 	The user's tasks will be polled one by one and scheduled.
      # 	Scheduling is fair on task granularity.
      # 	The policy is based on the username for authentication.
      # 	An empty username is considered the same user.
      # 	When there are no multiple users, the policy decays into FIFO.
      name: fifo
      taskQueueExpire: 60 # Controls how long (in seconds) a task queue is retained after it becomes empty
      enableCrossUserGrouping: false # Enable cross-user grouping when using the user-task-polling policy. (Disable it if users' tasks cannot be merged with each other)
      maxPendingTaskPerUser: 1024 # Max pending task per user in scheduler
  levelZeroForwardPolicy: RemoteLoad # delegator level zero deletion forward policy, possible options: ["FilterByBF", "RemoteLoad"]
  streamingDeltaForwardPolicy: FilterByBF # delegator streaming deletion forward policy, possible options: ["FilterByBF", "Direct"]
  dataSync:
    flowGraph:
      maxQueueLength: 16 # The maximum size of task queue cache in flow graph in query node.
      maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph
  enableSegmentPrune: false # use partition stats to prune data in search/query on shard delegator
  queryStreamBatchSize: 4194304 # the minimum batch size returned by a stream query
  queryStreamMaxBatchSize: 134217728 # the maximum batch size returned by a stream query
  bloomFilterApplyParallelFactor: 4 # parallel factor for applying pk to bloom filter, default to 4*CPU_CORE_NUM
  workerPooling:
    size: 10 # the size for worker querynode client pool
  ip:  # TCP/IP address of queryNode. If not specified, use the first unicastable address
  port: 21123 # TCP port of queryNode
  grpc:
    serverMaxSendSize: 536870912 # The maximum size of each RPC request that the queryNode can send, unit: byte
    serverMaxRecvSize: 268435456 # The maximum size of each RPC request that the queryNode can receive, unit: byte
    clientMaxSendSize: 268435456 # The maximum size of each RPC request that the clients on queryNode can send, unit: byte
    clientMaxRecvSize: 536870912 # The maximum size of each RPC request that the clients on queryNode can receive, unit: byte

indexCoord:
  bindIndexNodeMode:
    enable: false
    address: localhost:22930
    withCred: false
    nodeID: 0
  segment:
    minSegmentNumRowsToEnableIndex: 1024 # It's a threshold. When the segment's row count is less than this value, the segment will not be indexed

indexNode:
  scheduler:
    buildParallel: 1
  enableDisk: true # enable index node build disk vector index
  maxDiskUsagePercentage: 95
  ip:  # TCP/IP address of indexNode. If not specified, use the first unicastable address
  port: 21121 # TCP port of indexNode
  grpc:
    serverMaxSendSize: 536870912 # The maximum size of each RPC request that the indexNode can send, unit: byte
    serverMaxRecvSize: 268435456 # The maximum size of each RPC request that the indexNode can receive, unit: byte
    clientMaxSendSize: 268435456 # The maximum size of each RPC request that the clients on indexNode can send, unit: byte
    clientMaxRecvSize: 536870912 # The maximum size of each RPC request that the clients on indexNode can receive, unit: byte

dataCoord:
  channel:
    watchTimeoutInterval: 300 # Timeout on watching channels (in seconds). A datanode tickler updating the watch progress resets the timeout timer.
    legacyVersionWithoutRPCWatch: 2.4.1 # Datanodes <= this version are considered legacy nodes, which don't have RPC-based watch(). This is only used during rolling upgrade where legacy nodes won't get new channels
    balanceSilentDuration: 300 # The duration after which the channel manager start background channel balancing
    balanceInterval: 360 # The interval with which the channel manager check dml channel balance status
    checkInterval: 1 # The interval in seconds with which the channel manager advances channel states
    notifyChannelOperationTimeout: 5 # Timeout for notifying channel operations (in seconds).
  segment:
    maxSize: 1024 # The maximum size of a segment, unit: MB. datacoord.segment.maxSize and datacoord.segment.sealProportion together determine if a segment can be sealed.
    diskSegmentMaxSize: 2048 # Maximum size of a segment in MB for collections that have a Disk index
    sealProportion: 0.12 # The minimum proportion to datacoord.segment.maxSize to seal a segment. datacoord.segment.maxSize and datacoord.segment.sealProportion together determine if a segment can be sealed.
    sealProportionJitter: 0.1 # segment seal proportion jitter ratio, default value 0.1 (10%); if seal proportion is 12%, with jitter=0.1 the actually applied ratio will be 10.8%~12%
    assignmentExpiration: 2000 # Expiration time of the segment assignment, unit: ms
    allocLatestExpireAttempt: 200 # The number of attempts to alloc the latest lastExpire from rootCoord after restart
    maxLife: 86400 # The max lifetime of segment in seconds, 24*60*60
    # If a segment didn't accept dml records in maxIdleTime and the size of segment is greater than
    # minSizeFromIdleToSealed, Milvus will automatically seal it.
    # The max idle time of segment in seconds, 10*60.
    maxIdleTime: 600
    minSizeFromIdleToSealed: 16 # The min size in MB for an idle segment to be sealed.
    # The max number of binlog files (which equals the binlog file count of the primary key) for one segment;
    # the segment will be sealed if the number of binlog files reaches the max value.
    maxBinlogFileNumber: 32
    # The segment is considered a "small segment" when its number of rows is smaller than
    # (smallProportion * segment max number of rows).
    smallProportion: 0.5
    # A compaction will happen on small segments if the segment after compaction will have
    # over (compactableProportion * segment max number of rows) rows.
    # MUST BE GREATER THAN OR EQUAL TO <smallProportion>!!!
    compactableProportion: 0.85
    # During compaction, the segment's number of rows is allowed to exceed the segment max number of rows by (expansionRate - 1) * 100%.
    expansionRate: 1.25
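    # Worked example with the defaults above: a segment becomes sealable at about
    # maxSize * sealProportion = 1024 MB * 0.12 ≈ 123 MB, and with
    # sealProportionJitter = 0.1 the applied proportion varies between 10.8% and 12%.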
  sealPolicy:
    channel:
      # The size threshold in MB, if the total size of growing segments of each shard 
      # exceeds this threshold, the largest growing segment will be sealed.
      growingSegmentsMemSize: 4096
  autoUpgradeSegmentIndex: false # whether to auto-upgrade the segment index to the index engine's version
  segmentFlushInterval: 2 # the minimal interval duration (unit: seconds) between flushing operations on the same segment
  # Switch value to control whether to enable segment compaction.
  # Compaction merges small-size segments into a large segment, and clears the entities deleted beyond the retention duration of Time Travel.
  enableCompaction: true
  compaction:
    # Switch value to control whether to enable automatic segment compaction, during which data coord locates and merges compactable segments in the background.
    # This configuration takes effect only when dataCoord.enableCompaction is set as true.
    enableAutoCompaction: true
    indexBasedCompaction: true
    # compaction task prioritizer, options: [default, level, mix]. 
    # default is FIFO.
    # level is prioritized by level: L0 compactions first, then mix compactions, then clustering compactions.
    # mix is prioritized by level: mix compactions first, then L0 compactions, then clustering compactions.
    taskPrioritizer: default
    taskQueueCapacity: 256 # compaction task queue size
    rpcTimeout: 10
    maxParallelTaskNum: 10
    dropTolerance: 86400 # Compaction task will be cleaned after finish longer than this time(in seconds)
    gcInterval: 1800 # The time interval in seconds for compaction gc
    mix:
      triggerInterval: 60 # The time interval in seconds to trigger mix compaction
    levelzero:
      triggerInterval: 10 # The time interval in seconds for trigger L0 compaction
      forceTrigger:
        minSize: 8388608 # The minimum size in bytes to force trigger a LevelZero Compaction, default as 8MB
        maxSize: 67108864 # The maximum size in bytes to force trigger a LevelZero Compaction, default as 64MB
        deltalogMinNum: 10 # The minimum number of deltalog files to force trigger a LevelZero Compaction
        deltalogMaxNum: 30 # The maximum number of deltalog files to force trigger a LevelZero Compaction, default as 30
    single:
      ratio:
        threshold: 0.2 # The ratio threshold of a segment to trigger a single compaction, default as 0.2
      deltalog:
        maxsize: 16777216 # The deltalog size of a segment to trigger a single compaction, default as 16MB
        maxnum: 200 # The deltalog count of a segment to trigger a compaction, default as 200
      expiredlog:
        maxsize: 10485760 # The expired log size of a segment to trigger a compaction, default as 10MB
    clustering:
      enable: true # Enable clustering compaction
      autoEnable: false # Enable auto clustering compaction
      triggerInterval: 600 # clustering compaction trigger interval in seconds
      minInterval: 3600 # The minimum interval between clustering compaction executions of one collection, to avoid redundant compaction
      maxInterval: 259200 # If a collection hasn't been clustering compacted for longer than maxInterval, force compact
      newDataSizeThreshold: 512m # If the new data size is larger than newDataSizeThreshold, execute clustering compaction
      preferSegmentSizeRatio: 0.8
      maxSegmentSizeRatio: 1
      maxTrainSizeRatio: 0.8 # max data size ratio in Kmeans train, if larger than it, will down sampling to meet this limit
      maxCentroidsNum: 10240 # maximum centroids number in Kmeans train
      minCentroidsNum: 16 # minimum centroids number in Kmeans train
      minClusterSizeRatio: 0.01 # minimum cluster size / avg size in Kmeans train
      maxClusterSizeRatio: 10 # maximum cluster size / avg size in Kmeans train
      maxClusterSize: 5g # maximum cluster size in Kmeans train
  syncSegmentsInterval: 300 # The time interval for regularly syncing segments
  enableGarbageCollection: true # Switch value to control whether to enable garbage collection to clear the discarded data in MinIO or S3 service.
  gc:
    interval: 3600 # The interval at which data coord performs garbage collection, unit: second.
    missingTolerance: 86400 # The retention duration of the unrecorded binary log (binlog) files. Setting a reasonably large value for this parameter avoids erroneously deleting the newly created binlog files that lack metadata. Unit: second.
    dropTolerance: 10800 # The retention duration of the binlog files of the deleted segments before they are cleared, unit: second.
    removeConcurrent: 32 # number of concurrent goroutines to remove dropped s3 objects
    scanInterval: 168 # garbage collection scan interval, in hours, for orphan files (files on object storage that are not registered in meta)
  enableActiveStandby: false
  brokerTimeout: 5000 # 5000ms, dataCoord broker rpc timeout
  autoBalance: true # Enable auto balance
  checkAutoBalanceConfigInterval: 10 # the interval of check auto balance config
  import:
    filesPerPreImportTask: 2 # The maximum number of files allowed per pre-import task.
    taskRetention: 10800 # The retention period in seconds for tasks in the Completed or Failed state.
    maxSizeInMBPerImportTask: 6144 # To prevent generating small segments, we will re-group imported files. This parameter represents the sum of file sizes in each group (each ImportTask).
    scheduleInterval: 2 # The interval for scheduling import, measured in seconds.
    checkIntervalHigh: 2 # The interval for checking import, measured in seconds, is set to a high frequency for the import checker.
    checkIntervalLow: 120 # The interval for checking import, measured in seconds, is set to a low frequency for the import checker.
    maxImportFileNumPerReq: 1024 # The maximum number of files allowed per single import request.
    maxImportJobNum: 1024 # Maximum number of import jobs that are executing or pending.
    waitForIndex: true # Indicates whether the import operation waits for the completion of index building.
  gracefulStopTimeout: 5 # seconds. Force stop the node if graceful stop exceeds this time
  slot:
    clusteringCompactionUsage: 16 # slot usage of clustering compaction job.
    mixCompactionUsage: 8 # slot usage of mix compaction job.
    l0DeleteCompactionUsage: 8 # slot usage of l0 compaction job.
  ip:  # TCP/IP address of dataCoord. If not specified, use the first unicastable address
  port: 13333 # TCP port of dataCoord
  grpc:
    serverMaxSendSize: 536870912 # The maximum size of each RPC request that the dataCoord can send, unit: byte
    serverMaxRecvSize: 268435456 # The maximum size of each RPC request that the dataCoord can receive, unit: byte
    clientMaxSendSize: 268435456 # The maximum size of each RPC request that the clients on dataCoord can send, unit: byte
    clientMaxRecvSize: 536870912 # The maximum size of each RPC request that the clients on dataCoord can receive, unit: byte

dataNode:
  dataSync:
    flowGraph:
      maxQueueLength: 16 # Maximum length of task queue in flowgraph
      maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph
    maxParallelSyncMgrTasks: 256 # The max concurrent sync task number of datanode sync mgr globally
    skipMode:
      enable: true # Support skipping some timetick messages to reduce CPU usage
      skipNum: 4 # Consume one for every n records skipped
      coldTime: 60 # Turn on skip mode after there have been only timetick msgs for x seconds
  segment:
    # The maximum size of each binlog file in a segment buffered in memory. Binlog files whose size exceeds this value are then flushed to MinIO or S3 service.
    # Unit: Byte
    # Setting this parameter too small causes the system to store a small amount of data too frequently. Setting it too large increases the system's demand for memory.
    insertBufSize: 16777216
    deleteBufBytes: 16777216 # Max buffer size in bytes to flush del for a single channel, default as 16MB
    syncPeriod: 600 # The period to sync segments if buffer is not empty.
  memory:
    forceSyncEnable: true # Set true to force sync if memory usage is too high
    forceSyncSegmentNum: 1 # number of segments to sync; segments with the largest buffers will be synced.
    checkInterval: 3000 # the interval to check datanode memory usage, in milliseconds
    forceSyncWatermark: 0.5 # memory watermark for standalone, upon reaching this watermark, segments will be synced.
  timetick:
    interval: 500
  channel:
    # specify the size of the global work pool for all channels
    # if this parameter <= 0, it will be set to the maximum number of CPUs that can be executing
    # suggest setting it bigger for large collection numbers to avoid blocking
    workPoolSize: -1
    # specify the size of the global work pool for channel checkpoint updating
    # if this parameter <= 0, it will be set to 10
    updateChannelCheckpointMaxParallel: 10
    updateChannelCheckpointInterval: 60 # the interval duration(in seconds) for datanode to update channel checkpoint of each channel
    updateChannelCheckpointRPCTimeout: 20 # timeout in seconds for UpdateChannelCheckpoint RPC call
    maxChannelCheckpointsPerPRC: 128 # The maximum number of channel checkpoints per UpdateChannelCheckpoint RPC.
    channelCheckpointUpdateTickInSeconds: 10 # The frequency, in seconds, at which the channel checkpoint updater executes updates.
  import:
    maxConcurrentTaskNum: 16 # The maximum number of import/pre-import tasks allowed to run concurrently on a datanode.
    maxImportFileSizeInGB: 16 # The maximum file size (in GB) for an import file, where an import file refers to either a Row-Based file or a set of Column-Based files.
    readBufferSizeInMB: 16 # The data block size (in MB) read from chunk manager by the datanode during import.
    maxTaskSlotNum: 16 # The maximum number of slots occupied by each import/pre-import task.
  compaction:
    levelZeroBatchMemoryRatio: 0.5 # The minimal memory ratio of free memory for level zero compaction executing in batch mode
    levelZeroMaxBatchSize: -1 # Max batch size refers to the max number of L1/L2 segments in a batch when executing L0 compaction. Default to -1, any value that is less than 1 means no limit. Valid range: >= 1.
  gracefulStopTimeout: 1800 # seconds. Force stop the node if graceful stop exceeds this time
  slot:
    slotCap: 16 # The maximum number of tasks(e.g. compaction, importing) allowed to run concurrently on a datanode
  clusteringCompaction:
    memoryBufferRatio: 0.1 # The ratio of memory buffer of clustering compaction. Data larger than threshold will be flushed to storage.
    workPoolSize: 8 # worker pool size for one clustering compaction job.
  bloomFilterApplyParallelFactor: 4 # parallel factor for applying pk to bloom filter, default to 4*CPU_CORE_NUM
  storage:
    deltalog: json # deltalog format, options: [json, parquet]
  ip:  # TCP/IP address of dataNode. If not specified, use the first unicastable address
  port: 21124 # TCP port of dataNode
  grpc:
    serverMaxSendSize: 536870912 # The maximum size of each RPC request that the dataNode can send, unit: byte
    serverMaxRecvSize: 268435456 # The maximum size of each RPC request that the dataNode can receive, unit: byte
    clientMaxSendSize: 268435456 # The maximum size of each RPC request that the clients on dataNode can send, unit: byte
    clientMaxRecvSize: 536870912 # The maximum size of each RPC request that the clients on dataNode can receive, unit: byte

# This topic introduces the message channel-related configurations of Milvus.
msgChannel:
  chanNamePrefix:
    # Root name prefix of the channel when a message channel is created.
    # It is recommended to change this parameter before starting Milvus for the first time.
    # To share a Pulsar instance among multiple Milvus instances, consider changing this to a name rather than the default one for each Milvus instance before you start them.
    cluster: by-dev
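    # Illustrative example (instance names are hypothetical): two Milvus instances
    # sharing one Pulsar deployment can each use a distinct cluster prefix so their
    # channel names never collide, e.g.
    #   instance A:  cluster: milvus-a
    #   instance B:  cluster: milvus-b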
    # Sub-name prefix of the message channel where the root coord publishes time tick messages.
    # The complete channel name prefix is ${msgChannel.chanNamePrefix.cluster}-${msgChannel.chanNamePrefix.rootCoordTimeTick}
    # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data.
    # It is recommended to change this parameter before starting Milvus for the first time.
    rootCoordTimeTick: rootcoord-timetick
    # Sub-name prefix of the message channel where the root coord publishes its own statistics messages.
    # The complete channel name prefix is ${msgChannel.chanNamePrefix.cluster}-${msgChannel.chanNamePrefix.rootCoordStatistics}
    # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data.
    # It is recommended to change this parameter before starting Milvus for the first time.
    rootCoordStatistics: rootcoord-statistics
    # Sub-name prefix of the message channel where the root coord publishes Data Manipulation Language (DML) messages.
    # The complete channel name prefix is ${msgChannel.chanNamePrefix.cluster}-${msgChannel.chanNamePrefix.rootCoordDml}
    # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data.
    # It is recommended to change this parameter before starting Milvus for the first time.
    rootCoordDml: rootcoord-dml
    replicateMsg: replicate-msg
    # Sub-name prefix of the message channel where the query node publishes time tick messages.
    # The complete channel name prefix is ${msgChannel.chanNamePrefix.cluster}-${msgChannel.chanNamePrefix.queryTimeTick}
    # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data.
    # It is recommended to change this parameter before starting Milvus for the first time.
    queryTimeTick: queryTimeTick
    # Sub-name prefix of the message channel where the data coord publishes time tick messages.
    # The complete channel name prefix is ${msgChannel.chanNamePrefix.cluster}-${msgChannel.chanNamePrefix.dataCoordTimeTick}
    # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data.
    # It is recommended to change this parameter before starting Milvus for the first time.
    dataCoordTimeTick: datacoord-timetick-channel
    # Sub-name prefix of the message channel where the data coord publishes segment information messages.
    # The complete channel name prefix is ${msgChannel.chanNamePrefix.cluster}-${msgChannel.chanNamePrefix.dataCoordSegmentInfo}
    # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data.
    # It is recommended to change this parameter before starting Milvus for the first time.
    dataCoordSegmentInfo: segment-info-channel
  subNamePrefix:
    # Subscription name prefix of the data coord.
    # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data.
    # It is recommended to change this parameter before starting Milvus for the first time.
    dataCoordSubNamePrefix: dataCoord
    # Subscription name prefix of the data node.
    # Caution: Changing this parameter after using Milvus for a period of time will affect your access to old data.
    # It is recommended to change this parameter before starting Milvus for the first time.
    dataNodeSubNamePrefix: dataNode

# Configures the system log output.
log:
  # Milvus log level. Options: debug, info, warn, error, panic, and fatal.
  # It is recommended to use debug level under test and development environments, and info level in production environment.
  level: info
  file:
    # Root path to the log files.
    # The default value is set empty, indicating to output log files to standard output (stdout) and standard error (stderr).
    # If this parameter is set to a valid local path, Milvus writes and stores log files in this path.
    # Set this parameter as the path that you have permission to write.
    rootPath: 
    maxSize: 300 # The maximum size of a log file, unit: MB.
    maxAge: 10 # The maximum retention time before a log file is automatically cleared, unit: day. The minimum value is 1.
    maxBackups: 20 # The maximum number of backed-up log files to retain. The minimum value is 1.
  format: text # Milvus log format. Options: text and JSON
  stdout: true # Whether to also write logs to stdout
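# A minimal sketch (the path is illustrative, not a default) of routing logs to
# rotated files instead of stdout:
#   log:
#     level: info
#     file:
#       rootPath: /var/log/milvus   # must be writable by the Milvus process
#     stdout: false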

grpc:
  log:
    level: WARNING
  gracefulStopTimeout: 10 # seconds, time to wait for the graceful stop to finish
  client:
    compressionEnabled: false
    dialTimeout: 200
    keepAliveTime: 10000
    keepAliveTimeout: 20000
    maxMaxAttempts: 10
    initialBackoff: 0.2
    maxBackoff: 10
    backoffMultiplier: 2
    minResetInterval: 1000
    maxCancelError: 32
    minSessionCheckInterval: 200
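    # Worked example of the retry backoff above (assuming initialBackoff and
    # maxBackoff are in seconds, which their magnitudes suggest): with
    # initialBackoff 0.2 and backoffMultiplier 2, successive retry waits are
    # roughly 0.2, 0.4, 0.8, 1.6, 3.2, 6.4 seconds, then capped at maxBackoff (10),
    # for at most maxMaxAttempts (10) attempts.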

# Configures the TLS certificates used by the proxy. TLS itself is switched on via common.security.tlsMode.
tls:
  serverPemPath: configs/cert/server.pem
  serverKeyPath: configs/cert/server.key
  caPemPath: configs/cert/ca.pem
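# Illustrative sketch: these certificate paths take effect only together with
# common.security.tlsMode (0 = TLS disabled, 1 = one-way TLS, 2 = mutual TLS), e.g.
#   tls:
#     serverPemPath: configs/cert/server.pem
#     serverKeyPath: configs/cert/server.key
#     caPemPath: configs/cert/ca.pem
#   common:
#     security:
#       tlsMode: 1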

common:
  defaultPartitionName: _default # Name of the default partition when a collection is created
  defaultIndexName: _default_idx # Name of the index when it is created with name unspecified
  entityExpiration: -1 # Entity expiration in seconds. CAUTION: -1 means never expire
  indexSliceSize: 16 # Index slice size in MB
  threadCoreCoefficient:
    highPriority: 10 # This parameter specifies the thread count as a multiple of the number of CPU cores for the high-priority pool
    middlePriority: 5 # This parameter specifies the thread count as a multiple of the number of CPU cores for the middle-priority pool
    lowPriority: 1 # This parameter specifies the thread count as a multiple of the number of CPU cores for the low-priority pool
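    # Worked example: on a 16-core machine, the coefficients above size the pools
    # to roughly 160 (high), 80 (middle), and 16 (low) threads.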
  buildIndexThreadPoolRatio: 0.75
  DiskIndex:
    MaxDegree: 56
    SearchListSize: 100
    PQCodeBudgetGBRatio: 0.125
    BuildNumThreadsRatio: 1
    SearchCacheBudgetGBRatio: 0.1
    LoadNumThreadRatio: 8
    BeamWidthRatio: 4
  gracefulTime: 5000 # Milliseconds. The interval by which the request arrival time is pushed back under Bounded Consistency.
  gracefulStopTimeout: 1800 # Seconds. The server is force-quit if the graceful stop does not complete within this time.
  storageType: remote # Please adjust to local for embedded Milvus. Available values: [local, remote, opendal]. The value minio is deprecated; use remote instead.
  # Default value: auto
  # Valid values: [auto, avx512, avx2, avx, sse4_2]
  # This configuration is only used by querynode and indexnode, it selects CPU instruction set for Searching and Index-building.
  simdType: auto
  security:
    authorizationEnabled: false
    # Superusers bypass some system checks,
    # such as the old-password verification when updating credentials
    superUsers: 
    defaultRootPassword: Milvus # default password for root user
    tlsMode: 0
  session:
    ttl: 30 # TTL (in seconds) of the lease granted when a session registers a service
    retryTimes: 30 # Number of retries when a session sends etcd requests
  locks:
    metrics:
      enable: false # whether to gather lock statistics for metrics
    threshold:
      info: 500 # minimum milliseconds for printing durations in info level
      warn: 1000 # minimum milliseconds for printing durations in warn level
  storage:
    scheme: s3
    enablev2: false
  # Whether to enable the internal time-tick message mechanism.
  # If disabled (set to false), the system will not allow DML operations, including insertion, deletion, queries, and searches.
  # Disabling this helps Milvus-CDC synchronize incremental data.
  ttMsgEnabled: true
  traceLogMode: 0 # trace request info
  bloomFilterSize: 100000 # bloom filter initial size
  bloomFilterType: BlockedBloomFilter # bloom filter type, support BasicBloomFilter and BlockedBloomFilter
  maxBloomFalsePositive: 0.001 # max false positive rate for bloom filter
  bloomFilterApplyBatchSize: 1000 # batch size when applying PKs to the bloom filter
  usePartitionKeyAsClusteringKey: false # if true, do clustering compaction and segment prune on partition key field
  useVectorAsClusteringKey: false # if true, do clustering compaction and segment prune on vector field
  enableVectorClusteringKey: false # if true, enable vector clustering key and vector clustering compaction

# QuotaConfig, configurations of Milvus quota and limits.
# By default, we enable:
#   1. TT protection;
#   2. Memory protection;
#   3. Disk quota protection.
# You can enable:
#   1. DML throughput limitation;
#   2. DDL, DQL qps/rps limitation;
#   3. DQL Queue length/latency protection;
#   4. DQL result rate protection;
# If necessary, you can also manually force to deny RW requests.
quotaAndLimits:
  enabled: true # `true` to enable quota and limits, `false` to disable.
  # quotaCenterCollectInterval is the time interval at which quotaCenter
  # collects metrics from Proxies, the Query cluster, and the Data cluster.
  # Unit: seconds. Valid range: (0, 65536).
  quotaCenterCollectInterval: 3
  limits:
    allocRetryTimes: 15 # number of retries when allocating forward data for delete fails due to rate limiting
    allocWaitInterval: 1000 # wait duration (in milliseconds) between retries when allocating forward data for delete fails due to rate limiting
    complexDeleteLimitEnable: false # whether complex delete requests check forward data via the rate limiter
    maxCollectionNum: 65536
    maxCollectionNumPerDB: 65536 # Maximum number of collections per database.
    maxInsertSize: -1 # maximum size of a single insert request, in bytes, -1 means no limit
    maxResourceGroupNumOfQueryNode: 1024 # maximum number of resource groups of query nodes
    maxGroupSize: 10 # maximum size of a single group when doing search group-by
  ddl:
    enabled: false # Whether DDL request throttling is enabled.
    # Maximum number of collection-related DDL requests per second.
    # Setting this item to 10 indicates that Milvus processes no more than 10 collection-related DDL requests per second, including collection creation requests, collection drop requests, collection load requests, and collection release requests.
    # To use this setting, set quotaAndLimits.ddl.enabled to true at the same time.
    collectionRate: -1
    # Maximum number of partition-related DDL requests per second.
    # Setting this item to 10 indicates that Milvus processes no more than 10 partition-related requests per second, including partition creation requests, partition drop requests, partition load requests, and partition release requests.
    # To use this setting, set quotaAndLimits.ddl.enabled to true at the same time.
    partitionRate: -1
    db:
      collectionRate: -1 # qps at the db level, default no limit; rate for CreateCollection, DropCollection, LoadCollection, ReleaseCollection
      partitionRate: -1 # qps at the db level, default no limit; rate for CreatePartition, DropPartition, LoadPartition, ReleasePartition
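  # Illustrative sketch (the rate is hypothetical): capping collection-related DDL
  # at 10 requests per second cluster-wide requires both the switch and the rate:
  #   ddl:
  #     enabled: true
  #     collectionRate: 10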
  indexRate:
    enabled: false # Whether index-related request throttling is enabled.
    # Maximum number of index-related requests per second.
    # Setting this item to 10 indicates that Milvus processes no more than 10 index-related requests per second, including index creation requests and index drop requests.
    # To use this setting, set quotaAndLimits.indexRate.enabled to true at the same time.
    max: -1
    db:
      max: -1 # qps of db level, default no limit, rate for CreateIndex, DropIndex
  flushRate:
    enabled: true # Whether flush request throttling is enabled.
    # Maximum number of flush requests per second.
    # Setting this item to 10 indicates that Milvus processes no more than 10 flush requests per second.
    # To use this setting, set quotaAndLimits.flushRate.enabled to true at the same time.
    max: -1
    collection:
      max: 0.1 # qps, rate limit for flush at the collection level; 0.1 allows roughly one flush per collection every 10 seconds.
    db:
      max: -1 # qps of db level, default no limit, rate for flush
  compactionRate:
    enabled: false # Whether manual compaction request throttling is enabled.
    # Maximum number of manual-compaction requests per second.
    # Setting this item to 10 indicates that Milvus processes no more than 10 manual-compaction requests per second.
    # To use this setting, set quotaAndLimits.compactionRate.enabled to true at the same time.
    max: -1
    db:
      max: -1 # qps of db level, default no limit, rate for manualCompaction
  dml:
    enabled: false # Whether DML request throttling is enabled.
    insertRate:
      # Highest data insertion rate per second.
      # Setting this item to 5 indicates that Milvus only allows data insertion at the rate of 5 MB/s.
      # To use this setting, set quotaAndLimits.dml.enabled to true at the same time.
      max: -1
      db:
        max: -1 # MB/s, default no limit
      collection:
        # Highest data insertion rate per collection per second.
        # Setting this item to 5 indicates that Milvus only allows data insertion to any collection at the rate of 5 MB/s.
        # To use this setting, set quotaAndLimits.dml.enabled to true at the same time.
        max: -1
      partition:
        max: -1 # MB/s, default no limit
    upsertRate:
      max: -1 # MB/s, default no limit
      db:
        max: -1 # MB/s, default no limit
      collection:
        max: -1 # MB/s, default no limit
      partition:
        max: -1 # MB/s, default no limit
    deleteRate:
      # Highest data deletion rate per second.
      # Setting this item to 0.1 indicates that Milvus only allows data deletion at the rate of 0.1 MB/s.
      # To use this setting, set quotaAndLimits.dml.enabled to true at the same time.
      max: -1
      db:
        max: -1 # MB/s, default no limit
      collection:
        # Highest data deletion rate per second.
        # Setting this item to 0.1 indicates that Milvus only allows data deletion from any collection at the rate of 0.1 MB/s.
        # To use this setting, set quotaAndLimits.dml.enabled to true at the same time.
        max: -1
      partition:
        max: -1 # MB/s, default no limit
    bulkLoadRate:
      max: -1 # MB/s, default no limit, not supported yet. TODO: limit bulkLoad rate
      db:
        max: -1 # MB/s, default no limit, not supported yet. TODO: limit db bulkLoad rate
      collection:
        max: -1 # MB/s, default no limit, not supported yet. TODO: limit collection bulkLoad rate
      partition:
        max: -1 # MB/s, default no limit, not supported yet. TODO: limit partition bulkLoad rate
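    # Illustrative sketch (the rate is hypothetical): throttling inserts to 5 MB/s
    # per collection while leaving other DML unlimited:
    #   dml:
    #     enabled: true
    #     insertRate:
    #       collection:
    #         max: 5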
  dql:
    enabled: false # Whether DQL request throttling is enabled.
    searchRate:
      # Maximum number of vectors to search per second.
      # Setting this item to 100 indicates that Milvus only allows searching 100 vectors per second no matter whether these 100 vectors are all in one search or scattered across multiple searches.
      # To use this setting, set quotaAndLimits.dql.enabled to true at the same time.
      max: -1
      db:
        max: -1 # vps (vectors per second), default no limit
      collection:
        # Maximum number of vectors to search per collection per second.
        # Setting this item to 100 indicates that Milvus only allows searching 100 vectors per second per collection no matter whether these 100 vectors are all in one search or scattered across multiple searches.
        # To use this setting, set quotaAndLimits.dql.enabled to true at the same time.
        max: -1
      partition:
        max: -1 # vps (vectors per second), default no limit
    queryRate:
      # Maximum number of queries per second.
      # Setting this item to 100 indicates that Milvus only allows 100 queries per second.
      # To use this setting, set quotaAndLimits.dql.enabled to true at the same time.
      max: -1
      db:
        max: -1 # qps, default no limit
      collection:
        # Maximum number of queries per collection per second.
        # Setting this item to 100 indicates that Milvus only allows 100 queries per collection per second.
        # To use this setting, set quotaAndLimits.dql.enabled to true at the same time.
        max: -1
      partition:
        max: -1 # qps, default no limit
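    # Illustrative sketch (the rate is hypothetical): allowing at most 100 searched
    # vectors per second per collection:
    #   dql:
    #     enabled: true
    #     searchRate:
    #       collection:
    #         max: 100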
  limitWriting:
    # forceDeny false means dml requests are allowed (except under some
    # specific conditions, such as node memory reaching the water mark); true means always reject all dml requests.
    forceDeny: false
    ttProtection:
      enabled: false
      # maxTimeTickDelay indicates the backpressure for DML Operations.
      # DML rates would be reduced according to the ratio of time tick delay to maxTimeTickDelay,
      # if time tick delay is greater than maxTimeTickDelay, all DML requests would be rejected.
      # seconds
      maxTimeTickDelay: 300
    memProtection:
      # When memory usage > memoryHighWaterLevel, all dml requests would be rejected;
      # When memoryLowWaterLevel < memory usage < memoryHighWaterLevel, reduce the dml rate;
      # When memory usage < memoryLowWaterLevel, no action.
      enabled: true
      dataNodeMemoryLowWaterLevel: 0.85 # (0, 1], memoryLowWaterLevel in DataNodes
      dataNodeMemoryHighWaterLevel: 0.95 # (0, 1], memoryHighWaterLevel in DataNodes
      queryNodeMemoryLowWaterLevel: 0.85 # (0, 1], memoryLowWaterLevel in QueryNodes
      queryNodeMemoryHighWaterLevel: 0.95 # (0, 1], memoryHighWaterLevel in QueryNodes
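      # Worked example (the exact scaling curve is an implementation detail): with
      # the levels above, a query node at 90% memory usage sits halfway between
      # 0.85 and 0.95, so its DML rate is reduced accordingly; at or above 95%,
      # DML requests are rejected until usage falls back below the high level.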
    growingSegmentsSizeProtection:
      # No action will be taken if the growing segments size is less than the low watermark.
      # When the growing segments size exceeds the low watermark, the dml rate will be reduced,
      # but the rate will not be lower than minRateRatio * dmlRate.
      enabled: false
      minRateRatio: 0.5
      lowWaterLevel: 0.2
      highWaterLevel: 0.4
    diskProtection:
      enabled: true # When the total file size of object storage is greater than `diskQuota`, all dml requests would be rejected;
      diskQuota: -1 # MB, (0, +inf), default no limit
      diskQuotaPerDB: -1 # MB, (0, +inf), default no limit
      diskQuotaPerCollection: -1 # MB, (0, +inf), default no limit
      diskQuotaPerPartition: -1 # MB, (0, +inf), default no limit
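      # Illustrative sketch (quotas are hypothetical): rejecting DML once object
      # storage holds more than ~500 GB overall or 50 GB in any single collection:
      #   diskProtection:
      #     diskQuota: 512000
      #     diskQuotaPerCollection: 51200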
    l0SegmentsRowCountProtection:
      enabled: false # switch to enable l0 segment row count quota
      lowWaterLevel: 30000000 # l0 segment row count quota, low water level
      highWaterLevel: 50000000 # l0 segment row count quota, high water level
    deleteBufferRowCountProtection:
      enabled: false # switch to enable delete buffer row count quota
      lowWaterLevel: 32768 # delete buffer row count quota, low water level
      highWaterLevel: 65536 # delete buffer row count quota, high water level
    deleteBufferSizeProtection:
      enabled: false # switch to enable delete buffer size quota
      lowWaterLevel: 134217728 # delete buffer size quota, low water level
      highWaterLevel: 268435456 # delete buffer size quota, high water level
  limitReading:
    # forceDeny false means dql requests are allowed (except under some
    # specific conditions, such as the collection having been dropped); true means always reject all dql requests.
    forceDeny: false

trace:
  # trace exporter type, default is noop,
  # optional values: ['noop', 'stdout', 'jaeger', 'otlp']
  exporter: noop
  # fraction of traceID based sampler,
  # optional values: [0, 1]
  # Fractions >= 1 will always sample. Fractions < 0 are treated as zero.
  sampleFraction: 0
  jaeger:
    url:  # when the exporter is jaeger, set the Jaeger URL here
  otlp:
    endpoint:  # example: "127.0.0.1:4317" for grpc, "127.0.0.1:4318" for http
    method:  # otlp export method, acceptable values: ["grpc", "http"], using "grpc" by default
    secure: true
  initTimeoutSeconds: 10 # segcore initialization timeout in seconds, preventing OTLP gRPC from hanging forever
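# Illustrative sketch (the endpoint is hypothetical): exporting 10% of traces over
# OTLP/gRPC to a local collector:
#   trace:
#     exporter: otlp
#     sampleFraction: 0.1
#     otlp:
#       endpoint: "127.0.0.1:4317"
#       method: grpc
#       secure: false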

# When using GPU indexing, Milvus utilizes a memory pool to avoid frequent memory allocation and deallocation.
# Here, you can set the size of the memory occupied by the memory pool, in MB.
# Note that Milvus may crash when the actual memory demand exceeds the value set by maxMemSize.
# If initMemSize and maxMemSize are both set to zero,
# Milvus automatically initializes the pool with half of the available GPU memory,
# and maxMemSize defaults to the whole available GPU memory.
gpu:
  initMemSize: 2048 # GPU memory pool initial size, in MB
  maxMemSize: 4096 # GPU memory pool maximum size, in MB
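  # Illustrative sketch: setting both sizes to 0 lets Milvus size the pool
  # automatically, as described above (init = half of the available GPU memory,
  # max = all of it):
  #   gpu:
  #     initMemSize: 0
  #     maxMemSize: 0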

# Any configuration related to the streaming node server.
streamingNode:
  ip:  # TCP/IP address of streamingNode. If not specified, use the first unicastable address
  port: 22222 # TCP port of streamingNode
  grpc:
    serverMaxSendSize: 268435456 # The maximum size of each RPC request that the streamingNode can send, unit: byte
    serverMaxRecvSize: 268435456 # The maximum size of each RPC request that the streamingNode can receive, unit: byte
    clientMaxSendSize: 268435456 # The maximum size of each RPC request that the clients on streamingNode can send, unit: byte
    clientMaxRecvSize: 268435456 # The maximum size of each RPC request that the clients on streamingNode can receive, unit: byte

# Any configuration related to the streaming service.
streaming:
  walBalancer:
    # The interval at which the balance task is triggered in the background, 1 minute by default.
    # Duration strings such as 30s or 1m30s are accepted; see time.ParseDuration.
    triggerInterval: 1m
    # The initial interval of the balance task trigger backoff, 50 ms by default.
    # Duration strings such as 30s or 1m30s are accepted; see time.ParseDuration.
    backoffInitialInterval: 50ms
    backoffMultiplier: 2 # The multiplier of balance task trigger backoff, 2 by default
  txn:
    defaultKeepaliveTimeout: 10s # The default keepalive timeout for wal txn, 10s by default
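  # Illustrative sketch: any Go time.ParseDuration string is accepted for these
  # durations, e.g.
  #   walBalancer:
  #     triggerInterval: 90s    # equivalent to 1m30s
  #   txn:
  #     defaultKeepaliveTimeout: 30s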

# Any configuration related to the knowhere vector search engine
knowhere:
  enable: true # When this configuration is enabled, the index parameters defined below are automatically populated as index parameters, without requiring user input.
  DISKANN:
    build:
      max_degree: 56 # Maximum degree of the Vamana graph
      pq_code_budget_gb_ratio: 0.125 # Size limit on the PQ code (compared with raw data)
      search_cache_budget_gb_ratio: 0.1 # Ratio of cached node numbers to raw data
      search_list_size: 100 # Size of the candidate list during graph building
    search:
      beam_width_ratio: 4 # Ratio between the maximum number of IO requests per search iteration and CPU number
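    # Illustrative sketch (values are hypothetical): trading a larger index for
    # higher recall by raising the graph degree and build-time candidate list:
    #   DISKANN:
    #     build:
    #       max_degree: 64
    #       search_list_size: 128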