From 351c64b60627fc63528ce92104c44de89bc63c3f Mon Sep 17 00:00:00 2001 From: Sheldon Date: Tue, 24 Oct 2023 09:30:10 +0800 Subject: [PATCH] fix some typos (#27851) 1. fix some typos in md,yaml #22893 Signed-off-by: Sheldon --- DEVELOPMENT.md | 2 +- ci/jenkins/PublishImages.groovy | 2 +- configs/advanced/etcd.yaml | 2 +- configs/milvus.yaml | 4 ++-- .../20210604-datanode_flowgraph_recovery_design.md | 2 +- docs/design_docs/20211217-milvus_create_collection.md | 4 ++-- docs/design_docs/20220105-proxy.md | 2 +- docs/design_docs/20220105-query_boolean_expr.md | 4 ++-- .../20230918-datanode_remove_datacoord_dependency.md | 4 ++-- docs/design_docs/segcore/segment_growing.md | 6 +++--- docs/developer_guides/appendix_a_basic_components.md | 2 +- docs/developer_guides/chap04_message_stream.md | 6 +++--- docs/developer_guides/chap05_proxy.md | 4 ++-- docs/developer_guides/chap07_query_coordinator.md | 2 +- docs/user_guides/tls_proxy.md | 6 +++--- 15 files changed, 26 insertions(+), 26 deletions(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 8606a20d5c7da..3ed05dbecc088 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -288,7 +288,7 @@ start the cluster on your host machine ```shell $ ./build/builder.sh make install // build milvus -$ ./build/build_image.sh // build milvus lastest docker image +$ ./build/build_image.sh // build milvus latest docker image $ docker images // check if milvus latest image is ready REPOSITORY TAG IMAGE ID CREATED SIZE milvusdb/milvus latest 63c62ff7c1b7 52 minutes ago 570MB diff --git a/ci/jenkins/PublishImages.groovy b/ci/jenkins/PublishImages.groovy index 2139fe5834b78..0d49ac0afed0b 100644 --- a/ci/jenkins/PublishImages.groovy +++ b/ci/jenkins/PublishImages.groovy @@ -27,7 +27,7 @@ pipeline { } stages { - stage('Generat Image Tag') { + stage('Generate Image Tag') { steps { script { def date = sh(returnStdout: true, script: 'date +%Y%m%d').trim() diff --git a/configs/advanced/etcd.yaml b/configs/advanced/etcd.yaml index 
e2d3e727f8068..79d005fb99fe7 100644 --- a/configs/advanced/etcd.yaml +++ b/configs/advanced/etcd.yaml @@ -15,7 +15,7 @@ # limitations under the License. # This is the configuration file for the etcd server. -# Only standalone users with embeded etcd should change this file, others could just keep this file As Is. +# Only standalone users with embedded etcd should change this file, others could just keep this file As Is. # All the etcd client should be added to milvus.yaml if necessary # Human-readable name for this member. diff --git a/configs/milvus.yaml b/configs/milvus.yaml index 22f87a2adc39b..b7f35ef76b4b6 100644 --- a/configs/milvus.yaml +++ b/configs/milvus.yaml @@ -111,7 +111,7 @@ mq: pulsar: address: localhost # Address of pulsar port: 6650 # Port of Pulsar - webport: 80 # Web port of pulsar, if you connect direcly without proxy, should use 8080 + webport: 80 # Web port of pulsar, if you connect directly without proxy, should use 8080 maxMessageSize: 5242880 # 5 * 1024 * 1024 Bytes, Maximum size of each message in pulsar. tenant: public namespace: default @@ -346,7 +346,7 @@ dataCoord: balanceInterval: 360 #The interval for the channelBalancer on datacoord to check balance status segment: maxSize: 512 # Maximum size of a segment in MB - diskSegmentMaxSize: 2048 # Maximun size of a segment in MB for collection which has Disk index + diskSegmentMaxSize: 2048 # Maximum size of a segment in MB for collection which has Disk index sealProportion: 0.23 # The time of the assignment expiration in ms # Warning! this parameter is an expert variable and closely related to data integrity. 
Without specific diff --git a/docs/design_docs/20210604-datanode_flowgraph_recovery_design.md b/docs/design_docs/20210604-datanode_flowgraph_recovery_design.md index 9f89ec50629c3..7c4f28e655025 100644 --- a/docs/design_docs/20210604-datanode_flowgraph_recovery_design.md +++ b/docs/design_docs/20210604-datanode_flowgraph_recovery_design.md @@ -74,7 +74,7 @@ Supposing we have segments `s1, s2, s3`, corresponding positions `p1, p2, p3` const filter_threshold = recovery_time // mp means msgPack for mp := seeking(p1) { - if mp.position.endtime < filter_threshod { + if mp.position.endtime < filter_threshold { if mp.position < p3 { filter s3 } diff --git a/docs/design_docs/20211217-milvus_create_collection.md b/docs/design_docs/20211217-milvus_create_collection.md index c98f03a666faf..44f621f95364d 100644 --- a/docs/design_docs/20211217-milvus_create_collection.md +++ b/docs/design_docs/20211217-milvus_create_collection.md @@ -86,7 +86,7 @@ type createCollectionTask struct { } ``` - - `PostExecute`, `CreateCollectonTask` does nothing at this phase, and return directly. + - `PostExecute`, `CreateCollectionTask` does nothing at this phase, and return directly. 4. `RootCoord` would wrap the `CreateCollection` request into `CreateCollectionReqTask`, and then call function `executeTask`. `executeTask` would return until the `context` is done or `CreateCollectionReqTask.Execute` is returned. @@ -104,7 +104,7 @@ type CreateCollectionReqTask struct { } ``` -5. `CreateCollectionReqTask.Execute` would alloc `CollecitonID` and default `PartitionID`, and set `Virtual Channel` and `Physical Channel`, which are used by `MsgStream`, then write the `Collection`'s meta into `metaTable` +5. `CreateCollectionReqTask.Execute` would alloc `CollectionID` and default `PartitionID`, and set `Virtual Channel` and `Physical Channel`, which are used by `MsgStream`, then write the `Collection`'s meta into `metaTable` 6. 
After `Collection`'s meta written into `metaTable`, `Milvus` would consider this collection has been created successfully. diff --git a/docs/design_docs/20220105-proxy.md b/docs/design_docs/20220105-proxy.md index 20af39b16b688..c447a5a1652cb 100644 --- a/docs/design_docs/20220105-proxy.md +++ b/docs/design_docs/20220105-proxy.md @@ -127,7 +127,7 @@ future work. For DqRequest, request and result data are written to the stream. The request data will be written to DqRequestChannel, and the result data will be written to DqResultChannel. Proxy will write the request of the collection into the -DqRequestChannel, and the DqReqeustChannel will be jointly subscribed by a group of query nodes. When all query nodes +DqRequestChannel, and the DqRequestChannel will be jointly subscribed by a group of query nodes. When all query nodes receive the DqRequest, they will write the query results into the DqResultChannel corresponding to the collection. As the consumer of the DqResultChannel, Proxy is responsible for collecting the query results and aggregating them, The result is then returned to the client. 
diff --git a/docs/design_docs/20220105-query_boolean_expr.md b/docs/design_docs/20220105-query_boolean_expr.md index 656d27a1a1894..5e030f925bd5f 100644 --- a/docs/design_docs/20220105-query_boolean_expr.md +++ b/docs/design_docs/20220105-query_boolean_expr.md @@ -31,7 +31,7 @@ ConstantExpr := | UnaryArithOp ConstantExpr Constant := - INTERGER + INTEGER | FLOAT_NUMBER UnaryArithOp := @@ -64,7 +64,7 @@ CmpOp := | "==" | "!=" -INTERGER := 整数 +INTEGER := 整数 FLOAT_NUM := 浮点数 IDENTIFIER := 列名 ``` diff --git a/docs/design_docs/20230918-datanode_remove_datacoord_dependency.md b/docs/design_docs/20230918-datanode_remove_datacoord_dependency.md index 338aaa986ecbc..ec87e140e2630 100644 --- a/docs/design_docs/20230918-datanode_remove_datacoord_dependency.md +++ b/docs/design_docs/20230918-datanode_remove_datacoord_dependency.md @@ -61,7 +61,7 @@ The rules system shall follow is: {% note %} -**Note:** Segments meta shall be updated *BEFORE* changing the channel checkpoint in case of datanode crashing during the prodedure. Under this premise, reconsuming from the old checkpoint shall recover all the data and duplidated entires will be discarded by segment checkpoints. +**Note:** Segments meta shall be updated *BEFORE* changing the channel checkpoint in case of datanode crashing during the procedure. Under this premise, reconsuming from the old checkpoint shall recover all the data and duplicated entries will be discarded by segment checkpoints. {% endnote %} @@ -78,7 +78,7 @@ The winning option is to: **Note:** `Datacoord` reloads from metastore periodically. Optimization 1: reload channel checkpoint first, then reload segment meta if newly read revision is greater than in-memory one. -Optimization 2: After `L0 segemnt` is implemented, datacoord shall refresh growing segments only. +Optimization 2: After `L0 segment` is implemented, datacoord shall refresh growing segments only. 
{% endnote %} diff --git a/docs/design_docs/segcore/segment_growing.md b/docs/design_docs/segcore/segment_growing.md index c3f8ad7da4027..74b3011bf40a3 100644 --- a/docs/design_docs/segcore/segment_growing.md +++ b/docs/design_docs/segcore/segment_growing.md @@ -2,13 +2,13 @@ Growing segment has the following additional interfaces: -1. `PreInsert(size) -> reseveredOffset`: serial interface, which reserves space for future insertion and returns the `reseveredOffset`. +1. `PreInsert(size) -> reservedOffset`: serial interface, which reserves space for future insertion and returns the `reservedOffset`. -2. `Insert(reseveredOffset, size, ...Data...)`: write `...Data...` into range `[reseveredOffset, reseveredOffset + size)`. This interface is allowed to be called concurrently. +2. `Insert(reservedOffset, size, ...Data...)`: write `...Data...` into range `[reservedOffset, reservedOffset + size)`. This interface is allowed to be called concurrently. 1. `...Data...` contains row_ids, timestamps two system attributes, and other columns 2. data columns can be stored either row-based or column-based. - 3. `PreDelete & Delete(reseveredOffset, row_ids, timestamps)` is a delete interface similar to insert interface. + 3. `PreDelete & Delete(reservedOffset, row_ids, timestamps)` is a delete interface similar to insert interface. Growing segment stores data in the form of chunk. The number of rows in each chunk is restricted by configs. diff --git a/docs/developer_guides/appendix_a_basic_components.md b/docs/developer_guides/appendix_a_basic_components.md index cc4b42b708846..62eee6999d36a 100644 --- a/docs/developer_guides/appendix_a_basic_components.md +++ b/docs/developer_guides/appendix_a_basic_components.md @@ -107,7 +107,7 @@ type Session struct { } // NewSession is a helper to build Session object. -// ServerID, ServerName, Address, Exclusive will be assigned after registeration. +// ServerID, ServerName, Address, Exclusive will be assigned after registration. 
// metaRoot is a path in etcd to save session information. // etcdEndpoints is to init etcdCli when NewSession func NewSession(ctx context.Context, metaRoot string, etcdEndpoints []string) *Session {} diff --git a/docs/developer_guides/chap04_message_stream.md b/docs/developer_guides/chap04_message_stream.md index 3a6e0004bd62f..e2822477a2e6c 100644 --- a/docs/developer_guides/chap04_message_stream.md +++ b/docs/developer_guides/chap04_message_stream.md @@ -7,7 +7,7 @@ ```go type Client interface { CreateChannels(req CreateChannelRequest) (CreateChannelResponse, error) - DestoryChannels(req DestoryChannelRequest) error + DestroyChannels(req DestroyChannelRequest) error DescribeChannels(req DescribeChannelRequest) (DescribeChannelResponse, error) } ``` @@ -32,10 +32,10 @@ type CreateChannelResponse struct { } ``` -- _DestoryChannels_ +- _DestroyChannels_ ```go -type DestoryChannelRequest struct { +type DestroyChannelRequest struct { ChannelNames []string } ``` diff --git a/docs/developer_guides/chap05_proxy.md b/docs/developer_guides/chap05_proxy.md index 15240c1606e4f..8bff965ea4338 100644 --- a/docs/developer_guides/chap05_proxy.md +++ b/docs/developer_guides/chap05_proxy.md @@ -105,7 +105,7 @@ type MilvusService interface { CreatePartition(ctx context.Context, request *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) DropPartition(ctx context.Context, request *milvuspb.DropPartitionRequest) (*commonpb.Status, error) HasPartition(ctx context.Context, request *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) - LoadPartitions(ctx context.Context, request *milvuspb.LoadPartitonRequest) (*commonpb.Status, error) + LoadPartitions(ctx context.Context, request *milvuspb.LoadPartitionRequest) (*commonpb.Status, error) ReleasePartitions(ctx context.Context, request *milvuspb.ReleasePartitionRequest) (*commonpb.Status, error) GetPartitionStatistics(ctx context.Context, request *milvuspb.PartitionStatsRequest) (*milvuspb.PartitionStatsResponse, error) 
ShowPartitions(ctx context.Context, request *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) @@ -225,7 +225,7 @@ type CollectionSchema struct { Fields []*FieldSchema } -type LoadPartitonRequest struct { +type LoadPartitionRequest struct { Base *commonpb.MsgBase DbID UniqueID CollectionID UniqueID diff --git a/docs/developer_guides/chap07_query_coordinator.md b/docs/developer_guides/chap07_query_coordinator.md index 067b2d534bca5..2bfba980be175 100644 --- a/docs/developer_guides/chap07_query_coordinator.md +++ b/docs/developer_guides/chap07_query_coordinator.md @@ -134,7 +134,7 @@ type PartitionStatesResponse struct { - _LoadPartitions_ ```go -type LoadPartitonRequest struct { +type LoadPartitionRequest struct { Base *commonpb.MsgBase DbID UniqueID CollectionID UniqueID diff --git a/docs/user_guides/tls_proxy.md b/docs/user_guides/tls_proxy.md index a359dff917010..63251f7ca92b1 100644 --- a/docs/user_guides/tls_proxy.md +++ b/docs/user_guides/tls_proxy.md @@ -78,7 +78,7 @@ certs = $dir/certs # Where the issued certs are kept crl_dir = $dir/crl # Where the issued crl are kept database = $dir/index.txt # database index file. #unique_subject = no # Set to 'no' to allow creation of - # several ctificates with same subject. + # several certificates with same subject. new_certs_dir = $dir/newcerts # default place for new certs. certificate = $dir/cacert.pem # The CA certificate @@ -89,7 +89,7 @@ crl = $dir/crl.pem # The current CRL private_key = $dir/private/cakey.pem# The private key RANDFILE = $dir/private/.rand # private random number file -x509_extensions = usr_cert # The extentions to add to the cert +x509_extensions = usr_cert # The extensions to add to the cert # Comment out the following two lines for the "traditional" # (and highly broken) format. 
@@ -141,7 +141,7 @@ default_bits = 2048 default_keyfile = privkey.pem distinguished_name = req_distinguished_name attributes = req_attributes -x509_extensions = v3_ca # The extentions to add to the self signed cert +x509_extensions = v3_ca # The extensions to add to the self signed cert # Passwords for private keys if not present they will be prompted for # input_password = secret