From 8ce24649ef38fded3e34ba56395ca3ed2f5f1fec Mon Sep 17 00:00:00 2001 From: Akihiro Suda Date: Mon, 15 Jan 2024 14:54:08 +0900 Subject: [PATCH 1/2] go.mod: gvisor.dev/gvisor v0.0.0-20231023213702-2691a8f9b1cf This revision is compatible with Go 1.20, 1.21, and 1.22rc1. v0.0.0-20231024051821-3b00acd67af2 is no longer compatible with Go 1.20. Signed-off-by: Akihiro Suda --- go.mod | 10 +- go.sum | 22 +- vendor/github.com/google/btree/.travis.yml | 1 - vendor/github.com/google/btree/README.md | 2 - vendor/github.com/google/btree/btree.go | 3 + .../github.com/google/btree/btree_generic.go | 1083 +++++++++++++++++ vendor/golang.org/x/time/AUTHORS | 3 - vendor/golang.org/x/time/CONTRIBUTORS | 3 - vendor/golang.org/x/time/rate/rate.go | 123 +- vendor/golang.org/x/time/rate/sometimes.go | 67 + .../x/tools/cmd/stringer/stringer.go | 5 +- vendor/golang.org/x/tools/go/packages/doc.go | 2 +- .../golang.org/x/tools/go/packages/golist.go | 7 +- .../x/tools/go/packages/packages.go | 3 +- .../x/tools/internal/gcimporter/gcimporter.go | 3 +- .../gvisor.dev/gvisor/pkg/rand/rand_linux.go | 13 +- vendor/gvisor.dev/gvisor/pkg/sync/fence.go | 19 + .../gvisor.dev/gvisor/pkg/sync/fence_amd64.s | 26 + .../checksum_amd64.go => sync/fence_arm64.s} | 15 +- .../gvisor/pkg/sync/runtime_go121_unsafe.go | 16 + .../pkg/sync/runtime_not_go121_unsafe.go | 18 + .../gvisor/pkg/sync/runtime_unsafe.go | 18 +- vendor/gvisor.dev/gvisor/pkg/sync/seqcount.go | 18 +- .../gvisor/pkg/tcpip/checksum/checksum.go | 115 -- .../pkg/tcpip/checksum/checksum_amd64.s | 138 --- .../checksum/checksum_amd64_state_autogen.go | 6 - .../tcpip/checksum/checksum_noasm_unsafe.go | 80 -- .../pkg/tcpip/checksum/checksum_unsafe.go | 182 +++ .../checksum/checksum_unsafe_state_autogen.go | 3 - .../tcpip/stack/addressable_endpoint_state.go | 39 +- .../gvisor/pkg/tcpip/stack/registration.go | 4 +- .../gvisor/pkg/tcpip/stack/route.go | 2 +- .../gvisor/pkg/tcpip/stack/stack.go | 66 +- vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip.go | 5 + .../pkg/tcpip/transport/tcp/endpoint.go | 19 +- .../pkg/tcpip/transport/tcp/forwarder.go | 1 + .../pkg/tcpip/transport/udp/endpoint.go | 2 + vendor/modules.txt | 14 +- 38 files changed, 1646 insertions(+), 510 deletions(-) delete mode 100644 vendor/github.com/google/btree/.travis.yml create mode 100644 vendor/github.com/google/btree/btree_generic.go delete mode 100644 vendor/golang.org/x/time/AUTHORS delete mode 100644 vendor/golang.org/x/time/CONTRIBUTORS create mode 100644 vendor/golang.org/x/time/rate/sometimes.go create mode 100644 vendor/gvisor.dev/gvisor/pkg/sync/fence.go create mode 100644 vendor/gvisor.dev/gvisor/pkg/sync/fence_amd64.s rename vendor/gvisor.dev/gvisor/pkg/{tcpip/checksum/checksum_amd64.go => sync/fence_arm64.s} (69%) create mode 100644 vendor/gvisor.dev/gvisor/pkg/sync/runtime_go121_unsafe.go create mode 100644 vendor/gvisor.dev/gvisor/pkg/sync/runtime_not_go121_unsafe.go delete mode 100644 vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_amd64.s delete mode 100644 vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_amd64_state_autogen.go delete mode 100644 vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_noasm_unsafe.go create mode 100644 vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_unsafe.go diff --git a/go.mod b/go.mod index 748e2e90b..74fd15981 100644 --- a/go.mod +++ b/go.mod @@ -25,25 +25,25 @@ require ( golang.org/x/crypto v0.17.0 golang.org/x/sync v0.5.0 golang.org/x/sys v0.15.0 - gvisor.dev/gvisor v0.0.0-20230715022000-fd277b20b8db + gvisor.dev/gvisor 
v0.0.0-20231023213702-2691a8f9b1cf inet.af/tcpproxy v0.0.0-20220326234310-be3ee21c9fa0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.4.9 // indirect - github.com/google/btree v1.0.1 // indirect + github.com/google/btree v1.1.2 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/mdlayher/socket v0.4.1 // indirect github.com/nxadm/tail v1.4.8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/u-root/uio v0.0.0-20210528114334-82958018845c // indirect github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect - golang.org/x/mod v0.12.0 // indirect + golang.org/x/mod v0.13.0 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect - golang.org/x/tools v0.13.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.14.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 9d31f8aed..c45d86b3b 100644 --- a/go.sum +++ b/go.sum @@ -27,8 +27,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -114,8 +114,8 @@ golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -165,15 +165,15 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 
h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -184,7 +184,7 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.28.2-0.20230118093459-a9481185b34d h1:qp0AnQCvRCMlu9jBjtdbTaaEmThIgZOrbVyDEOcmKhQ= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -196,7 +196,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gvisor.dev/gvisor v0.0.0-20230715022000-fd277b20b8db h1:WZSmkyu/hep9YhWIlBZefwGVBrnGE5yW8JPD56YRsXs= -gvisor.dev/gvisor v0.0.0-20230715022000-fd277b20b8db/go.mod h1:sQuqOkxbfJq/GS2uSnqHphtXclHyk/ZrAGhZBxxsq6g= +gvisor.dev/gvisor v0.0.0-20231023213702-2691a8f9b1cf h1:0A28IFBR6VcMacM0m6Rn5/nr8pk8xa2TyIkjSaFAOPc= +gvisor.dev/gvisor v0.0.0-20231023213702-2691a8f9b1cf/go.mod h1:8hmigyCdYtw5xJGfQDJzSH5Ju8XEIDBnpyi8+O6GRt8= inet.af/tcpproxy v0.0.0-20220326234310-be3ee21c9fa0 h1:PqdHrvQRVK1zapJkd0qf6+tevvSIcWdfenVqJd3PHWU= inet.af/tcpproxy v0.0.0-20220326234310-be3ee21c9fa0/go.mod h1:Tojt5kmHpDIR2jMojxzZK2w2ZR7OILODmUo2gaSwjrk= diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml deleted file mode 100644 index 4f2ee4d97..000000000 --- 
a/vendor/github.com/google/btree/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md index 6062a4dac..eab5dbf7b 100644 --- a/vendor/github.com/google/btree/README.md +++ b/vendor/github.com/google/btree/README.md @@ -1,7 +1,5 @@ # BTree implementation for Go -![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) - This package provides an in-memory B-Tree implementation for Go, useful as an ordered, mutable data structure. diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go index b83acdbc6..969b910d7 100644 --- a/vendor/github.com/google/btree/btree.go +++ b/vendor/github.com/google/btree/btree.go @@ -12,6 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.18 +// +build !go1.18 + // Package btree implements in-memory B-Trees of arbitrary degree. // // btree implements an in-memory B-Tree for use as an ordered data structure. diff --git a/vendor/github.com/google/btree/btree_generic.go b/vendor/github.com/google/btree/btree_generic.go new file mode 100644 index 000000000..e44a0f488 --- /dev/null +++ b/vendor/github.com/google/btree/btree_generic.go @@ -0,0 +1,1083 @@ +// Copyright 2014-2022 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +// In Go 1.18 and beyond, a BTreeG generic is created, and BTree is a specific +// instantiation of that generic for the Item interface, with a backwards- +// compatible API. Before go1.18, generics are not supported, +// and BTree is just an implementation based around the Item interface. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. 
+// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values. +// +// There are two implementations; those suffixed with 'G' are generics, usable +// for any type, and require a passed-in "less" function to define their ordering. +// Those without this prefix are specific to the 'Item' interface, and use +// its 'Less' function for ordering. +package btree + +import ( + "fmt" + "io" + "sort" + "strings" + "sync" +) + +// Item represents a single object in the tree. +type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + Less(than Item) bool +} + +const ( + DefaultFreeListSize = 32 +) + +// FreeListG represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList, in particular when they're created with Clone. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeListG[T any] struct { + mu sync.Mutex + freelist []*node[T] +} + +// NewFreeListG creates a new free list. +// size is the maximum size of the returned free list. +func NewFreeListG[T any](size int) *FreeListG[T] { + return &FreeListG[T]{freelist: make([]*node[T], 0, size)} +} + +func (f *FreeListG[T]) newNode() (n *node[T]) { + f.mu.Lock() + index := len(f.freelist) - 1 + if index < 0 { + f.mu.Unlock() + return new(node[T]) + } + n = f.freelist[index] + f.freelist[index] = nil + f.freelist = f.freelist[:index] + f.mu.Unlock() + return +} + +func (f *FreeListG[T]) freeNode(n *node[T]) (out bool) { + f.mu.Lock() + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + out = true + } + f.mu.Unlock() + return +} + +// ItemIteratorG allows callers of {A/De}scend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIteratorG[T any] func(item T) bool + +// Ordered represents the set of types for which the '<' operator work. +type Ordered interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~float32 | ~float64 | ~string +} + +// Less[T] returns a default LessFunc that uses the '<' operator for types that support it. +func Less[T Ordered]() LessFunc[T] { + return func(a, b T) bool { return a < b } +} + +// NewOrderedG creates a new B-Tree for ordered types. +func NewOrderedG[T Ordered](degree int) *BTreeG[T] { + return NewG[T](degree, Less[T]()) +} + +// NewG creates a new B-Tree with the given degree. 
+// +// NewG(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +// +// The passed-in LessFunc determines how objects of type T are ordered. +func NewG[T any](degree int, less LessFunc[T]) *BTreeG[T] { + return NewWithFreeListG(degree, less, NewFreeListG[T](DefaultFreeListSize)) +} + +// NewWithFreeListG creates a new B-Tree that uses the given node free list. +func NewWithFreeListG[T any](degree int, less LessFunc[T], f *FreeListG[T]) *BTreeG[T] { + if degree <= 1 { + panic("bad degree") + } + return &BTreeG[T]{ + degree: degree, + cow: ©OnWriteContext[T]{freelist: f, less: less}, + } +} + +// items stores items in a node. +type items[T any] []T + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items[T]) insertAt(index int, item T) { + var zero T + *s = append(*s, zero) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items[T]) removeAt(index int) T { + item := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + var zero T + (*s)[len(*s)-1] = zero + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. +func (s *items[T]) pop() (out T) { + index := len(*s) - 1 + out = (*s)[index] + var zero T + (*s)[index] = zero + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index items. index must be less than or equal to length. +func (s *items[T]) truncate(index int) { + var toClear items[T] + *s, toClear = (*s)[:index], (*s)[index:] + var zero T + for i := 0; i < len(toClear); i++ { + toClear[i] = zero + } +} + +// find returns the index where the given item should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items[T]) find(item T, less func(T, T) bool) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { + return less(item, s[i]) + }) + if i > 0 && !less(s[i-1], item) { + return i - 1, true + } + return i, false +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node[T any] struct { + items items[T] + children items[*node[T]] + cow *copyOnWriteContext[T] +} + +func (n *node[T]) mutableFor(cow *copyOnWriteContext[T]) *node[T] { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items[T], len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(items[*node[T]], len(n.children), cap(n.children)) + } + copy(out.children, n.children) + return out +} + +func (n *node[T]) mutableChild(i int) *node[T] { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node[T]) split(i int) (T, *node[T]) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) 
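+	// next now holds copies of n.items[i+1:]; the truncate below shrinks n
+	// to items[:i], dropping those entries along with n.items[i], which was
+	// captured above as item for promotion into the parent.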
+ n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children.truncate(i + 1) + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node[T]) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item be +// be found/replaced by insert, it will be returned. +func (n *node[T]) insert(item T, maxItems int) (_ T, _ bool) { + i, found := n.items.find(item, n.cow.less) + if found { + out := n.items[i] + n.items[i] = item + return out, true + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case n.cow.less(item, inTree): + // no change, we want first split node + case n.cow.less(inTree, item): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out, true + } + } + return n.mutableChild(i).insert(item, maxItems) +} + +// get finds the given key in the subtree and returns it. +func (n *node[T]) get(key T) (_ T, _ bool) { + i, found := n.items.find(key, n.cow.less) + if found { + return n.items[i], true + } else if len(n.children) > 0 { + return n.children[i].get(key) + } + return +} + +// min returns the first item in the subtree. +func min[T any](n *node[T]) (_ T, found bool) { + if n == nil { + return + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return + } + return n.items[0], true +} + +// max returns the last item in the subtree. +func max[T any](n *node[T]) (_ T, found bool) { + if n == nil { + return + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return + } + return n.items[len(n.items)-1], true +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node[T]) remove(item T, minItems int, typ toRemove) (_ T, _ bool) { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop(), true + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0), true + } + i = 0 + case removeItem: + i, found = n.items.find(item, n.cow.less) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i), true + } + return + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. 
+ out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + var zero T + n.items[i], _ = child.remove(zero, minItems, removeMax) + return out, true + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. +// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. +func (n *node[T]) growChildAndRemove(i int, item T, minItems int, typ toRemove) (T, bool) { + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i - 1) + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i + 1) + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + } + } else { + if i >= len(n.items) { + i-- + } + child := n.mutableChild(i) + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + n.cow.freeNode(mergeChild) + } + return n.remove(item, minItems, typ) +} + +type direction int + +const ( + descend = direction(-1) + ascend = direction(+1) +) + +type optionalItem[T any] struct { + item T + valid bool +} + +func optional[T any](item T) optionalItem[T] { + return optionalItem[T]{item: item, valid: true} +} +func empty[T any]() optionalItem[T] { + return optionalItem[T]{} +} + +// iterate provides a simple method for iterating over elements in the tree. +// +// When ascending, the 'start' should be less than 'stop' and when descending, +// the 'start' should be greater than 'stop'. Setting 'includeStart' to true +// will force the iterator to include the first item when it equals 'start', +// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a +// "greaterThan" or "lessThan" queries. 
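To make the start/stop/includeStart combinations described above concrete, a minimal fragment against the public wrappers that drive iterate (illustrative; assumes imports of fmt and github.com/google/btree):

	tr := btree.NewOrderedG[int](2)
	for _, v := range []int{1, 2, 3, 4, 5} {
		tr.ReplaceOrInsert(v)
	}
	// AscendRange iterates over [greaterOrEqual, lessThan): visits 2 and 3.
	tr.AscendRange(2, 4, func(v int) bool {
		fmt.Println(v)
		return true // returning false stops the iteration early
	})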
+func (n *node[T]) iterate(dir direction, start, stop optionalItem[T], includeStart bool, hit bool, iter ItemIteratorG[T]) (bool, bool) { + var ok, found bool + var index int + switch dir { + case ascend: + if start.valid { + index, _ = n.items.find(start.item, n.cow.less) + } + for i := index; i < len(n.items); i++ { + if len(n.children) > 0 { + if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if !includeStart && !hit && start.valid && !n.cow.less(start.item, n.items[i]) { + hit = true + continue + } + hit = true + if stop.valid && !n.cow.less(n.items[i], stop.item) { + return hit, false + } + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + case descend: + if start.valid { + index, found = n.items.find(start.item, n.cow.less) + if !found { + index = index - 1 + } + } else { + index = len(n.items) - 1 + } + for i := index; i >= 0; i-- { + if start.valid && !n.cow.less(n.items[i], start.item) { + if !includeStart || hit || n.cow.less(start.item, n.items[i]) { + continue + } + } + if len(n.children) > 0 { + if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if stop.valid && !n.cow.less(stop.item, n.items[i]) { + return hit, false // continue + } + hit = true + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + } + return hit, true +} + +// print is used for testing/debugging purposes. +func (n *node[T]) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTreeG is a generic implementation of a B-Tree. +// +// BTreeG stores items of type T in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTreeG[T any] struct { + degree int + length int + root *node[T] + cow *copyOnWriteContext[T] +} + +// LessFunc[T] determines how to order a type 'T'. It should implement a strict +// ordering, and should return true if within that ordering, 'a' < 'b'. +type LessFunc[T any] func(a, b T) bool + +// copyOnWriteContext pointers determine node ownership... a tree with a write +// context equivalent to a node's write context is allowed to modify that node. +// A tree whose write context does not match a node's is not allowed to modify +// it, and must create a new, writable copy (IE: it's a Clone). +// +// When doing any write operation, we maintain the invariant that the current +// node's context is equal to the context of the tree that requested the write. +// We do this by, before we descend into any node, creating a copy with the +// correct context if the contexts don't match. +// +// Since the node we're currently visiting on any write has the requesting +// tree's context, that node is modifiable in place. Children of that node may +// not share context, but before we descend into them, we'll make a mutable +// copy. +type copyOnWriteContext[T any] struct { + freelist *FreeListG[T] + less LessFunc[T] +} + +// Clone clones the btree, lazily. 
Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of b is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of b's original nodes would have been modified. Read operations +// should have no performance degredation. Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTreeG[T]) Clone() (t2 *BTreeG[T]) { + // Create two entirely new copy-on-write contexts. + // This operation effectively creates three trees: + // the original, shared nodes (old b.cow) + // the new b.cow nodes + // the new out.cow nodes + cow1, cow2 := *t.cow, *t.cow + out := *t + t.cow = &cow1 + out.cow = &cow2 + return &out +} + +// maxItems returns the max number of items to allow per node. +func (t *BTreeG[T]) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTreeG[T]) minItems() int { + return t.degree - 1 +} + +func (c *copyOnWriteContext[T]) newNode() (n *node[T]) { + n = c.freelist.newNode() + n.cow = c + return +} + +type freeType int + +const ( + ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) + ftStored // node was stored in the freelist for later use + ftNotOwned // node was ignored by COW, since it's owned by another one +) + +// freeNode frees a node within a given COW context, if it's owned by that +// context. It returns what happened to the node (see freeType const +// documentation). +func (c *copyOnWriteContext[T]) freeNode(n *node[T]) freeType { + if n.cow == c { + // clear to allow GC + n.items.truncate(0) + n.children.truncate(0) + n.cow = nil + if c.freelist.freeNode(n) { + return ftStored + } else { + return ftFreelistFull + } + } else { + return ftNotOwned + } +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned, +// and the second return value is true. Otherwise, (zeroValue, false) +// +// nil cannot be added to the tree (will panic). +func (t *BTreeG[T]) ReplaceOrInsert(item T) (_ T, _ bool) { + if t.root == nil { + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return + } else { + t.root = t.root.mutableFor(t.cow) + if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + } + } + out, outb := t.root.insert(item, t.maxItems()) + if !outb { + t.length++ + } + return out, outb +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns (zeroValue, false). +func (t *BTreeG[T]) Delete(item T) (T, bool) { + return t.deleteItem(item, removeItem) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns (zeroValue, false). +func (t *BTreeG[T]) DeleteMin() (T, bool) { + var zero T + return t.deleteItem(zero, removeMin) +} + +// DeleteMax removes the largest item in the tree and returns it. 
+// If no such item exists, returns (zeroValue, false). +func (t *BTreeG[T]) DeleteMax() (T, bool) { + var zero T + return t.deleteItem(zero, removeMax) +} + +func (t *BTreeG[T]) deleteItem(item T, typ toRemove) (_ T, _ bool) { + if t.root == nil || len(t.root.items) == 0 { + return + } + t.root = t.root.mutableFor(t.cow) + out, outb := t.root.remove(item, t.minItems(), typ) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + if outb { + t.length-- + } + return out, outb +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTreeG[T]) AscendRange(greaterOrEqual, lessThan T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, optional[T](greaterOrEqual), optional[T](lessThan), true, false, iterator) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTreeG[T]) AscendLessThan(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, empty[T](), optional(pivot), false, false, iterator) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTreeG[T]) AscendGreaterOrEqual(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, optional[T](pivot), empty[T](), true, false, iterator) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTreeG[T]) Ascend(iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, empty[T](), empty[T](), false, false, iterator) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTreeG[T]) DescendRange(lessOrEqual, greaterThan T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, optional[T](lessOrEqual), optional[T](greaterThan), true, false, iterator) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTreeG[T]) DescendLessOrEqual(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, optional[T](pivot), empty[T](), true, false, iterator) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range [last, pivot), until iterator returns false. +func (t *BTreeG[T]) DescendGreaterThan(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, empty[T](), optional[T](pivot), false, false, iterator) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTreeG[T]) Descend(iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, empty[T](), empty[T](), false, false, iterator) +} + +// Get looks for the key item in the tree, returning it. It returns +// (zeroValue, false) if unable to find that item. 
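The (zeroValue, false) convention above replaces the nil sentinel of the pre-generics API; a small illustrative fragment:

	tr := btree.NewOrderedG[string](2)
	tr.ReplaceOrInsert("a")
	if v, ok := tr.Get("a"); ok {
		fmt.Println("found", v)
	}
	// On an empty tree ok is false and v is the zero value "".
	if v, ok := tr.DeleteMin(); ok {
		fmt.Println("removed", v)
	}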
+func (t *BTreeG[T]) Get(key T) (_ T, _ bool) { + if t.root == nil { + return + } + return t.root.get(key) +} + +// Min returns the smallest item in the tree, or (zeroValue, false) if the tree is empty. +func (t *BTreeG[T]) Min() (_ T, _ bool) { + return min(t.root) +} + +// Max returns the largest item in the tree, or (zeroValue, false) if the tree is empty. +func (t *BTreeG[T]) Max() (_ T, _ bool) { + return max(t.root) +} + +// Has returns true if the given key is in the tree. +func (t *BTreeG[T]) Has(key T) bool { + _, ok := t.Get(key) + return ok +} + +// Len returns the number of items currently in the tree. +func (t *BTreeG[T]) Len() int { + return t.length +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. +func (t *BTreeG[T]) Clear(addNodesToFreelist bool) { + if t.root != nil && addNodesToFreelist { + t.root.reset(t.cow) + } + t.root, t.length = nil, 0 +} + +// reset returns a subtree to the freelist. It breaks out immediately if the +// freelist is full, since the only benefit of iterating is to fill that +// freelist up. Returns true if parent reset call should continue. +func (n *node[T]) reset(c *copyOnWriteContext[T]) bool { + for _, child := range n.children { + if !child.reset(c) { + return false + } + } + return c.freeNode(n) != ftFreelistFull +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item) bool { + return a < b.(Int) +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree BTreeG[Item] + +var itemLess LessFunc[Item] = func(a, b Item) bool { + return a.Less(b) +} + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int) *BTree { + return (*BTree)(NewG[Item](degree, itemLess)) +} + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeList FreeListG[Item] + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. 
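As the FreeList comments above note, multiple trees may share one freelist and stay safe for concurrent writes; a minimal wiring sketch using the generic API (illustrative):

	fl := btree.NewFreeListG[int](btree.DefaultFreeListSize)
	less := btree.Less[int]()
	t1 := btree.NewWithFreeListG(2, less, fl)
	t2 := btree.NewWithFreeListG(2, less, fl)
	t1.ReplaceOrInsert(1) // nodes freed by either tree are recycled
	t2.ReplaceOrInsert(2) // through the shared pool fl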
+func NewFreeList(size int) *FreeList { + return (*FreeList)(NewFreeListG[Item](size)) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list. +func NewWithFreeList(degree int, f *FreeList) *BTree { + return (*BTree)(NewWithFreeListG[Item](degree, itemLess, (*FreeListG[Item])(f))) +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator ItemIteratorG[Item] + +// Clone clones the btree, lazily. Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of b is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of b's original nodes would have been modified. Read operations +// should have no performance degredation. Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTree) Clone() (t2 *BTree) { + return (*BTree)((*BTreeG[Item])(t).Clone()) +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + i, _ := (*BTreeG[Item])(t).Delete(item) + return i +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMax() Item { + i, _ := (*BTreeG[Item])(t).DeleteMax() + return i +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + i, _ := (*BTreeG[Item])(t).DeleteMin() + return i +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + i, _ := (*BTreeG[Item])(t).Get(key) + return i +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + i, _ := (*BTreeG[Item])(t).Max() + return i +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + i, _ := (*BTreeG[Item])(t).Min() + return i +} + +// Has returns true if the given key is in the tree. +func (t *BTree) Has(key Item) bool { + return (*BTreeG[Item])(t).Has(key) +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). +func (t *BTree) ReplaceOrInsert(item Item) Item { + i, _ := (*BTreeG[Item])(t).ReplaceOrInsert(item) + return i +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendRange(greaterOrEqual, lessThan, (ItemIteratorG[Item])(iterator)) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. 
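A short fragment showing the lazy Clone behaviour documented above, via the classic Item-based API (illustrative):

	t1 := btree.New(2)
	t1.ReplaceOrInsert(btree.Int(1))
	t2 := t1.Clone() // cheap: all nodes are shared until one side writes
	t2.ReplaceOrInsert(btree.Int(2))
	fmt.Println(t1.Len(), t2.Len()) // prints 1 2; the write to t2 copied shared nodes rather than mutating them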
+func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendLessThan(pivot, (ItemIteratorG[Item])(iterator)) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendGreaterOrEqual(pivot, (ItemIteratorG[Item])(iterator)) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + (*BTreeG[Item])(t).Ascend((ItemIteratorG[Item])(iterator)) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendRange(lessOrEqual, greaterThan, (ItemIteratorG[Item])(iterator)) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendLessOrEqual(pivot, (ItemIteratorG[Item])(iterator)) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range [last, pivot), until iterator returns false. +func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendGreaterThan(pivot, (ItemIteratorG[Item])(iterator)) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTree) Descend(iterator ItemIterator) { + (*BTreeG[Item])(t).Descend((ItemIteratorG[Item])(iterator)) +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return (*BTreeG[Item])(t).Len() +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. +func (t *BTree) Clear(addNodesToFreelist bool) { + (*BTreeG[Item])(t).Clear(addNodesToFreelist) +} diff --git a/vendor/golang.org/x/time/AUTHORS b/vendor/golang.org/x/time/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/golang.org/x/time/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. 
-# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/time/CONTRIBUTORS b/vendor/golang.org/x/time/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/golang.org/x/time/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index b0b982e9c..f0e0cf3cb 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -80,6 +80,19 @@ func (lim *Limiter) Burst() int { return lim.burst } +// TokensAt returns the number of tokens available at time t. +func (lim *Limiter) TokensAt(t time.Time) float64 { + lim.mu.Lock() + _, tokens := lim.advance(t) // does not mutate lim + lim.mu.Unlock() + return tokens +} + +// Tokens returns the number of tokens available now. +func (lim *Limiter) Tokens() float64 { + return lim.TokensAt(time.Now()) +} + // NewLimiter returns a new Limiter that allows events up to rate r and permits // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { @@ -89,16 +102,16 @@ func NewLimiter(r Limit, b int) *Limiter { } } -// Allow is shorthand for AllowN(time.Now(), 1). +// Allow reports whether an event may happen now. func (lim *Limiter) Allow() bool { return lim.AllowN(time.Now(), 1) } -// AllowN reports whether n events may happen at time now. +// AllowN reports whether n events may happen at time t. // Use this method if you intend to drop / skip events that exceed the rate limit. // Otherwise use Reserve or Wait. -func (lim *Limiter) AllowN(now time.Time, n int) bool { - return lim.reserveN(now, n, 0).ok +func (lim *Limiter) AllowN(t time.Time, n int) bool { + return lim.reserveN(t, n, 0).ok } // A Reservation holds information about events that are permitted by a Limiter to happen after a delay. @@ -125,17 +138,17 @@ func (r *Reservation) Delay() time.Duration { } // InfDuration is the duration returned by Delay when a Reservation is not OK. -const InfDuration = time.Duration(1<<63 - 1) +const InfDuration = time.Duration(math.MaxInt64) // DelayFrom returns the duration for which the reservation holder must wait // before taking the reserved action. Zero duration means act immediately. // InfDuration means the limiter cannot grant the tokens requested in this // Reservation within the maximum wait time. -func (r *Reservation) DelayFrom(now time.Time) time.Duration { +func (r *Reservation) DelayFrom(t time.Time) time.Duration { if !r.ok { return InfDuration } - delay := r.timeToAct.Sub(now) + delay := r.timeToAct.Sub(t) if delay < 0 { return 0 } @@ -150,7 +163,7 @@ func (r *Reservation) Cancel() { // CancelAt indicates that the reservation holder will not perform the reserved action // and reverses the effects of this Reservation on the rate limit as much as possible, // considering that other reservations may have already been made. 
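The new Tokens and TokensAt accessors added above can be exercised as follows (a sketch; rate is golang.org/x/time/rate, and fmt/time imports are assumed):

	lim := rate.NewLimiter(rate.Limit(10), 5) // 10 tokens/sec, burst of 5
	fmt.Println(lim.Tokens())                 // ~5: the bucket starts full
	lim.AllowN(time.Now(), 3)                 // spends 3 tokens
	fmt.Println(lim.TokensAt(time.Now()))     // ~2, refilling at 10 tokens/sec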
-func (r *Reservation) CancelAt(now time.Time) { +func (r *Reservation) CancelAt(t time.Time) { if !r.ok { return } @@ -158,7 +171,7 @@ func (r *Reservation) CancelAt(now time.Time) { r.lim.mu.Lock() defer r.lim.mu.Unlock() - if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(t) { return } @@ -170,18 +183,18 @@ func (r *Reservation) CancelAt(now time.Time) { return } // advance time to now - now, _, tokens := r.lim.advance(now) + t, tokens := r.lim.advance(t) // calculate new number of tokens tokens += restoreTokens if burst := float64(r.lim.burst); tokens > burst { tokens = burst } // update state - r.lim.last = now + r.lim.last = t r.lim.tokens = tokens if r.timeToAct == r.lim.lastEvent { prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) - if !prevEvent.Before(now) { + if !prevEvent.Before(t) { r.lim.lastEvent = prevEvent } } @@ -196,18 +209,20 @@ func (lim *Limiter) Reserve() *Reservation { // The Limiter takes this Reservation into account when allowing future events. // The returned Reservation’s OK() method returns false if n exceeds the Limiter's burst size. // Usage example: -// r := lim.ReserveN(time.Now(), 1) -// if !r.OK() { -// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? -// return -// } -// time.Sleep(r.Delay()) -// Act() +// +// r := lim.ReserveN(time.Now(), 1) +// if !r.OK() { +// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? +// return +// } +// time.Sleep(r.Delay()) +// Act() +// // Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. // If you need to respect a deadline or cancel the delay, use Wait instead. // To drop or skip events exceeding rate limit, use Allow instead. -func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { - r := lim.reserveN(now, n, InfDuration) +func (lim *Limiter) ReserveN(t time.Time, n int) *Reservation { + r := lim.reserveN(t, n, InfDuration) return &r } @@ -221,6 +236,18 @@ func (lim *Limiter) Wait(ctx context.Context) (err error) { // canceled, or the expected wait time exceeds the Context's Deadline. // The burst limit is ignored if the rate limit is Inf. func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + // The test code calls lim.wait with a fake timer generator. + // This is the real timer generator. + newTimer := func(d time.Duration) (<-chan time.Time, func() bool, func()) { + timer := time.NewTimer(d) + return timer.C, timer.Stop, func() {} + } + + return lim.wait(ctx, n, time.Now(), newTimer) +} + +// wait is the internal implementation of WaitN. 
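The newTimer parameter above lets in-package tests substitute a controllable timer for time.NewTimer; a hypothetical fake matching that signature:

	fakeNewTimer := func(d time.Duration) (<-chan time.Time, func() bool, func()) {
		ch := make(chan time.Time, 1)
		stop := func() bool { return true }
		advance := func() { ch <- time.Time{} } // fires the timer on demand
		return ch, stop, advance
	}

A test can pass fakeNewTimer to lim.wait and step through the delay deterministically instead of sleeping.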
+func (lim *Limiter) wait(ctx context.Context, n int, t time.Time, newTimer func(d time.Duration) (<-chan time.Time, func() bool, func())) error { lim.mu.Lock() burst := lim.burst limit := lim.limit @@ -236,25 +263,25 @@ func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { default: } // Determine wait limit - now := time.Now() waitLimit := InfDuration if deadline, ok := ctx.Deadline(); ok { - waitLimit = deadline.Sub(now) + waitLimit = deadline.Sub(t) } // Reserve - r := lim.reserveN(now, n, waitLimit) + r := lim.reserveN(t, n, waitLimit) if !r.ok { return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) } // Wait if necessary - delay := r.DelayFrom(now) + delay := r.DelayFrom(t) if delay == 0 { return nil } - t := time.NewTimer(delay) - defer t.Stop() + ch, stop, advance := newTimer(delay) + defer stop() + advance() // only has an effect when testing select { - case <-t.C: + case <-ch: // We can proceed. return nil case <-ctx.Done(): @@ -273,13 +300,13 @@ func (lim *Limiter) SetLimit(newLimit Limit) { // SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated // or underutilized by those which reserved (using Reserve or Wait) but did not yet act // before SetLimitAt was called. -func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { +func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.limit = newLimit } @@ -290,13 +317,13 @@ func (lim *Limiter) SetBurst(newBurst int) { } // SetBurstAt sets a new burst size for the limiter. -func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { +func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.burst = newBurst } @@ -304,7 +331,7 @@ func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { // reserveN is a helper method for AllowN, ReserveN, and WaitN. // maxFutureReserve specifies the maximum reservation wait duration allowed. // reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. -func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { +func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) Reservation { lim.mu.Lock() defer lim.mu.Unlock() @@ -313,7 +340,7 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio ok: true, lim: lim, tokens: n, - timeToAct: now, + timeToAct: t, } } else if lim.limit == 0 { var ok bool @@ -325,11 +352,11 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio ok: ok, lim: lim, tokens: lim.burst, - timeToAct: now, + timeToAct: t, } } - now, last, tokens := lim.advance(now) + t, tokens := lim.advance(t) // Calculate the remaining number of tokens resulting from the request. 
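	// Worked example (hypothetical numbers): with limit = 10 tokens/sec and
	// tokens = 2, a request of n = 5 drives tokens to -3 below, so
	// durationFromTokens(3) yields a waitDuration of 300ms.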
tokens -= float64(n) @@ -351,16 +378,12 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio } if ok { r.tokens = n - r.timeToAct = now.Add(waitDuration) - } + r.timeToAct = t.Add(waitDuration) - // Update state - if ok { - lim.last = now + // Update state + lim.last = t lim.tokens = tokens lim.lastEvent = r.timeToAct - } else { - lim.last = last } return r @@ -369,20 +392,20 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio // advance calculates and returns an updated state for lim resulting from the passage of time. // lim is not changed. // advance requires that lim.mu is held. -func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { +func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { last := lim.last - if now.Before(last) { - last = now + if t.Before(last) { + last = t } // Calculate the new number of tokens, due to time that passed. - elapsed := now.Sub(last) + elapsed := t.Sub(last) delta := lim.limit.tokensFromDuration(elapsed) tokens := lim.tokens + delta if burst := float64(lim.burst); tokens > burst { tokens = burst } - return now, last, tokens + return t, tokens } // durationFromTokens is a unit conversion function from the number of tokens to the duration diff --git a/vendor/golang.org/x/time/rate/sometimes.go b/vendor/golang.org/x/time/rate/sometimes.go new file mode 100644 index 000000000..6ba99ddb6 --- /dev/null +++ b/vendor/golang.org/x/time/rate/sometimes.go @@ -0,0 +1,67 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rate + +import ( + "sync" + "time" +) + +// Sometimes will perform an action occasionally. The First, Every, and +// Interval fields govern the behavior of Do, which performs the action. +// A zero Sometimes value will perform an action exactly once. +// +// # Example: logging with rate limiting +// +// var sometimes = rate.Sometimes{First: 3, Interval: 10*time.Second} +// func Spammy() { +// sometimes.Do(func() { log.Info("here I am!") }) +// } +type Sometimes struct { + First int // if non-zero, the first N calls to Do will run f. + Every int // if non-zero, every Nth call to Do will run f. + Interval time.Duration // if non-zero and Interval has elapsed since f's last run, Do will run f. + + mu sync.Mutex + count int // number of Do calls + last time.Time // last time f was run +} + +// Do runs the function f as allowed by First, Every, and Interval. +// +// The model is a union (not intersection) of filters. The first call to Do +// always runs f. Subsequent calls to Do run f if allowed by First or Every or +// Interval. +// +// A non-zero First:N causes the first N Do(f) calls to run f. +// +// A non-zero Every:M causes every Mth Do(f) call, starting with the first, to +// run f. +// +// A non-zero Interval causes Do(f) to run f if Interval has elapsed since +// Do last ran f. +// +// Specifying multiple filters produces the union of these execution streams. +// For example, specifying both First:N and Every:M causes the first N Do(f) +// calls and every Mth Do(f) call, starting with the first, to run f. See +// Examples for more. +// +// If Do is called multiple times simultaneously, the calls will block and run +// serially. Therefore, Do is intended for lightweight operations. +// +// Because a call to Do may block until f returns, if f causes Do to be called, +// it will deadlock. 
+func (s *Sometimes) Do(f func()) { + s.mu.Lock() + defer s.mu.Unlock() + if s.count == 0 || + (s.First > 0 && s.count < s.First) || + (s.Every > 0 && s.count%s.Every == 0) || + (s.Interval > 0 && time.Since(s.last) >= s.Interval) { + f() + s.last = time.Now() + } + s.count++ +} diff --git a/vendor/golang.org/x/tools/cmd/stringer/stringer.go b/vendor/golang.org/x/tools/cmd/stringer/stringer.go index 998d1a51b..2b19c93e8 100644 --- a/vendor/golang.org/x/tools/cmd/stringer/stringer.go +++ b/vendor/golang.org/x/tools/cmd/stringer/stringer.go @@ -188,6 +188,8 @@ type Generator struct { trimPrefix string lineComment bool + + logf func(format string, args ...interface{}) // test logging hook; nil when not testing } func (g *Generator) Printf(format string, args ...interface{}) { @@ -221,13 +223,14 @@ func (g *Generator) parsePackage(patterns []string, tags []string) { // in a separate pass? For later. Tests: false, BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(tags, " "))}, + Logf: g.logf, } pkgs, err := packages.Load(cfg, patterns...) if err != nil { log.Fatal(err) } if len(pkgs) != 1 { - log.Fatalf("error: %d packages found", len(pkgs)) + log.Fatalf("error: %d packages matching %v", len(pkgs), strings.Join(patterns, " ")) } g.addPackage(pkgs[0]) } diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index da4ab89fe..a7a8f73e3 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -35,7 +35,7 @@ The Package struct provides basic information about the package, including - Imports, a map from source import strings to the Packages they name; - Types, the type information for the package's exported symbols; - Syntax, the parsed syntax trees for the package's source code; and - - TypeInfo, the result of a complete type-check of the package syntax trees. + - TypesInfo, the result of a complete type-check of the package syntax trees. (See the documentation for type Package for the complete list of fields and more detailed descriptions.) diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index b5de9cf9f..1f1eade0a 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -9,7 +9,6 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "log" "os" "path" @@ -1109,7 +1108,7 @@ func (state *golistState) writeOverlays() (filename string, cleanup func(), err if len(state.cfg.Overlay) == 0 { return "", func() {}, nil } - dir, err := ioutil.TempDir("", "gopackages-*") + dir, err := os.MkdirTemp("", "gopackages-*") if err != nil { return "", nil, err } @@ -1128,7 +1127,7 @@ func (state *golistState) writeOverlays() (filename string, cleanup func(), err // Create a unique filename for the overlaid files, to avoid // creating nested directories. noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "") - f, err := ioutil.TempFile(dir, fmt.Sprintf("*-%s", noSeparator)) + f, err := os.CreateTemp(dir, fmt.Sprintf("*-%s", noSeparator)) if err != nil { return "", func() {}, err } @@ -1146,7 +1145,7 @@ func (state *golistState) writeOverlays() (filename string, cleanup func(), err } // Write out the overlay file that contains the filepath mappings. 
filename = filepath.Join(dir, "overlay.json") - if err := ioutil.WriteFile(filename, b, 0665); err != nil { + if err := os.WriteFile(filename, b, 0665); err != nil { return "", func() {}, err } return filename, cleanup, nil diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 124a6fe14..ece0e7c60 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -16,7 +16,6 @@ import ( "go/token" "go/types" "io" - "io/ioutil" "log" "os" "path/filepath" @@ -1127,7 +1126,7 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { var err error if src == nil { ioLimit <- true // wait - src, err = ioutil.ReadFile(filename) + src, err = os.ReadFile(filename) <-ioLimit // signal } if err != nil { diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index b1223713b..2d078ccb1 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -29,7 +29,6 @@ import ( "go/token" "go/types" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -221,7 +220,7 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func switch hdr { case "$$B\n": var data []byte - data, err = ioutil.ReadAll(buf) + data, err = io.ReadAll(buf) if err != nil { break } diff --git a/vendor/gvisor.dev/gvisor/pkg/rand/rand_linux.go b/vendor/gvisor.dev/gvisor/pkg/rand/rand_linux.go index fa6a21026..fd5fa5d6a 100644 --- a/vendor/gvisor.dev/gvisor/pkg/rand/rand_linux.go +++ b/vendor/gvisor.dev/gvisor/pkg/rand/rand_linux.go @@ -54,10 +54,17 @@ type bufferedReader struct { // Read implements io.Reader.Read. func (b *bufferedReader) Read(p []byte) (int, error) { + // In Linux, reads of up to page size bytes will always complete fully. + // See drivers/char/random.c:get_random_bytes_user(). + // NOTE(gvisor.dev/issue/9445): Some applications rely on this behavior. + const pageSize = 4096 + min := len(p) + if min > pageSize { + min = pageSize + } b.mu.Lock() - n, err := b.r.Read(p) - b.mu.Unlock() - return n, err + defer b.mu.Unlock() + return io.ReadAtLeast(b.r, p, min) } // Reader is the default reader. diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/fence.go b/vendor/gvisor.dev/gvisor/pkg/sync/fence.go new file mode 100644 index 000000000..6706676a5 --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/sync/fence.go @@ -0,0 +1,19 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sync + +// MemoryFenceReads ensures that all preceding memory loads happen before +// following memory loads. 
+func MemoryFenceReads() diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/fence_amd64.s b/vendor/gvisor.dev/gvisor/pkg/sync/fence_amd64.s new file mode 100644 index 000000000..87766f1d3 --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/sync/fence_amd64.s @@ -0,0 +1,26 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build amd64 +// +build amd64 + +#include "textflag.h" + +// func MemoryFenceReads() +TEXT ·MemoryFenceReads(SB),NOSPLIT|NOFRAME,$0-0 + // No memory fence is required on x86. However, a compiler fence is + // required to prevent the compiler from reordering memory accesses. The Go + // compiler will not reorder memory accesses around a call to an assembly + // function; compare runtime.publicationBarrier. + RET diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_amd64.go b/vendor/gvisor.dev/gvisor/pkg/sync/fence_arm64.s similarity index 69% rename from vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_amd64.go rename to vendor/gvisor.dev/gvisor/pkg/sync/fence_arm64.s index 21ecd12a8..f4f9ce9de 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_amd64.go +++ b/vendor/gvisor.dev/gvisor/pkg/sync/fence_arm64.s @@ -12,13 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build amd64 -// +build amd64 +//go:build arm64 +// +build arm64 -package checksum +#include "textflag.h" -// Note: odd indicates whether initial is a partial checksum over an odd number -// of bytes. -// -// calculateChecksum is defined in assembly. -func calculateChecksum(buf []byte, odd bool, initial uint16) (uint16, bool) +// func MemoryFenceReads() +TEXT ·MemoryFenceReads(SB),NOSPLIT|NOFRAME,$0-0 + DMB $0x9 // ISHLD + RET diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_go121_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_go121_unsafe.go new file mode 100644 index 000000000..344b55663 --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_go121_unsafe.go @@ -0,0 +1,16 @@ +// Copyright 2023 The gVisor Authors. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 + +package sync + +import ( + "unsafe" +) + +// Use checkoffset to assert that maptype.hasher (the only field we use) has +// the correct offset. +const maptypeHasherOffset = unsafe.Offsetof(maptype{}.Hasher) // +checkoffset internal/abi MapType.Hasher diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_not_go121_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_not_go121_unsafe.go new file mode 100644 index 000000000..4d7e8b9fb --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_not_go121_unsafe.go @@ -0,0 +1,18 @@ +// Copyright 2023 The gVisor Authors. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// runtime.maptype is moved to internal/abi.MapType in Go 1.21. 
+// +//go:build !go1.21 + +package sync + +import ( + "unsafe" +) + +// Use checkoffset to assert that maptype.hasher (the only field we use) has +// the correct offset. +const maptypeHasherOffset = unsafe.Offsetof(maptype{}.Hasher) // +checkoffset runtime maptype.hasher diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_unsafe.go index 91cda67bb..a298bddbc 100644 --- a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_unsafe.go +++ b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_unsafe.go @@ -3,16 +3,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.18 && !go1.22 -// +build go1.18,!go1.22 - -// //go:linkname directives type-checked by checklinkname. Any other -// non-linkname assumptions outside the Go 1 compatibility guarantee should -// have an accompanied vet check or version guard build tag. - -// Check type definitions and constants when updating Go version. -// -// TODO(b/165820485): add these checks to checklinkname. +// //go:linkname directives type-checked by checklinkname. +// Runtime type copies checked by checkoffset. package sync @@ -107,10 +99,10 @@ func MapKeyHasher(m any) func(unsafe.Pointer, uintptr) uintptr { panic(fmt.Sprintf("sync.MapKeyHasher: m is %v, not map", rtyp)) } mtyp := *(**maptype)(unsafe.Pointer(&m)) - return mtyp.hasher + return mtyp.Hasher } -// maptype is equivalent to the beginning of runtime.maptype. +// maptype is equivalent to the beginning of internal/abi.MapType. type maptype struct { size uintptr ptrdata uintptr @@ -126,7 +118,7 @@ type maptype struct { key unsafe.Pointer elem unsafe.Pointer bucket unsafe.Pointer - hasher func(unsafe.Pointer, uintptr) uintptr + Hasher func(unsafe.Pointer, uintptr) uintptr // more fields } diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/seqcount.go b/vendor/gvisor.dev/gvisor/pkg/sync/seqcount.go index 9adc95322..c90d2d9fa 100644 --- a/vendor/gvisor.dev/gvisor/pkg/sync/seqcount.go +++ b/vendor/gvisor.dev/gvisor/pkg/sync/seqcount.go @@ -39,23 +39,6 @@ type SeqCount struct { // SeqCountEpoch tracks writer critical sections in a SeqCount. type SeqCountEpoch uint32 -// We assume that: -// -// - All functions in sync/atomic that perform a memory read are at least a -// read fence: memory reads before calls to such functions cannot be reordered -// after the call, and memory reads after calls to such functions cannot be -// reordered before the call, even if those reads do not use sync/atomic. -// -// - All functions in sync/atomic that perform a memory write are at least a -// write fence: memory writes before calls to such functions cannot be -// reordered after the call, and memory writes after calls to such functions -// cannot be reordered before the call, even if those writes do not use -// sync/atomic. -// -// As of this writing, the Go memory model completely fails to describe -// sync/atomic, but these properties are implied by -// https://groups.google.com/forum/#!topic/golang-nuts/7EnEhM3U7B8. - // BeginRead indicates the beginning of a reader critical section. Reader // critical sections DO NOT BLOCK writer critical sections, so operations in a // reader critical section MAY RACE with writer critical sections. Races are @@ -104,6 +87,7 @@ func (s *SeqCount) beginReadSlow() SeqCountEpoch { // Reader critical sections do not need to be explicitly terminated; the last // call to ReadOk is implicitly the end of the reader critical section. 
func (s *SeqCount) ReadOk(epoch SeqCountEpoch) bool { + MemoryFenceReads() return atomic.LoadUint32(&s.epoch) == uint32(epoch) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum.go index fa6e8bc3a..5d4e1170e 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum.go @@ -30,121 +30,6 @@ func Put(b []byte, xsum uint16) { binary.BigEndian.PutUint16(b, xsum) } -func unrolledCalculateChecksum(buf []byte, odd bool, initial uint16) (uint16, bool) { - v := uint32(initial) - - if odd { - v += uint32(buf[0]) - buf = buf[1:] - } - - l := len(buf) - odd = l&1 != 0 - if odd { - l-- - v += uint32(buf[l]) << 8 - } - for (l - 64) >= 0 { - i := 0 - v += (uint32(buf[i]) << 8) + uint32(buf[i+1]) - v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3]) - v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5]) - v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7]) - v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9]) - v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11]) - v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13]) - v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15]) - i += 16 - v += (uint32(buf[i]) << 8) + uint32(buf[i+1]) - v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3]) - v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5]) - v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7]) - v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9]) - v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11]) - v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13]) - v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15]) - i += 16 - v += (uint32(buf[i]) << 8) + uint32(buf[i+1]) - v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3]) - v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5]) - v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7]) - v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9]) - v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11]) - v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13]) - v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15]) - i += 16 - v += (uint32(buf[i]) << 8) + uint32(buf[i+1]) - v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3]) - v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5]) - v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7]) - v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9]) - v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11]) - v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13]) - v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15]) - buf = buf[64:] - l = l - 64 - } - if (l - 32) >= 0 { - i := 0 - v += (uint32(buf[i]) << 8) + uint32(buf[i+1]) - v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3]) - v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5]) - v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7]) - v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9]) - v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11]) - v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13]) - v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15]) - i += 16 - v += (uint32(buf[i]) << 8) + uint32(buf[i+1]) - v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3]) - v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5]) - v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7]) - v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9]) - v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11]) - v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13]) - v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15]) - buf = buf[32:] - l = l - 32 - } - if (l - 16) >= 0 { - i := 0 - v += (uint32(buf[i]) << 8) + uint32(buf[i+1]) - v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3]) - v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5]) - v += (uint32(buf[i+6]) << 
8) + uint32(buf[i+7]) - v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9]) - v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11]) - v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13]) - v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15]) - buf = buf[16:] - l = l - 16 - } - if (l - 8) >= 0 { - i := 0 - v += (uint32(buf[i]) << 8) + uint32(buf[i+1]) - v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3]) - v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5]) - v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7]) - buf = buf[8:] - l = l - 8 - } - if (l - 4) >= 0 { - i := 0 - v += (uint32(buf[i]) << 8) + uint32(buf[i+1]) - v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3]) - buf = buf[4:] - l = l - 4 - } - - // At this point since l was even before we started unrolling - // there can be only two bytes left to add. - if l != 0 { - v += (uint32(buf[0]) << 8) + uint32(buf[1]) - } - - return Combine(uint16(v), uint16(v>>16)), odd -} - // Checksum calculates the checksum (as defined in RFC 1071) of the bytes in the // given byte array. This function uses an optimized version of the checksum // algorithm. diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_amd64.s b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_amd64.s deleted file mode 100644 index c2a97a647..000000000 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_amd64.s +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2023 The gVisor Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build amd64 -// +build amd64 - -#include "textflag.h" - -// calculateChecksum computes the checksum of a slice, taking into account a -// previously computed initial value and whether the first byte is a lower or -// upper byte. -// -// It utilizes byte order independence and parallel summation as described in -// RFC 1071 1.2. -// -// The best way to understand this function is to understand -// checksum_noasm_unsafe.go first, which implements largely the same logic. -// Using assembly speeds things up via ADC (add with carry). -TEXT ·calculateChecksum(SB),NOSPLIT|NOFRAME,$0-35 - // Store arguments in registers. - MOVW initial+26(FP), AX - MOVQ buf_len+8(FP), BX - MOVQ buf_base+0(FP), CX - XORQ R8, R8 - MOVB odd+24(FP), R8 - - // Account for a previous odd number of bytes. - // - // if odd { - // initial += buf[0] - // buf = buf[1:] - // } - CMPB R8, $0 - JE newlyodd - XORQ R9, R9 - MOVB (CX), R9 - ADDW R9, AX - ADCW $0, AX - INCQ CX - DECQ BX - - // See whether we're checksumming an odd number of bytes. If so, the final - // byte is a big endian most significant byte, and so needs to be shifted. - // - // odd = buf_len%2 != 0 - // if odd { - // buf_len-- - // initial += buf[buf_len]<<8 - // } -newlyodd: - XORQ R8, R8 - TESTQ $1, BX - JZ swaporder - MOVB $1, R8 - DECQ BX - XORQ R10, R10 - MOVB (CX)(BX*1), R10 - SHLQ $8, R10 - ADDW R10, AX - ADCW $0, AX - -swaporder: - // Load initial in network byte order. - BSWAPQ AX - SHRQ $48, AX - - // Accumulate 8 bytes at a time. 
- // - // while buf_len >= 8 { - // acc, carry = acc + *(uint64 *)(buf) + carry - // buf_len -= 8 - // buf = buf[8:] - // } - // acc += carry - JMP addcond -addloop: - ADDQ (CX), AX - ADCQ $0, AX - SUBQ $8, BX - ADDQ $8, CX -addcond: - CMPQ BX, $8 - JAE addloop - - // TODO(krakauer): We can do 4 byte accumulation too. - - // Accumulate the rest 2 bytes at a time. - // - // while buf_len > 0 { - // acc, carry = acc + *(uint16 *)(buf) - // buf_len -= 2 - // buf = buf[2:] - // } - JMP slowaddcond -slowaddloop: - XORQ DX, DX - MOVW (CX), DX - ADDQ DX, AX - ADCQ $0, AX - SUBQ $2, BX - ADDQ $2, CX -slowaddcond: - CMPQ BX, $2 - JAE slowaddloop - - // Fold into 16 bits. - // - // for acc > math.MaxUint16 { - // acc = (acc & 0xffff) + acc>>16 - // } - JMP foldcond -foldloop: - MOVQ AX, DX - ANDQ $0xffff, DX - SHRQ $16, AX - ADDQ DX, AX - // We don't need ADC because folding will take care of it -foldcond: - CMPQ AX, $0xffff - JA foldloop - - // Return the checksum in host byte order. - BSWAPQ AX - SHRQ $48, AX - MOVW AX, ret+32(FP) - MOVB R8, ret1+34(FP) - RET diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_amd64_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_amd64_state_autogen.go deleted file mode 100644 index 4d4b3d578..000000000 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_amd64_state_autogen.go +++ /dev/null @@ -1,6 +0,0 @@ -// automatically generated by stateify. - -//go:build amd64 -// +build amd64 - -package checksum diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_noasm_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_noasm_unsafe.go deleted file mode 100644 index dee24e196..000000000 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_noasm_unsafe.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2023 The gVisor Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !amd64 -// +build !amd64 - -package checksum - -import ( - "math" - "math/bits" - "unsafe" -) - -// Note: odd indicates whether initial is a partial checksum over an odd number -// of bytes. -func calculateChecksum(buf []byte, odd bool, initial uint16) (uint16, bool) { - // Note: we can probably remove unrolledCalculateChecksum altogether, - // but I don't have any 32 bit machines to benchmark on. - if bits.UintSize != 64 { - return unrolledCalculateChecksum(buf, odd, initial) - } - - // Utilize byte order independence and parallel summation as - // described in RFC 1071 1.2. - - // It doesn't matter what endianness we use, only that it's - // consistent throughout the calculation. See RFC 1071 1.2.B. - acc := uint(((initial & 0xff00) >> 8) | ((initial & 0x00ff) << 8)) - - // Account for initial having been calculated over an odd number of - // bytes. - if odd { - acc += uint(buf[0]) << 8 - buf = buf[1:] - } - - // See whether we're checksumming an odd number of bytes. If - // so, the final byte is a big endian most significant byte. 
- odd = len(buf)%2 != 0 - if odd { - acc += uint(buf[len(buf)-1]) - buf = buf[:len(buf)-1] - } - - // Compute the checksum 8 bytes at a time. - var carry uint - for len(buf) >= 8 { - acc, carry = bits.Add(acc, *(*uint)(unsafe.Pointer(&buf[0])), carry) - buf = buf[8:] - } - - // Compute the remainder 2 bytes at a time. We are guaranteed that - // len(buf) is even due to the above handling of odd-length buffers. - for len(buf) > 0 { - acc, carry = bits.Add(acc, uint(*(*uint16)(unsafe.Pointer(&buf[0]))), carry) - buf = buf[2:] - } - acc += carry - - // Fold the checksum into 16 bits. - for acc > math.MaxUint16 { - acc = (acc & 0xffff) + acc>>16 - } - - // Swap the byte order before returning. - acc = ((acc & 0xff00) >> 8) | ((acc & 0x00ff) << 8) - return uint16(acc), odd -} diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_unsafe.go new file mode 100644 index 000000000..66b7ab679 --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_unsafe.go @@ -0,0 +1,182 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package checksum + +import ( + "encoding/binary" + "math/bits" + "unsafe" +) + +// Note: odd indicates whether initial is a partial checksum over an odd number +// of bytes. +func calculateChecksum(buf []byte, odd bool, initial uint16) (uint16, bool) { + // Use a larger-than-uint16 accumulator to benefit from parallel summation + // as described in RFC 1071 1.2.C. + acc := uint64(initial) + + // Handle an odd number of previously-summed bytes, and get the return + // value for odd. + if odd { + acc += uint64(buf[0]) + buf = buf[1:] + } + odd = len(buf)&1 != 0 + + // Aligning &buf[0] below is much simpler if len(buf) >= 8; special-case + // smaller bufs. + if len(buf) < 8 { + if len(buf) >= 4 { + acc += (uint64(buf[0]) << 8) + uint64(buf[1]) + acc += (uint64(buf[2]) << 8) + uint64(buf[3]) + buf = buf[4:] + } + if len(buf) >= 2 { + acc += (uint64(buf[0]) << 8) + uint64(buf[1]) + buf = buf[2:] + } + if len(buf) >= 1 { + acc += uint64(buf[0]) << 8 + // buf = buf[1:] is skipped because it's unused and nogo will + // complain. + } + return reduce(acc), odd + } + + // On little-endian architectures, multi-byte loads from buf will load + // bytes in the wrong order. Rather than byte-swap after each load (slow), + // we byte-swap the accumulator before summing any bytes and byte-swap it + // back before returning, which still produces the correct result as + // described in RFC 1071 1.2.B "Byte Order Independence". + // + // acc is at most a uint16 + a uint8, so its upper 32 bits must be 0s. We + // preserve this property by byte-swapping only the lower 32 bits of acc, + // so that additions to acc performed during alignment can't overflow. + acc = uint64(bswapIfLittleEndian32(uint32(acc))) + + // Align &buf[0] to an 8-byte boundary. 
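+	// For example, for &buf[0] ending in 0b001 the code below sums one byte
+	// (byte-swapped), then a 2-byte chunk, then a 4-byte chunk, leaving the
+	// remaining slice 8-byte aligned for the main loop.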
+ bswapped := false + if sliceAddr(buf)&1 != 0 { + // Compute the rest of the partial checksum with bytes swapped, and + // swap back before returning; see the last paragraph of + // RFC 1071 1.2.B. + acc = uint64(bits.ReverseBytes32(uint32(acc))) + bswapped = true + // No `<< 8` here due to the byte swap we just did. + acc += uint64(bswapIfLittleEndian16(uint16(buf[0]))) + buf = buf[1:] + } + if sliceAddr(buf)&2 != 0 { + acc += uint64(*(*uint16)(unsafe.Pointer(&buf[0]))) + buf = buf[2:] + } + if sliceAddr(buf)&4 != 0 { + acc += uint64(*(*uint32)(unsafe.Pointer(&buf[0]))) + buf = buf[4:] + } + + // Sum 64 bytes at a time. Beyond this point, additions to acc may + // overflow, so we have to handle carrying. + for len(buf) >= 64 { + var carry uint64 + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[0])), 0) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[8])), carry) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[16])), carry) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[24])), carry) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[32])), carry) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[40])), carry) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[48])), carry) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[56])), carry) + acc, _ = bits.Add64(acc, 0, carry) + buf = buf[64:] + } + + // Sum the remaining 0-63 bytes. + if len(buf) >= 32 { + var carry uint64 + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[0])), 0) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[8])), carry) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[16])), carry) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[24])), carry) + acc, _ = bits.Add64(acc, 0, carry) + buf = buf[32:] + } + if len(buf) >= 16 { + var carry uint64 + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[0])), 0) + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[8])), carry) + acc, _ = bits.Add64(acc, 0, carry) + buf = buf[16:] + } + if len(buf) >= 8 { + var carry uint64 + acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[0])), 0) + acc, _ = bits.Add64(acc, 0, carry) + buf = buf[8:] + } + if len(buf) >= 4 { + var carry uint64 + acc, carry = bits.Add64(acc, uint64(*(*uint32)(unsafe.Pointer(&buf[0]))), 0) + acc, _ = bits.Add64(acc, 0, carry) + buf = buf[4:] + } + if len(buf) >= 2 { + var carry uint64 + acc, carry = bits.Add64(acc, uint64(*(*uint16)(unsafe.Pointer(&buf[0]))), 0) + acc, _ = bits.Add64(acc, 0, carry) + buf = buf[2:] + } + if len(buf) >= 1 { + // bswapIfBigEndian16(buf[0]) == bswapIfLittleEndian16(buf[0]<<8). + var carry uint64 + acc, carry = bits.Add64(acc, uint64(bswapIfBigEndian16(uint16(buf[0]))), 0) + acc, _ = bits.Add64(acc, 0, carry) + // buf = buf[1:] is skipped because it's unused and nogo will complain. + } + + // Reduce the checksum to 16 bits and undo byte swaps before returning. + acc16 := bswapIfLittleEndian16(reduce(acc)) + if bswapped { + acc16 = bits.ReverseBytes16(acc16) + } + return acc16, odd +} + +func reduce(acc uint64) uint16 { + // Ideally we would do: + // return uint16(acc>>48) +' uint16(acc>>32) +' uint16(acc>>16) +' uint16(acc) + // for more instruction-level parallelism; however, there is no + // bits.Add16(). 
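+	// For example, reduce(0x1_0002) yields 0x0003, matching the RFC 1071
+	// end-around-carry sum 0x0002 + 0x0001.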
+ acc = (acc >> 32) + (acc & 0xffff_ffff) // at most 0x1_ffff_fffe + acc32 := uint32(acc>>32 + acc) // at most 0xffff_ffff + acc32 = (acc32 >> 16) + (acc32 & 0xffff) // at most 0x1_fffe + return uint16(acc32>>16 + acc32) // at most 0xffff +} + +func bswapIfLittleEndian32(val uint32) uint32 { + return binary.BigEndian.Uint32((*[4]byte)(unsafe.Pointer(&val))[:]) +} + +func bswapIfLittleEndian16(val uint16) uint16 { + return binary.BigEndian.Uint16((*[2]byte)(unsafe.Pointer(&val))[:]) +} + +func bswapIfBigEndian16(val uint16) uint16 { + return binary.LittleEndian.Uint16((*[2]byte)(unsafe.Pointer(&val))[:]) +} + +func sliceAddr(buf []byte) uintptr { + return uintptr(unsafe.Pointer(unsafe.SliceData(buf))) +} diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_unsafe_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_unsafe_state_autogen.go index b5135383b..936aef749 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_unsafe_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_unsafe_state_autogen.go @@ -1,6 +1,3 @@ // automatically generated by stateify. -//go:build !amd64 -// +build !amd64 - package checksum diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/addressable_endpoint_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/addressable_endpoint_state.go index 91b615eb6..f472bbf0a 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/addressable_endpoint_state.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/addressable_endpoint_state.go @@ -18,6 +18,7 @@ import ( "fmt" "gvisor.dev/gvisor/pkg/tcpip" + "gvisor.dev/gvisor/pkg/tcpip/header" ) func (lifetimes *AddressLifetimes) sanitize() { @@ -433,7 +434,7 @@ func (a *AddressableEndpointState) MainAddress() tcpip.AddressWithPrefix { a.mu.RLock() defer a.mu.RUnlock() - ep := a.acquirePrimaryAddressRLocked(func(ep *addressState) bool { + ep := a.acquirePrimaryAddressRLocked(tcpip.Address{}, func(ep *addressState) bool { switch kind := ep.GetKind(); kind { case Permanent: return a.networkEndpoint.Enabled() || !a.options.HiddenWhileDisabled @@ -461,7 +462,29 @@ func (a *AddressableEndpointState) MainAddress() tcpip.AddressWithPrefix { // valid according to isValid. // // +checklocksread:a.mu -func (a *AddressableEndpointState) acquirePrimaryAddressRLocked(isValid func(*addressState) bool) *addressState { +func (a *AddressableEndpointState) acquirePrimaryAddressRLocked(remoteAddr tcpip.Address, isValid func(*addressState) bool) *addressState { + // TODO: Move this out into IPv4-specific code. + // IPv6 handles source IP selection elsewhere. We have to do source + // selection only for IPv4, in which case ep is never deprecated. Thus + // we don't have to worry about refcounts. + if remoteAddr.Len() == header.IPv4AddressSize && remoteAddr != (tcpip.Address{}) { + var best *addressState + var bestLen uint8 + for _, state := range a.primary { + if !isValid(state) { + continue + } + stateLen := state.addr.Address.MatchingPrefix(remoteAddr) + if best == nil || bestLen < stateLen { + best = state + bestLen = stateLen + } + } + if best != nil && best.TryIncRef() { + return best + } + } + var deprecatedEndpoint *addressState for _, ep := range a.primary { if !isValid(ep) { @@ -469,7 +492,7 @@ func (a *AddressableEndpointState) acquirePrimaryAddressRLocked(isValid func(*ad } if !ep.Deprecated() { - if ep.IncRef() { + if ep.TryIncRef() { // ep is not deprecated, so return it immediately. 
// // If we kept track of a deprecated endpoint, decrement its reference @@ -486,7 +509,7 @@ func (a *AddressableEndpointState) acquirePrimaryAddressRLocked(isValid func(*ad return ep } - } else if deprecatedEndpoint == nil && ep.IncRef() { + } else if deprecatedEndpoint == nil && ep.TryIncRef() { // We prefer an endpoint that is not deprecated, but we keep track of // ep in case a doesn't have any non-deprecated endpoints. // @@ -518,7 +541,7 @@ func (a *AddressableEndpointState) AcquireAssignedAddressOrMatching(localAddr tc return nil } - if !addrState.IncRef() { + if !addrState.TryIncRef() { panic(fmt.Sprintf("failed to increase the reference count for address = %s", addrState.addr)) } @@ -527,7 +550,7 @@ func (a *AddressableEndpointState) AcquireAssignedAddressOrMatching(localAddr tc if f != nil { for _, addrState := range a.endpoints { - if addrState.IsAssigned(allowTemp) && f(addrState) && addrState.IncRef() { + if addrState.IsAssigned(allowTemp) && f(addrState) && addrState.TryIncRef() { return addrState } } @@ -599,7 +622,7 @@ func (a *AddressableEndpointState) AcquireOutgoingPrimaryAddress(remoteAddr tcpi a.mu.Lock() defer a.mu.Unlock() - ep := a.acquirePrimaryAddressRLocked(func(ep *addressState) bool { + ep := a.acquirePrimaryAddressRLocked(remoteAddr, func(ep *addressState) bool { return ep.IsAssigned(allowExpired) }) @@ -782,7 +805,7 @@ func (a *addressState) IsAssigned(allowExpired bool) bool { } // IncRef implements AddressEndpoint. -func (a *addressState) IncRef() bool { +func (a *addressState) TryIncRef() bool { return a.refs.TryIncRef() } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/registration.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/registration.go index 4cd720820..bbfe1a797 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/registration.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/registration.go @@ -534,11 +534,11 @@ type AssignableAddressEndpoint interface { // to its NetworkEndpoint. IsAssigned(allowExpired bool) bool - // IncRef increments this endpoint's reference count. + // TryIncRef tries to increment this endpoint's reference count. // // Returns true if it was successfully incremented. If it returns false, then // the endpoint is considered expired and should no longer be used. - IncRef() bool + TryIncRef() bool // DecRef decrements this endpoint's reference count. DecRef() diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route.go index 9755362af..32ce9a7f0 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route.go @@ -537,7 +537,7 @@ func (r *Route) Acquire() { // +checklocksread:r.mu func (r *Route) acquireLocked() { if ep := r.localAddressEndpoint; ep != nil { - if !ep.IncRef() { + if !ep.TryIncRef() { panic(fmt.Sprintf("failed to increment reference count for local address endpoint = %s", r.LocalAddress())) } } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack.go index 382fcdd49..e3a7f788b 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack.go @@ -1290,6 +1290,28 @@ func isNICForwarding(nic *nic, proto tcpip.NetworkProtocolNumber) bool { } } +// findRouteWithLocalAddrFromAnyInterfaceRLocked returns a route to the given +// destination address, leaving through the given NIC. 
+// +// Rather than preferring to find a route that uses a local address assigned to +// the outgoing interface, it finds any NIC that holds a matching local address +// endpoint. +// +// +checklocksread:s.mu +func (s *Stack) findRouteWithLocalAddrFromAnyInterfaceRLocked(outgoingNIC *nic, localAddr, remoteAddr, gateway tcpip.Address, netProto tcpip.NetworkProtocolNumber, multicastLoop bool) *Route { + for _, aNIC := range s.nics { + addressEndpoint := s.getAddressEP(aNIC, localAddr, remoteAddr, netProto) + if addressEndpoint == nil { + continue + } + + if r := constructAndValidateRoute(netProto, addressEndpoint, aNIC /* localAddressNIC */, outgoingNIC, gateway, localAddr, remoteAddr, s.handleLocal, multicastLoop); r != nil { + return r + } + } + return nil +} + // FindRoute creates a route to the given destination address, leaving through // the given NIC and local address (if provided). // @@ -1305,6 +1327,11 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n s.mu.RLock() defer s.mu.RUnlock() + // Reject attempts to use unsupported protocols. + if !s.CheckNetworkProtocol(netProto) { + return nil, &tcpip.ErrUnknownProtocol{} + } + isLinkLocal := header.IsV6LinkLocalUnicastAddress(remoteAddr) || header.IsV6LinkLocalMulticastAddress(remoteAddr) isLocalBroadcast := remoteAddr == header.IPv4Broadcast isMulticast := header.IsV4MulticastAddress(remoteAddr) || header.IsV6MulticastAddress(remoteAddr) @@ -1374,15 +1401,27 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n } } - // If the stack has forwarding enabled and we haven't found a valid route - // to the remote address yet, keep track of the first valid route. We - // keep iterating because we prefer routes that let us use a local - // address that is assigned to the outgoing interface. There is no - // requirement to do this from any RFC but simply a choice made to better - // follow a strong host model which the netstack follows at the time of - // writing. + // If the stack has forwarding enabled, we haven't found a valid route to + // the remote address yet, and we are routing locally generated traffic, + // keep track of the first valid route. We keep iterating because we + // prefer routes that let us use a local address that is assigned to the + // outgoing interface. There is no requirement to do this from any RFC + // but simply a choice made to better follow a strong host model which + // the netstack follows at the time of writing. + // + // Note that for incoming traffic that we are forwarding (for which the + // NIC and local address are unspecified), we do not keep iterating, as + // there is no reason to prefer routes that let us use a local address + // when routing forwarded (as opposed to locally-generated) traffic. + locallyGenerated := (id != 0 || localAddr != tcpip.Address{}) if onlyGlobalAddresses && chosenRoute.Equal(tcpip.Route{}) && isNICForwarding(nic, netProto) { - chosenRoute = route + if locallyGenerated { + chosenRoute = route + continue + } + if r := s.findRouteWithLocalAddrFromAnyInterfaceRLocked(nic, localAddr, remoteAddr, route.Gateway, netProto, multicastLoop); r != nil { + return r + } } } @@ -1422,15 +1461,8 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n if id == 0 { // If an interface is not specified, try to find a NIC that holds the local // address endpoint to construct a route. 
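		// For example, a hypothetical caller that pins neither a NIC nor a
		// source address reaches this fallback when no earlier route matched:
		//
		//	r, err := s.FindRoute(0, tcpip.Address{}, remoteAddr, header.IPv4ProtocolNumber, false)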
- for _, aNIC := range s.nics { - addressEndpoint := s.getAddressEP(aNIC, localAddr, remoteAddr, netProto) - if addressEndpoint == nil { - continue - } - - if r := constructAndValidateRoute(netProto, addressEndpoint, aNIC /* localAddressNIC */, nic /* outgoingNIC */, gateway, localAddr, remoteAddr, s.handleLocal, multicastLoop); r != nil { - return r, nil - } + if r := s.findRouteWithLocalAddrFromAnyInterfaceRLocked(nic, localAddr, remoteAddr, gateway, netProto, multicastLoop); r != nil { + return r, nil } } } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip.go index 92ac54db4..4c0845ad9 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip.go @@ -2157,6 +2157,11 @@ type TCPStats struct { // SpuriousRTORecovery is the number of spurious RTOs. SpuriousRTORecovery *StatCounter + + // ForwardMaxInFlightDrop is the number of connection requests that are + // dropped due to exceeding the maximum number of in-flight connection + // requests. + ForwardMaxInFlightDrop *StatCounter } // UDPStats collects UDP-specific stats. diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint.go index 7ae7043ec..7b0af1759 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint.go @@ -2059,11 +2059,16 @@ func (e *endpoint) GetSockOptInt(opt tcpip.SockOptInt) (int, tcpip.Error) { return v, nil case tcpip.MaxSegOption: - // This is just stubbed out. Linux never returns the user_mss - // value as it either returns the defaultMSS or returns the - // actual current MSS. Netstack just returns the defaultMSS - // always for now. + // Linux only returns user_mss value if user_mss is set and the socket is + // unconnected. Otherwise Linux returns the actual current MSS. Netstack + // mimics the user_mss behavior, but otherwise just returns the defaultMSS + // for now. v := header.TCPDefaultMSS + e.LockUser() + if state := e.EndpointState(); e.userMSS > 0 && (state.internal() || state == StateClose || state == StateListen) { + v = int(e.userMSS) + } + e.UnlockUser() return v, nil case tcpip.MTUDiscoverOption: @@ -2599,14 +2604,14 @@ func (e *endpoint) shutdownLocked(flags tcpip.ShutdownFlags) tcpip.Error { // Listen puts the endpoint in "listen" mode, which allows it to accept // new connections. func (e *endpoint) Listen(backlog int) tcpip.Error { - err := e.listen(backlog) - if err != nil { + if err := e.listen(backlog); err != nil { if !err.IgnoreStats() { e.stack.Stats().TCP.FailedConnectionAttempts.Increment() e.stats.FailedConnectionAttempts.Increment() } + return err } - return err + return nil } func (e *endpoint) listen(backlog int) tcpip.Error { diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/forwarder.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/forwarder.go index 3d632939d..0071093f2 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/forwarder.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/forwarder.go @@ -88,6 +88,7 @@ func (f *Forwarder) HandlePacket(id stack.TransportEndpointID, pkt stack.PacketB // Ignore the segment if we're beyond the limit. 
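	// Each such drop increments a counter that operators can read, e.g. (a
	// sketch, assuming a *stack.Stack s):
	//
	//	drops := s.Stats().TCP.ForwardMaxInFlightDrop.Value()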
if len(f.inFlight) >= f.maxInFlight { + f.stack.Stats().TCP.ForwardMaxInFlightDrop.Increment() return true } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint.go index a9948a1c6..eab6c95f4 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint.go @@ -1025,6 +1025,7 @@ func (e *endpoint) onICMPError(err tcpip.Error, transErr stack.TransportError, p } id := e.net.Info().ID + e.mu.RLock() e.SocketOptions().QueueErr(&tcpip.SockError{ Err: err, Cause: transErr, @@ -1041,6 +1042,7 @@ func (e *endpoint) onICMPError(err tcpip.Error, transErr stack.TransportError, p }, NetProto: pkt.NetworkProtocolNumber, }) + e.mu.RUnlock() } // Notify of the error. diff --git a/vendor/modules.txt b/vendor/modules.txt index 56a89e7f8..6cd330a9c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -27,8 +27,8 @@ github.com/dustin/go-humanize # github.com/fsnotify/fsnotify v1.4.9 ## explicit; go 1.13 github.com/fsnotify/fsnotify -# github.com/google/btree v1.0.1 -## explicit; go 1.12 +# github.com/google/btree v1.1.2 +## explicit; go 1.18 github.com/google/btree # github.com/google/go-cmp v0.6.0 ## explicit; go 1.13 @@ -145,8 +145,8 @@ golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/ssh golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/knownhosts -# golang.org/x/mod v0.12.0 -## explicit; go 1.17 +# golang.org/x/mod v0.13.0 +## explicit; go 1.18 golang.org/x/mod/semver # golang.org/x/net v0.17.0 ## explicit; go 1.17 @@ -188,10 +188,10 @@ golang.org/x/text/internal/utf8internal golang.org/x/text/language golang.org/x/text/runes golang.org/x/text/transform -# golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 +# golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate -# golang.org/x/tools v0.13.0 +# golang.org/x/tools v0.14.0 ## explicit; go 1.18 golang.org/x/tools/cmd/stringer golang.org/x/tools/go/gcexportdata @@ -216,7 +216,7 @@ gopkg.in/tomb.v1 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# gvisor.dev/gvisor v0.0.0-20230715022000-fd277b20b8db +# gvisor.dev/gvisor v0.0.0-20231023213702-2691a8f9b1cf ## explicit; go 1.20 gvisor.dev/gvisor/pkg/atomicbitops gvisor.dev/gvisor/pkg/bits From 4dcd46ba6031a282db9686a4a30ac7c4279c1855 Mon Sep 17 00:00:00 2001 From: Akihiro Suda Date: Mon, 15 Jan 2024 14:57:12 +0900 Subject: [PATCH 2/2] ghactions: add Go 1.22rc1 Signed-off-by: Akihiro Suda --- .github/workflows/go.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 4c69d714b..f45b41833 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -11,13 +11,17 @@ jobs: build: runs-on: ubuntu-20.04 # explicitly use 20.04, see commit 428c40018a timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + go-version: ["1.20.x", "1.21.x", "1.22.0-rc.1"] steps: - uses: actions/checkout@v3 - name: Set up Go uses: actions/setup-go@v3 with: - go-version: 1.20.x + go-version: ${{ matrix.go-version }} - name: Build run: |