diff --git a/go.mod b/go.mod index 82fbf3ea30..bb0824e61b 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 github.com/cavaliergopher/grab/v3 v3.0.1 github.com/cheggaaa/pb/v3 v3.1.4 - github.com/containers/gvisor-tap-vsock v0.6.2 + github.com/containers/gvisor-tap-vsock v0.7.1 github.com/containers/image/v5 v5.24.1 github.com/coreos/go-systemd/v22 v22.5.0 github.com/crc-org/admin-helper v0.0.12-0.20221012143549-fd5acd1c478e @@ -30,8 +30,8 @@ require ( github.com/kofalt/go-memoize v0.0.0-20220914132407-0b5d6a304579 github.com/mattn/go-colorable v0.1.13 github.com/mdlayher/vsock v1.2.1 - github.com/onsi/ginkgo/v2 v2.9.7 - github.com/onsi/gomega v1.27.8 + github.com/onsi/ginkgo/v2 v2.11.0 + github.com/onsi/gomega v1.27.10 github.com/opencontainers/image-spec v1.1.0-rc4 github.com/openshift/api v0.0.0-20230711143145-b5a47f95ba70 github.com/openshift/client-go v0.0.0-20230120202327-72f107311084 @@ -52,12 +52,12 @@ require ( github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 github.com/yusufpapurcu/wmi v1.2.3 github.com/zalando/go-keyring v0.2.3 - golang.org/x/crypto v0.11.0 - golang.org/x/net v0.12.0 + golang.org/x/crypto v0.13.0 + golang.org/x/net v0.15.0 golang.org/x/sync v0.3.0 - golang.org/x/sys v0.10.0 - golang.org/x/term v0.10.0 - golang.org/x/text v0.11.0 + golang.org/x/sys v0.12.0 + golang.org/x/term v0.12.0 + golang.org/x/text v0.13.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.26.6 @@ -145,7 +145,7 @@ require ( github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mdlayher/socket v0.4.1 // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect - github.com/miekg/dns v1.1.55 // indirect + github.com/miekg/dns v1.1.56 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect @@ -156,7 +156,7 @@ require ( github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/runc v1.1.5 // indirect - github.com/opencontainers/runtime-spec v1.0.3-0.20220706171101-8d0d6d41d096 // indirect + github.com/opencontainers/runtime-spec v1.1.0-rc.1 // indirect github.com/openshift/library-go v0.0.0-20230227140230-39892725eed1 // indirect github.com/patrickmn/go-cache v2.1.0+incompatible // indirect github.com/pelletier/go-toml/v2 v2.0.9 // indirect @@ -186,10 +186,10 @@ require ( go.etcd.io/bbolt v1.3.6 // indirect go.mongodb.org/mongo-driver v1.11.1 // indirect go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect - golang.org/x/mod v0.10.0 // indirect + golang.org/x/mod v0.12.0 // indirect golang.org/x/oauth2 v0.7.0 // indirect golang.org/x/time v0.2.0 // indirect - golang.org/x/tools v0.9.1 // indirect + golang.org/x/tools v0.13.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/grpc v1.55.0 // indirect @@ -199,7 +199,7 @@ require ( gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gvisor.dev/gvisor v0.0.0-20221216231429-a78e892a26d2 // indirect + gvisor.dev/gvisor v0.0.0-20230715022000-fd277b20b8db // indirect inet.af/tcpproxy v0.0.0-20220326234310-be3ee21c9fa0 // indirect k8s.io/klog/v2 v2.100.1 // indirect k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect diff --git a/go.sum b/go.sum index 
44d1e1089f..e184a28b09 100644 --- a/go.sum +++ b/go.sum @@ -108,8 +108,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= -github.com/containers/gvisor-tap-vsock v0.6.2 h1:wc/me5SXVlF2iOv5gjmjZARsJTggzpVoNO9dgC+2uRA= -github.com/containers/gvisor-tap-vsock v0.6.2/go.mod h1:vyqP7jqNzPudoYhnvj3Dw2lhsjQ3kf1/PyXLEaZsvb0= +github.com/containers/gvisor-tap-vsock v0.7.1 h1:+Rc+sOPplrkQb/BUXeN0ug8TxjgyrIqo/9P/eNS2A4c= +github.com/containers/gvisor-tap-vsock v0.7.1/go.mod h1:WSSsjcuYZkvP8i0J+Ht3LF8yvysn3krD5zxQ74wz7y0= github.com/containers/image/v5 v5.24.1 h1:XaRw3FJmvZtI297uBVTJluUVH4AQJ//YpHviaOw0C4M= github.com/containers/image/v5 v5.24.1/go.mod h1:oss5F6ssGQz8ZtC79oY+fuzYA3m3zBek9tq9gmhuvHc= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= @@ -534,8 +534,8 @@ github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyex github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= -github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= +github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= +github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -572,10 +572,10 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.9.7 h1:06xGQy5www2oN160RtEZoTvnP2sPhEfePYmCDc2szss= -github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0= -github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= -github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= +github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= +github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= +github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= @@ -584,8 +584,8 @@ github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVn 
github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs= github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20220706171101-8d0d6d41d096 h1:j0J5ownk4TZZfor2GNAXiJYd6BZYj1/TSKZtsVyRbRI= -github.com/opencontainers/runtime-spec v1.0.3-0.20220706171101-8d0d6d41d096/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.1.0-rc.1 h1:wHa9jroFfKGQqFHj0I1fMRKLl0pfj+ynAqBxo3v6u9w= +github.com/opencontainers/runtime-spec v1.1.0-rc.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/openshift/api v0.0.0-20230711143145-b5a47f95ba70 h1:fFU38cQYzz+gZAMfQ0wbxPpbWq5IFYPAa5nPAAaXX7w= github.com/openshift/api v0.0.0-20230711143145-b5a47f95ba70/go.mod h1:ctXNyWanKEjGj8sss1KjjHQ3ENKFm33FFnS5BKaIPh4= @@ -812,8 +812,8 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -849,8 +849,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -899,8 +899,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= 
-golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1007,16 +1007,16 @@ golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1029,8 +1029,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1096,8 +1096,8 @@ golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod 
h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= -golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1241,8 +1241,8 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= -gvisor.dev/gvisor v0.0.0-20221216231429-a78e892a26d2 h1:QN+Xh63jThYFN4CrcD4KXj+rUhevlb0LXEAlZ4m+qXQ= -gvisor.dev/gvisor v0.0.0-20221216231429-a78e892a26d2/go.mod h1:Dn5idtptoW1dIos9U6A2rpebLs/MtTwFacjKb8jLdQA= +gvisor.dev/gvisor v0.0.0-20230715022000-fd277b20b8db h1:WZSmkyu/hep9YhWIlBZefwGVBrnGE5yW8JPD56YRsXs= +gvisor.dev/gvisor v0.0.0-20230715022000-fd277b20b8db/go.mod h1:sQuqOkxbfJq/GS2uSnqHphtXclHyk/ZrAGhZBxxsq6g= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/containers/gvisor-tap-vsock/pkg/services/dhcp/dhcp.go b/vendor/github.com/containers/gvisor-tap-vsock/pkg/services/dhcp/dhcp.go index 3ad2553c3a..f464a31d6f 100644 --- a/vendor/github.com/containers/gvisor-tap-vsock/pkg/services/dhcp/dhcp.go +++ b/vendor/github.com/containers/gvisor-tap-vsock/pkg/services/dhcp/dhcp.go @@ -82,7 +82,7 @@ func dial(s *stack.Stack, nic int) (*gonet.UDPConn, error) { if err := ep.Bind(tcpip.FullAddress{ NIC: tcpip.NICID(nic), - Addr: "", + Addr: tcpip.Address{}, Port: uint16(serverPort), }); err != nil { ep.Close() diff --git a/vendor/github.com/containers/gvisor-tap-vsock/pkg/services/forwarder/ports.go b/vendor/github.com/containers/gvisor-tap-vsock/pkg/services/forwarder/ports.go index 56d3080bfd..828c248b5b 100644 --- a/vendor/github.com/containers/gvisor-tap-vsock/pkg/services/forwarder/ports.go +++ b/vendor/github.com/containers/gvisor-tap-vsock/pkg/services/forwarder/ports.go @@ -384,7 +384,7 @@ func tcpipAddress(nicID tcpip.NICID, remote string) (address tcpip.FullAddress, address = tcpip.FullAddress{ NIC: nicID, - Addr: tcpip.Address(net.ParseIP(split[0]).To4()), + Addr: tcpip.AddrFrom4Slice(net.ParseIP(split[0]).To4()), Port: uint16(port), } diff --git a/vendor/github.com/containers/gvisor-tap-vsock/pkg/services/forwarder/tcp.go b/vendor/github.com/containers/gvisor-tap-vsock/pkg/services/forwarder/tcp.go index 0bc15b2e6a..e60936ec41 100644 --- a/vendor/github.com/containers/gvisor-tap-vsock/pkg/services/forwarder/tcp.go +++ b/vendor/github.com/containers/gvisor-tap-vsock/pkg/services/forwarder/tcp.go @@ -57,6 +57,6 @@ 
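Several of the vendored gvisor-tap-vsock hunks in this update (dhcp.go and ports.go above, plus tcp.go, conn.go, mux.go, services.go and virtualnetwork.go further down) follow the same pattern: the newer gvisor releases turn tcpip.Address into an opaque struct, so string/slice casts are replaced with the explicit constructors tcpip.AddrFrom4Slice, tcpip.AddrFromSlice and tcpip.MaskFromBytes, and with the zero value tcpip.Address{} for "unspecified". A minimal sketch of that conversion outside the vendored code; the helper name, the nil check and the sample IP are ours, not part of the diff:

```go
package main

import (
	"fmt"
	"net"

	"gvisor.dev/gvisor/pkg/tcpip"
)

// toTCPIPAddr shows the post-update way of building a tcpip.Address from a
// textual IP. The old code relied on tcpip.Address(ip.To4()), which no longer
// compiles once tcpip.Address stops being a string-like type.
func toTCPIPAddr(s string) (tcpip.Address, error) {
	ip := net.ParseIP(s)
	if ip == nil {
		return tcpip.Address{}, fmt.Errorf("invalid IP %q", s) // zero value = unspecified
	}
	if v4 := ip.To4(); v4 != nil {
		return tcpip.AddrFrom4Slice(v4), nil // requires exactly 4 bytes
	}
	return tcpip.AddrFromSlice(ip.To16()), nil
}

func main() {
	addr, err := toTCPIPAddr("192.168.127.1")
	if err != nil {
		panic(err)
	}
	fmt.Println(addr.String())
}
```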
func TCP(s *stack.Stack, nat map[tcpip.Address]tcpip.Address, natLock *sync.Mute func linkLocal() *tcpip.Subnet { _, parsedSubnet, _ := net.ParseCIDR(linkLocalSubnet) // CoreOS VM tries to connect to Amazon EC2 metadata service - subnet, _ := tcpip.NewSubnet(tcpip.Address(parsedSubnet.IP), tcpip.AddressMask(parsedSubnet.Mask)) + subnet, _ := tcpip.NewSubnet(tcpip.AddrFromSlice(parsedSubnet.IP), tcpip.MaskFromBytes(parsedSubnet.Mask)) return &subnet } diff --git a/vendor/github.com/containers/gvisor-tap-vsock/pkg/tap/link.go b/vendor/github.com/containers/gvisor-tap-vsock/pkg/tap/link.go index 9aebf8573d..b85ce43dde 100644 --- a/vendor/github.com/containers/gvisor-tap-vsock/pkg/tap/link.go +++ b/vendor/github.com/containers/gvisor-tap-vsock/pkg/tap/link.go @@ -63,6 +63,8 @@ func (e *LinkEndpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber func (e *LinkEndpoint) AddHeader(_ stack.PacketBufferPtr) { } +func (e *LinkEndpoint) ParseHeader(stack.PacketBufferPtr) bool { return true } + func (e *LinkEndpoint) Capabilities() stack.LinkEndpointCapabilities { return stack.CapabilityResolutionRequired | stack.CapabilityRXChecksumOffload } @@ -109,7 +111,7 @@ func (e *LinkEndpoint) writePacket(r stack.RouteInfo, protocol tcpip.NetworkProt h := header.ARP(pkt.NetworkHeader().Slice()) if h.IsValid() && h.Op() == header.ARPReply { - ip := tcpip.Address(h.ProtocolAddressSender()).String() + ip := tcpip.AddrFromSlice(h.ProtocolAddressSender()).String() _, ok := e.virtualIPs[ip] if ip != e.IP() && !ok { log.Debugf("dropping spoofing packets from the gateway about IP %s", ip) diff --git a/vendor/github.com/containers/gvisor-tap-vsock/pkg/tap/switch.go b/vendor/github.com/containers/gvisor-tap-vsock/pkg/tap/switch.go index 8bde528c39..10fe59e3b7 100644 --- a/vendor/github.com/containers/gvisor-tap-vsock/pkg/tap/switch.go +++ b/vendor/github.com/containers/gvisor-tap-vsock/pkg/tap/switch.go @@ -13,7 +13,7 @@ import ( "github.com/google/gopacket/layers" "github.com/pkg/errors" log "github.com/sirupsen/logrus" - "gvisor.dev/gvisor/pkg/bufferv2" + "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/stack" @@ -168,14 +168,15 @@ func (e *Switch) txBuf(id int, conn protocolConn, buf []byte) error { size := conn.protocolImpl.(streamProtocol).Buf() conn.protocolImpl.(streamProtocol).Write(size, len(buf)) - if _, err := conn.Write(size); err != nil { + if _, err := conn.Write(append(size, buf...)); err != nil { + e.disconnect(id, conn) + return err + } + } else { + if _, err := conn.Write(buf); err != nil { e.disconnect(id, conn) return err } - } - if _, err := conn.Write(buf); err != nil { - e.disconnect(id, conn) - return err } return nil } @@ -261,7 +262,7 @@ func (e *Switch) rxBuf(_ context.Context, id int, buf []byte) { if eth.DestinationAddress() != e.gateway.LinkAddress() { pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{ - Payload: bufferv2.MakeWithData(buf), + Payload: buffer.MakeWithData(buf), }) if err := e.tx(pkt); err != nil { log.Error(err) @@ -269,7 +270,7 @@ func (e *Switch) rxBuf(_ context.Context, id int, buf []byte) { pkt.DecRef() } if eth.DestinationAddress() == e.gateway.LinkAddress() || eth.DestinationAddress() == header.EthernetBroadcastAddress { - data := bufferv2.MakeWithData(buf) + data := buffer.MakeWithData(buf) data.TrimFront(header.EthernetMinimumSize) pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{ Payload: data, diff --git 
a/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/listen.go b/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/listen.go new file mode 100644 index 0000000000..a0909bb6bd --- /dev/null +++ b/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/listen.go @@ -0,0 +1,26 @@ +package transport + +import ( + "errors" + "net" + "net/url" +) + +func defaultListenURL(url *url.URL) (net.Listener, error) { + switch url.Scheme { + case "unix": + return net.Listen(url.Scheme, url.Path) + case "tcp": + return net.Listen("tcp", url.Host) + default: + return nil, errors.New("unexpected scheme") + } +} + +func Listen(endpoint string) (net.Listener, error) { + parsed, err := url.Parse(endpoint) + if err != nil { + return nil, err + } + return listenURL(parsed) +} diff --git a/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/listen_darwin.go b/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/listen_darwin.go index 3fa9d00c5f..bea3042c21 100644 --- a/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/listen_darwin.go +++ b/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/listen_darwin.go @@ -12,11 +12,7 @@ import ( const DefaultURL = "vsock://null:1024/vm_directory" -func Listen(endpoint string) (net.Listener, error) { - parsed, err := url.Parse(endpoint) - if err != nil { - return nil, err - } +func listenURL(parsed *url.URL) (net.Listener, error) { switch parsed.Scheme { case "vsock": port, err := strconv.Atoi(parsed.Port()) @@ -31,12 +27,8 @@ func Listen(endpoint string) (net.Listener, error) { Name: path, Net: "unix", }) - case "unix": - return net.Listen("unix", parsed.Path) - case "tcp": - return net.Listen("tcp", parsed.Host) default: - return nil, errors.New("unexpected scheme") + return defaultListenURL(parsed) } } diff --git a/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/listen_generic.go b/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/listen_generic.go new file mode 100644 index 0000000000..5553cd3b3c --- /dev/null +++ b/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/listen_generic.go @@ -0,0 +1,13 @@ +//go:build !darwin && !linux && !windows +// +build !darwin,!linux,!windows + +package transport + +import ( + "net" + "net/url" +) + +func listenURL(url *url.URL) (net.Listener, error) { + return defaultListenURL(url) +} diff --git a/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/listen_linux.go b/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/listen_linux.go index 9f9ce85402..6590e68b13 100644 --- a/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/listen_linux.go +++ b/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/listen_linux.go @@ -1,7 +1,6 @@ package transport import ( - "errors" "net" "net/url" "strconv" @@ -11,11 +10,7 @@ import ( const DefaultURL = "vsock://:1024" -func Listen(endpoint string) (net.Listener, error) { - parsed, err := url.Parse(endpoint) - if err != nil { - return nil, err - } +func listenURL(parsed *url.URL) (net.Listener, error) { switch parsed.Scheme { case "vsock": port, err := strconv.Atoi(parsed.Port()) @@ -23,25 +18,9 @@ func Listen(endpoint string) (net.Listener, error) { return nil, err } return mdlayhervsock.Listen(uint32(port), nil) - case "unix", "unixpacket": + case "unixpacket": return net.Listen(parsed.Scheme, parsed.Path) - case "tcp": - return net.Listen("tcp", parsed.Host) default: - return nil, errors.New("unexpected scheme") - } -} - -func ListenUnixgram(endpoint string) (*net.UnixConn, error) { 
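The listener refactor above centralizes URL parsing: the new transport.Listen parses the endpoint and hands it to a per-platform listenURL, which keeps only the platform-specific schemes (vsock, unixpacket) and falls back to the shared defaultListenURL for unix and tcp. A caller-side sketch under that assumption; the endpoint path is made up:

```go
package main

import (
	"log"

	"github.com/containers/gvisor-tap-vsock/pkg/transport"
)

func main() {
	// "unix" and "tcp" endpoints are now resolved by defaultListenURL on every
	// platform; "vsock" still goes through the OS-specific listenURL.
	ln, err := transport.Listen("unix:///tmp/gvproxy-api.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()

	conn, err := ln.Accept()
	if err != nil {
		log.Fatal(err)
	}
	conn.Close() // placeholder handling
}
```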
- parsed, err := url.Parse(endpoint) - if err != nil { - return nil, err - } - if parsed.Scheme != "unixgram" { - return nil, errors.New("unexpected scheme") + return defaultListenURL(parsed) } - return net.ListenUnixgram("unixgram", &net.UnixAddr{ - Name: parsed.Path, - Net: "unixgram", - }) } diff --git a/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/listen_windows.go b/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/listen_windows.go index cafb24735c..20c5d03568 100644 --- a/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/listen_windows.go +++ b/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/listen_windows.go @@ -1,7 +1,6 @@ package transport import ( - "errors" "net" "net/url" @@ -10,11 +9,7 @@ import ( const DefaultURL = "vsock://00000400-FACB-11E6-BD58-64006A7986D3" -func Listen(endpoint string) (net.Listener, error) { - parsed, err := url.Parse(endpoint) - if err != nil { - return nil, err - } +func listenURL(parsed *url.URL) (net.Listener, error) { switch parsed.Scheme { case "vsock": svcid, err := hvsock.GUIDFromString(parsed.Hostname()) @@ -25,19 +20,7 @@ func Listen(endpoint string) (net.Listener, error) { VMID: hvsock.GUIDWildcard, ServiceID: svcid, }) - case "unix": - return net.Listen(parsed.Scheme, parsed.Path) - case "tcp": - return net.Listen("tcp", parsed.Host) default: - return nil, errors.New("unexpected scheme") + return defaultListenURL(parsed) } } - -func ListenUnixgram(endpoint string) (net.Conn, error) { - return nil, errors.New("unsupported 'unixgram' scheme") -} - -func AcceptVfkit(listeningConn net.Conn) (net.Conn, error) { - return nil, errors.New("vfkit is unsupported on Windows") -} diff --git a/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/unixgram_unix.go b/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/unixgram_darwin.go similarity index 97% rename from vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/unixgram_unix.go rename to vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/unixgram_darwin.go index c5e33ad083..12d3c50a00 100644 --- a/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/unixgram_unix.go +++ b/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/unixgram_darwin.go @@ -1,5 +1,5 @@ -//go:build !windows -// +build !windows +//go:build darwin +// +build darwin package transport diff --git a/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/unixgram_nondarwin.go b/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/unixgram_nondarwin.go new file mode 100644 index 0000000000..09a9948771 --- /dev/null +++ b/vendor/github.com/containers/gvisor-tap-vsock/pkg/transport/unixgram_nondarwin.go @@ -0,0 +1,17 @@ +//go:build !darwin +// +build !darwin + +package transport + +import ( + "errors" + "net" +) + +func ListenUnixgram(_ string) (net.Conn, error) { + return nil, errors.New("unsupported 'unixgram' scheme") +} + +func AcceptVfkit(_ net.Conn) (net.Conn, error) { + return nil, errors.New("vfkit is unsupported on this platform") +} diff --git a/vendor/github.com/containers/gvisor-tap-vsock/pkg/types/gvproxy_command.go b/vendor/github.com/containers/gvisor-tap-vsock/pkg/types/gvproxy_command.go new file mode 100644 index 0000000000..8aa72e7eae --- /dev/null +++ b/vendor/github.com/containers/gvisor-tap-vsock/pkg/types/gvproxy_command.go @@ -0,0 +1,189 @@ +package types + +import ( + "os/exec" + "strconv" +) + +type GvproxyCommand struct { + // Print packets on stderr + Debug bool + + // Length of packet + // Larger packets 
means less packets to exchange for the same amount of data (and less protocol overhead) + MTU int + + // Values passed in by forward-xxx flags in commandline (forward-xxx:info) + forwardInfo map[string][]string + + // List of endpoints the user wants to listen to + endpoints []string + + // Map of different sockets provided by user (socket-type flag:socket) + sockets map[string]string + + // File where gvproxy's pid is stored + PidFile string + + // SSHPort to access the guest VM + SSHPort int +} + +func NewGvproxyCommand() GvproxyCommand { + return GvproxyCommand{ + MTU: 1500, + SSHPort: 2222, + endpoints: []string{}, + forwardInfo: map[string][]string{}, + sockets: map[string]string{}, + } +} + +func (c *GvproxyCommand) checkSocketsInitialized() { + if len(c.sockets) < 1 { + c.sockets = map[string]string{} + } +} + +func (c *GvproxyCommand) checkForwardInfoInitialized() { + if len(c.forwardInfo) < 1 { + c.forwardInfo = map[string][]string{} + } +} + +func (c *GvproxyCommand) AddEndpoint(endpoint string) { + if len(c.endpoints) < 1 { + c.endpoints = []string{} + } + + c.endpoints = append(c.endpoints, endpoint) +} + +func (c *GvproxyCommand) AddVpnkitSocket(socket string) { + c.checkSocketsInitialized() + c.sockets["listen-vpnkit"] = socket +} + +func (c *GvproxyCommand) AddQemuSocket(socket string) { + c.checkSocketsInitialized() + c.sockets["listen-qemu"] = socket +} + +func (c *GvproxyCommand) AddBessSocket(socket string) { + c.checkSocketsInitialized() + c.sockets["listen-bess"] = socket +} + +func (c *GvproxyCommand) AddStdioSocket(socket string) { + c.checkSocketsInitialized() + c.sockets["listen-stdio"] = socket +} + +func (c *GvproxyCommand) AddVfkitSocket(socket string) { + c.checkSocketsInitialized() + c.sockets["listen-vfkit"] = socket +} + +func (c *GvproxyCommand) addForwardInfo(flag, value string) { + c.forwardInfo[flag] = append(c.forwardInfo[flag], value) +} + +func (c *GvproxyCommand) AddForwardSock(socket string) { + c.checkForwardInfoInitialized() + c.addForwardInfo("forward-sock", socket) +} + +func (c *GvproxyCommand) AddForwardDest(dest string) { + c.checkForwardInfoInitialized() + c.addForwardInfo("forward-dest", dest) +} + +func (c *GvproxyCommand) AddForwardUser(user string) { + c.checkForwardInfoInitialized() + c.addForwardInfo("forward-user", user) +} + +func (c *GvproxyCommand) AddForwardIdentity(identity string) { + c.checkForwardInfoInitialized() + c.addForwardInfo("forward-identity", identity) +} + +// socketsToCmdline converts Command.sockets to a commandline format +func (c *GvproxyCommand) socketsToCmdline() []string { + args := []string{} + + for socketFlag, socket := range c.sockets { + if socket != "" { + args = append(args, "-"+socketFlag, socket) + } + } + + return args +} + +// forwardInfoToCmdline converts Command.forwardInfo to a commandline format +func (c *GvproxyCommand) forwardInfoToCmdline() []string { + args := []string{} + + for forwardInfoFlag, forwardInfo := range c.forwardInfo { + for _, i := range forwardInfo { + if i != "" { + args = append(args, "-"+forwardInfoFlag, i) + } + } + } + + return args +} + +// endpointsToCmdline converts Command.endpoints to a commandline format +func (c *GvproxyCommand) endpointsToCmdline() []string { + args := []string{} + + for _, endpoint := range c.endpoints { + if endpoint != "" { + args = append(args, "-listen", endpoint) + } + } + + return args +} + +// ToCmdline converts Command to a properly formatted command for gvproxy based +// on its fields +func (c *GvproxyCommand) ToCmdline() []string { + 
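The new types.GvproxyCommand (its ToCmdline and Cmd methods continue just below) gives embedders a builder for gvproxy's command line instead of hand-assembling flags. A usage sketch; every path, port and binary location here is illustrative rather than taken from this repository:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/gvisor-tap-vsock/pkg/types"
)

func main() {
	cmd := types.NewGvproxyCommand() // defaults: MTU 1500, SSHPort 2222
	cmd.PidFile = "/tmp/gvproxy.pid"
	cmd.AddVfkitSocket("unixgram:///tmp/vfkit-net.sock")
	cmd.AddForwardSock("/tmp/podman.sock")
	cmd.AddForwardDest("/run/user/1000/podman/podman.sock")
	cmd.AddForwardUser("core")
	cmd.AddForwardIdentity("/home/core/.ssh/id_ed25519")

	// Flags come out in the order ToCmdline builds them:
	// endpoints, -debug, -mtu, -ssh-port, sockets, forward-*, -pid-file.
	fmt.Println(cmd.ToCmdline())

	if err := cmd.Cmd("/usr/libexec/gvproxy").Start(); err != nil {
		log.Fatal(err)
	}
}
```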
args := []string{} + + // listen (endpoints) + args = append(args, c.endpointsToCmdline()...) + + // debug + if c.Debug { + args = append(args, "-debug") + } + + // mtu + args = append(args, "-mtu", strconv.Itoa(c.MTU)) + + // ssh-port + args = append(args, "-ssh-port", strconv.Itoa(c.SSHPort)) + + // sockets + args = append(args, c.socketsToCmdline()...) + + // forward info + args = append(args, c.forwardInfoToCmdline()...) + + // pid-file + if c.PidFile != "" { + args = append(args, "-pid-file", c.PidFile) + } + + return args +} + +// Cmd converts Command to a commandline format and returns an exec.Cmd which +// can be executed by os/exec +func (c *GvproxyCommand) Cmd(gvproxyPath string) *exec.Cmd { + return exec.Command(gvproxyPath, c.ToCmdline()...) // #nosec G204 +} diff --git a/vendor/github.com/containers/gvisor-tap-vsock/pkg/virtualnetwork/conn.go b/vendor/github.com/containers/gvisor-tap-vsock/pkg/virtualnetwork/conn.go index aff8966c57..038d8c5e08 100644 --- a/vendor/github.com/containers/gvisor-tap-vsock/pkg/virtualnetwork/conn.go +++ b/vendor/github.com/containers/gvisor-tap-vsock/pkg/virtualnetwork/conn.go @@ -18,7 +18,7 @@ func (n *VirtualNetwork) Dial(network, addr string) (net.Conn, error) { } return gonet.DialTCP(n.stack, tcpip.FullAddress{ NIC: 1, - Addr: tcpip.Address(ip.To4()), + Addr: tcpip.AddrFrom4Slice(ip.To4()), Port: uint16(port), }, ipv4.ProtocolNumber) } @@ -32,7 +32,7 @@ func (n *VirtualNetwork) DialContextTCP(ctx context.Context, addr string) (net.C return gonet.DialContextTCP(ctx, n.stack, tcpip.FullAddress{ NIC: 1, - Addr: tcpip.Address(ip.To4()), + Addr: tcpip.AddrFrom4Slice(ip.To4()), Port: uint16(port), }, ipv4.ProtocolNumber) } @@ -44,7 +44,7 @@ func (n *VirtualNetwork) Listen(network, addr string) (net.Listener, error) { } return gonet.ListenTCP(n.stack, tcpip.FullAddress{ NIC: 1, - Addr: tcpip.Address(ip.To4()), + Addr: tcpip.AddrFrom4Slice(ip.To4()), Port: uint16(port), }, ipv4.ProtocolNumber) } diff --git a/vendor/github.com/containers/gvisor-tap-vsock/pkg/virtualnetwork/mux.go b/vendor/github.com/containers/gvisor-tap-vsock/pkg/virtualnetwork/mux.go index 0b9c416057..c671177caa 100644 --- a/vendor/github.com/containers/gvisor-tap-vsock/pkg/virtualnetwork/mux.go +++ b/vendor/github.com/containers/gvisor-tap-vsock/pkg/virtualnetwork/mux.go @@ -86,7 +86,7 @@ func (n *VirtualNetwork) Mux() *http.ServeMux { DialContext: func(ctx context.Context, network, address string) (net.Conn, error) { return gonet.DialContextTCP(ctx, n.stack, tcpip.FullAddress{ NIC: 1, - Addr: tcpip.Address(net.ParseIP(ip).To4()), + Addr: tcpip.AddrFrom4Slice(net.ParseIP(ip).To4()), Port: uint16(port), }, ipv4.ProtocolNumber) }, diff --git a/vendor/github.com/containers/gvisor-tap-vsock/pkg/virtualnetwork/services.go b/vendor/github.com/containers/gvisor-tap-vsock/pkg/virtualnetwork/services.go index ec2173a2e5..722d258154 100644 --- a/vendor/github.com/containers/gvisor-tap-vsock/pkg/virtualnetwork/services.go +++ b/vendor/github.com/containers/gvisor-tap-vsock/pkg/virtualnetwork/services.go @@ -53,7 +53,7 @@ func addServices(configuration *types.Configuration, s *stack.Stack, ipPool *tap func parseNATTable(configuration *types.Configuration) map[tcpip.Address]tcpip.Address { translation := make(map[tcpip.Address]tcpip.Address) for source, destination := range configuration.NAT { - translation[tcpip.Address(net.ParseIP(source).To4())] = tcpip.Address(net.ParseIP(destination).To4()) + translation[tcpip.AddrFrom4Slice(net.ParseIP(source).To4())] = 
tcpip.AddrFrom4Slice(net.ParseIP(destination).To4()) } return translation } @@ -61,7 +61,7 @@ func parseNATTable(configuration *types.Configuration) map[tcpip.Address]tcpip.A func dnsServer(configuration *types.Configuration, s *stack.Stack) (http.Handler, error) { udpConn, err := gonet.DialUDP(s, &tcpip.FullAddress{ NIC: 1, - Addr: tcpip.Address(net.ParseIP(configuration.GatewayIP).To4()), + Addr: tcpip.AddrFrom4Slice(net.ParseIP(configuration.GatewayIP).To4()), Port: uint16(53), }, nil, ipv4.ProtocolNumber) if err != nil { @@ -70,7 +70,7 @@ func dnsServer(configuration *types.Configuration, s *stack.Stack) (http.Handler tcpLn, err := gonet.ListenTCP(s, tcpip.FullAddress{ NIC: 1, - Addr: tcpip.Address(net.ParseIP(configuration.GatewayIP).To4()), + Addr: tcpip.AddrFrom4Slice(net.ParseIP(configuration.GatewayIP).To4()), Port: uint16(53), }, ipv4.ProtocolNumber) if err != nil { diff --git a/vendor/github.com/containers/gvisor-tap-vsock/pkg/virtualnetwork/virtualnetwork.go b/vendor/github.com/containers/gvisor-tap-vsock/pkg/virtualnetwork/virtualnetwork.go index 5958738774..4133717d84 100644 --- a/vendor/github.com/containers/gvisor-tap-vsock/pkg/virtualnetwork/virtualnetwork.go +++ b/vendor/github.com/containers/gvisor-tap-vsock/pkg/virtualnetwork/virtualnetwork.go @@ -115,7 +115,7 @@ func createStack(configuration *types.Configuration, endpoint stack.LinkEndpoint if err := s.AddProtocolAddress(1, tcpip.ProtocolAddress{ Protocol: ipv4.ProtocolNumber, - AddressWithPrefix: tcpip.Address(net.ParseIP(configuration.GatewayIP).To4()).WithPrefix(), + AddressWithPrefix: tcpip.AddrFrom4Slice(net.ParseIP(configuration.GatewayIP).To4()).WithPrefix(), }, stack.AddressProperties{}); err != nil { return nil, errors.New(err.String()) } @@ -128,14 +128,14 @@ func createStack(configuration *types.Configuration, endpoint stack.LinkEndpoint return nil, errors.Wrap(err, "cannot parse cidr") } - subnet, err := tcpip.NewSubnet(tcpip.Address(parsedSubnet.IP), tcpip.AddressMask(parsedSubnet.Mask)) + subnet, err := tcpip.NewSubnet(tcpip.AddrFromSlice(parsedSubnet.IP), tcpip.MaskFromBytes(parsedSubnet.Mask)) if err != nil { return nil, errors.Wrap(err, "cannot parse subnet") } s.SetRouteTable([]tcpip.Route{ { Destination: subnet, - Gateway: "", + Gateway: tcpip.Address{}, NIC: 1, }, }) diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index 06bea9fab7..95bc08d5c3 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -81,6 +81,7 @@ A not-so-up-to-date-list-that-may-be-actually-current: * https://addr.tools/ * https://dnscheck.tools/ * https://github.com/egbakou/domainverifier +* https://github.com/semihalev/sdns Send pull request if you want to be listed here. diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go index c1558b79c3..6d7e176054 100644 --- a/vendor/github.com/miekg/dns/defaults.go +++ b/vendor/github.com/miekg/dns/defaults.go @@ -5,6 +5,7 @@ import ( "net" "strconv" "strings" + "unicode" ) const hexDigit = "0123456789abcdef" @@ -330,8 +331,18 @@ func Fqdn(s string) string { // CanonicalName returns the domain name in canonical form. A name in canonical // form is lowercase and fully qualified. See Section 6.2 in RFC 4034. +// According to the RFC all uppercase US-ASCII letters in the owner name of the +// RR areeplaced by the corresponding lowercase US-ASCII letters. 
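The CanonicalName change this comment introduces (implementation directly below) narrows lowercasing to US-ASCII letters only, where 1.1.55 applied strings.ToLower to the whole name. A small before/after sketch; the example names are ours:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// ASCII uppercase is still folded and the name is still fully qualified.
	fmt.Println(dns.CanonicalName("WWW.Example.ORG")) // "www.example.org."

	// Non-ASCII octets are now preserved; 1.1.55 would have printed "é.example.".
	fmt.Println(dns.CanonicalName("É.example")) // "É.example."
}
```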
func CanonicalName(s string) string { - return strings.ToLower(Fqdn(s)) + var result strings.Builder + for _, ch := range s { + if unicode.IsUpper(ch) && (ch >= 0x00 && ch <= 0x7F) { + result.WriteRune(unicode.ToLower(ch)) + } else { + result.WriteRune(ch) + } + } + return Fqdn(result.String()) } // Copied from the official Go code. diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go index d5049a4f95..b05cf14e9e 100644 --- a/vendor/github.com/miekg/dns/msg.go +++ b/vendor/github.com/miekg/dns/msg.go @@ -896,23 +896,38 @@ func (dns *Msg) String() string { return " MsgHdr" } s := dns.MsgHdr.String() + " " - s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", " - s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", " - s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", " - s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" + if dns.MsgHdr.Opcode == OpcodeUpdate { + s += "ZONE: " + strconv.Itoa(len(dns.Question)) + ", " + s += "PREREQ: " + strconv.Itoa(len(dns.Answer)) + ", " + s += "UPDATE: " + strconv.Itoa(len(dns.Ns)) + ", " + s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" + } else { + s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", " + s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", " + s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", " + s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" + } opt := dns.IsEdns0() if opt != nil { // OPT PSEUDOSECTION s += opt.String() + "\n" } if len(dns.Question) > 0 { - s += "\n;; QUESTION SECTION:\n" + if dns.MsgHdr.Opcode == OpcodeUpdate { + s += "\n;; ZONE SECTION:\n" + } else { + s += "\n;; QUESTION SECTION:\n" + } for _, r := range dns.Question { s += r.String() + "\n" } } if len(dns.Answer) > 0 { - s += "\n;; ANSWER SECTION:\n" + if dns.MsgHdr.Opcode == OpcodeUpdate { + s += "\n;; PREREQUISITE SECTION:\n" + } else { + s += "\n;; ANSWER SECTION:\n" + } for _, r := range dns.Answer { if r != nil { s += r.String() + "\n" @@ -920,7 +935,11 @@ func (dns *Msg) String() string { } } if len(dns.Ns) > 0 { - s += "\n;; AUTHORITY SECTION:\n" + if dns.MsgHdr.Opcode == OpcodeUpdate { + s += "\n;; UPDATE SECTION:\n" + } else { + s += "\n;; AUTHORITY SECTION:\n" + } for _, r := range dns.Ns { if r != nil { s += r.String() + "\n" diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go index 03afeccda3..c9a03dec6d 100644 --- a/vendor/github.com/miekg/dns/types.go +++ b/vendor/github.com/miekg/dns/types.go @@ -236,6 +236,9 @@ var CertTypeToString = map[uint16]string{ CertOID: "OID", } +// Prefix for IPv4 encoded as IPv6 address +const ipv4InIPv6Prefix = "::ffff:" + //go:generate go run types_generate.go // Question holds a DNS question. Usually there is just one. 
While the @@ -751,6 +754,11 @@ func (rr *AAAA) String() string { if rr.AAAA == nil { return rr.Hdr.String() } + + if rr.AAAA.To4() != nil { + return rr.Hdr.String() + ipv4InIPv6Prefix + rr.AAAA.String() + } + return rr.Hdr.String() + rr.AAAA.String() } @@ -1517,7 +1525,7 @@ func (a *APLPrefix) str() string { case net.IPv6len: // add prefix for IPv4-mapped IPv6 if v4 := a.Network.IP.To4(); v4 != nil { - sb.WriteString("::ffff:") + sb.WriteString(ipv4InIPv6Prefix) } sb.WriteString(a.Network.IP.String()) } diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index 5891044a38..a091136629 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. -var Version = v{1, 1, 55} +var Version = v{1, 1, 56} // v holds the version of this library. type v struct { diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index f4671ec1c7..cb72bd6f2b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,33 @@ +## 2.11.0 + +In prior versions of Ginkgo specs the CLI filter flags (e.g. `--focus`, `--label-filter`) would _override_ any programmatic focus. This behavior has proved surprising and confusing in at least the following ways: + +- users cannot combine programmatic filters and CLI filters to more efficiently select subsets of tests +- CLI filters can override programmatic focus on CI systems resulting in an exit code of 0 despite the presence of (incorrectly!) committed focused specs. + +Going forward Ginkgo will AND all programmatic and CLI filters. Moreover, the presence of any programmatic focused tests will always result in a non-zero exit code. + +This change is technically a change in Ginkgo's external contract and may require some users to make changes to successfully adopt. Specifically: it's possible some users were intentionally using CLI filters to override programmatic focus. If this is you please open an issue so we can explore solutions to the underlying problem you are trying to solve. + +### Fixes +- Programmatic focus is no longer overwrriten by CLI filters [d6bba86] + +### Maintenance +- Bump github.com/onsi/gomega from 1.27.7 to 1.27.8 (#1218) [4a70a38] +- Bump golang.org/x/sys from 0.8.0 to 0.9.0 (#1219) [97eda4d] + +## 2.10.0 + +### Features +- feat(ginkgo/generators): add --tags flag (#1216) [a782a77] + adds a new --tags flag to ginkgo generate + +### Fixes +- Fix broken link of MIGRATING_TO_V2.md (#1217) [548d78e] + +### Maintenance +- Bump golang.org/x/tools from 0.9.1 to 0.9.3 (#1215) [2b76a5e] + ## 2.9.7 ### Fixes diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go index 48d23f9191..be01dec979 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -32,6 +32,9 @@ func BuildGenerateCommand() command.Command { {Name: "template-data", KeyPath: "CustomTemplateData", UsageArgument: "template-data-file", Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"}, + {Name: "tags", KeyPath: "Tags", + UsageArgument: "build-tags", + Usage: "If specified, generate will create a test file that uses the given build tags (i.e. 
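Given the new --tags flag registered above (and the getBuildTags helper plus template wiring that follow), `ginkgo generate --tags e2e foo` should now emit a spec file that starts with the corresponding build constraint. A sketch of the expected output, assuming the default dot-imports and a package named foo; the trivial It body is ours so the snippet stands alone (the template itself emits an empty Describe):

```go
//go:build e2e

package foo_test

import (
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = Describe("Foo", func() {
	It("placeholder", func() {
		Expect(true).To(BeTrue())
	})
})
```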
`--tags e2e,!unit` will add `//go:build e2e,!unit`)"}, }, &conf, types.GinkgoFlagSections{}, @@ -59,6 +62,7 @@ You can also pass a of the form "file.go" and generate will emit "fil } type specData struct { + BuildTags string Package string Subject string PackageImportPath string @@ -93,6 +97,7 @@ func generateTestFileForSubject(subject string, conf GeneratorsConfig) { } data := specData{ + BuildTags: getBuildTags(conf.Tags), Package: determinePackageName(packageName, conf.Internal), Subject: formattedName, PackageImportPath: getPackageImportPath(), diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go index c3470adbfd..4dab07d036 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go @@ -1,6 +1,7 @@ package generators -var specText = `package {{.Package}} +var specText = `{{.BuildTags}} +package {{.Package}} import ( {{.GinkgoImport}} @@ -14,7 +15,8 @@ var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { }) ` -var agoutiSpecText = `package {{.Package}} +var agoutiSpecText = `{{.BuildTags}} +package {{.Package}} import ( {{.GinkgoImport}} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go index 3046a4487a..28c7aa6f43 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go @@ -1,6 +1,7 @@ package generators import ( + "fmt" "go/build" "os" "path/filepath" @@ -14,6 +15,7 @@ type GeneratorsConfig struct { Agouti, NoDot, Internal bool CustomTemplate string CustomTemplateData string + Tags string } func getPackageAndFormattedName() (string, string, string) { @@ -62,3 +64,13 @@ func determinePackageName(name string, internal bool) string { return name + "_test" } + +// getBuildTags returns the resultant string to be added. +// If the input string is not empty, then returns a `//go:build {}` string, +// otherwise returns an empty string. +func getBuildTags(tags string) string { + if tags != "" { + return fmt.Sprintf("//go:build %s\n", tags) + } + return "" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go index 966ea0c1a2..e3da7d14dd 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go @@ -8,22 +8,22 @@ import ( ) /* - If a container marked as focus has a descendant that is also marked as focus, Ginkgo's policy is to - unmark the container's focus. This gives developers a more intuitive experience when debugging specs. - It is common to focus a container to just run a subset of specs, then identify the specific specs within the container to focus - - this policy allows the developer to simply focus those specific specs and not need to go back and turn the focus off of the container: - - As a common example, consider: - - FDescribe("something to debug", function() { - It("works", function() {...}) - It("works", function() {...}) - FIt("doesn't work", function() {...}) - It("works", function() {...}) - }) - - here the developer's intent is to focus in on the `"doesn't work"` spec and not to run the adjacent specs in the focused `"something to debug"` container. - The nested policy applied by this function enables this behavior. 
+If a container marked as focus has a descendant that is also marked as focus, Ginkgo's policy is to +unmark the container's focus. This gives developers a more intuitive experience when debugging specs. +It is common to focus a container to just run a subset of specs, then identify the specific specs within the container to focus - +this policy allows the developer to simply focus those specific specs and not need to go back and turn the focus off of the container: + +As a common example, consider: + + FDescribe("something to debug", function() { + It("works", function() {...}) + It("works", function() {...}) + FIt("doesn't work", function() {...}) + It("works", function() {...}) + }) + +here the developer's intent is to focus in on the `"doesn't work"` spec and not to run the adjacent specs in the focused `"something to debug"` container. +The nested policy applied by this function enables this behavior. */ func ApplyNestedFocusPolicyToTree(tree *TreeNode) { var walkTree func(tree *TreeNode) bool @@ -44,46 +44,43 @@ func ApplyNestedFocusPolicyToTree(tree *TreeNode) { } /* - Ginkgo supports focussing specs using `FIt`, `FDescribe`, etc. - this is called "programmatic focus" - It also supports focussing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text - and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs. +Ginkgo supports focussing specs using `FIt`, `FDescribe`, etc. - this is called "programmatic focus" +It also supports focussing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs. - If any of the CLI flags are provided they take precedence. The file filters run first followed by the regex filters. +When both programmatic and file filters are provided their results are ANDed together. If multiple kinds of filters are provided, the file filters run first followed by the regex filters. - This function sets the `Skip` property on specs by applying Ginkgo's focus policy: - - If there are no CLI arguments and no programmatic focus, do nothing. - - If there are no CLI arguments but a spec somewhere has programmatic focus, skip any specs that have no programmatic focus. - - If there are CLI arguments parse them and skip any specs that either don't match the focus filters or do match the skip filters. +This function sets the `Skip` property on specs by applying Ginkgo's focus policy: +- If there are no CLI arguments and no programmatic focus, do nothing. +- If a spec somewhere has programmatic focus skip any specs that have no programmatic focus. +- If there are CLI arguments parse them and skip any specs that either don't match the focus filters or do match the skip filters. - *Note:* specs with pending nodes are Skipped when created by NewSpec. +*Note:* specs with pending nodes are Skipped when created by NewSpec. 
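ApplyFocusToSpecs, rewritten in the hunk that continues below, now detects programmatic focus before looking at CLI flags, so FIt/FDescribe are ANDed with --focus/--skip/--label-filter instead of being silently overridden, and any programmatic focus still forces a non-zero exit code. With a suite like the following (names ours), `ginkgo --label-filter=fast` runs only spec B under 2.11.0, whereas 2.9.x would have run both because the CLI filter used to take precedence:

```go
package demo_test

import (
	. "github.com/onsi/ginkgo/v2"
)

var _ = Describe("filtering demo", Label("fast"), func() {
	It("A", func() {})  // skipped: a focused spec exists in the suite
	FIt("B", func() {}) // runs: focused and matching the label filter
})
```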
*/ func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) { focusString := strings.Join(suiteConfig.FocusStrings, "|") skipString := strings.Join(suiteConfig.SkipStrings, "|") - hasFocusCLIFlags := focusString != "" || skipString != "" || len(suiteConfig.SkipFiles) > 0 || len(suiteConfig.FocusFiles) > 0 || suiteConfig.LabelFilter != "" - type SkipCheck func(spec Spec) bool // by default, skip any specs marked pending skipChecks := []SkipCheck{func(spec Spec) bool { return spec.Nodes.HasNodeMarkedPending() }} hasProgrammaticFocus := false - if !hasFocusCLIFlags { - // check for programmatic focus - for _, spec := range specs { - if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() { - skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() }) - hasProgrammaticFocus = true - break - } + for _, spec := range specs { + if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() { + hasProgrammaticFocus = true + break } } + if hasProgrammaticFocus { + skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() }) + } + if suiteConfig.LabelFilter != "" { labelFilter, _ := types.ParseLabelFilter(suiteConfig.LabelFilter) - skipChecks = append(skipChecks, func(spec Spec) bool { - return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels())) + skipChecks = append(skipChecks, func(spec Spec) bool { + return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels())) }) } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 6bc46150e7..f895739b83 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.9.7" +const VERSION = "2.11.0" diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 9b83dd6d48..1526497b9f 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,22 @@ +## 1.27.10 + +### Fixes +- fix: go 1.21 adding goroutine ID to creator+location (#685) [bdc7803] + +## 1.27.9 + +### Fixes +- Prevent nil-dereference in format.Object for boxed nil error (#681) [3b31fc3] + +### Maintenance +- Bump golang.org/x/net from 0.11.0 to 0.12.0 (#679) [360849b] +- chore: use String() instead of fmt.Sprintf (#678) [86f3659] +- Bump golang.org/x/net from 0.10.0 to 0.11.0 (#674) [642ead0] +- chore: unnecessary use of fmt.Sprintf (#677) [ceb9ca6] +- Bump github.com/onsi/ginkgo/v2 from 2.10.0 to 2.11.0 (#675) [a2087d8] +- docs: fix ContainSubstring references (#673) [fc9a89f] +- Bump github.com/onsi/ginkgo/v2 from 2.9.7 to 2.10.0 (#671) [9076019] + ## 1.27.8 ### Fixes diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go index 56bdd053bb..6c1680638b 100644 --- a/vendor/github.com/onsi/gomega/format/format.go +++ b/vendor/github.com/onsi/gomega/format/format.go @@ -259,7 +259,7 @@ func Object(object interface{}, indentation uint) string { indent := strings.Repeat(Indent, int(indentation)) value := reflect.ValueOf(object) commonRepresentation := "" - if err, ok := object.(error); ok { + if err, ok := object.(error); ok && !isNilValue(value) { // isNilValue check needed here to avoid nil deref due to boxed nil commonRepresentation += "\n" + IndentString(err.Error(), indentation) + "\n" 
+ indent } return fmt.Sprintf("%s<%s>: %s%s", indent, formatType(value), commonRepresentation, formatValue(value, indentation)) @@ -302,7 +302,7 @@ func formatType(v reflect.Value) string { case reflect.Map: return fmt.Sprintf("%s | len:%d", v.Type(), v.Len()) default: - return fmt.Sprintf("%s", v.Type()) + return v.Type().String() } } diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index bc7ec293de..1fd1803acf 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.27.8" +const GOMEGA_VERSION = "1.27.10" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index b832f3dbaf..bdaf62b56b 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -92,9 +92,9 @@ func Succeed() types.GomegaMatcher { // // These are valid use-cases: // -// Expect(err).Should(MatchError("an error")) //asserts that err.Error() == "an error" -// Expect(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual) -// Expect(err).Should(MatchError(ContainsSubstring("sprocket not found"))) // asserts that edrr.Error() contains substring "sprocket not found" +// Expect(err).Should(MatchError("an error")) //asserts that err.Error() == "an error" +// Expect(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual) +// Expect(err).Should(MatchError(ContainSubstring("sprocket not found"))) // asserts that edrr.Error() contains substring "sprocket not found" // // It is an error for err to be nil or an object that does not implement the // Error interface diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go index acffc8570f..93d4497c70 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go +++ b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go @@ -52,5 +52,5 @@ func (matcher *BeADirectoryMatcher) FailureMessage(actual interface{}) (message } func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, fmt.Sprintf("not be a directory")) + return format.Message(actual, "not be a directory") } diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go index 89441c8003..8fefc4deb7 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go +++ b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go @@ -52,5 +52,5 @@ func (matcher *BeARegularFileMatcher) FailureMessage(actual interface{}) (messag } func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, fmt.Sprintf("not be a regular file")) + return format.Message(actual, "not be a regular file") } diff --git a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go index ec6506b001..e2bdd28113 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go +++ b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go @@ -32,9 +32,9 @@ 
func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool, } func (matcher *BeAnExistingFileMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, fmt.Sprintf("to exist")) + return format.Message(actual, "to exist") } func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, fmt.Sprintf("not to exist")) + return format.Message(actual, "not to exist") } diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go index 97dd8f3d22..5b4f691c70 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go @@ -12,6 +12,8 @@ type Spec struct { Root *Root `json:"root,omitempty"` // Hostname configures the container's hostname. Hostname string `json:"hostname,omitempty"` + // Domainname configures the container's domainname. + Domainname string `json:"domainname,omitempty"` // Mounts configures additional mounts (on top of Root). Mounts []Mount `json:"mounts,omitempty"` // Hooks configures callbacks for container lifecycle events. @@ -317,6 +319,10 @@ type LinuxMemory struct { DisableOOMKiller *bool `json:"disableOOMKiller,omitempty"` // Enables hierarchical memory accounting UseHierarchy *bool `json:"useHierarchy,omitempty"` + // CheckBeforeUpdate enables checking if a new memory limit is lower + // than the current usage during update, and if so, rejecting the new + // limit. + CheckBeforeUpdate *bool `json:"checkBeforeUpdate,omitempty"` } // LinuxCPU for Linux cgroup 'cpu' resource management @@ -325,6 +331,9 @@ type LinuxCPU struct { Shares *uint64 `json:"shares,omitempty"` // CPU hardcap limit (in usecs). Allowed cpu time in a given period. Quota *int64 `json:"quota,omitempty"` + // CPU hardcap burst limit (in usecs). Allowed accumulated cpu time additionally for burst in a + // given period. + Burst *uint64 `json:"burst,omitempty"` // CPU period to be used for hardcapping (in usecs). Period *uint64 `json:"period,omitempty"` // How much time realtime scheduling may use (in usecs). @@ -632,6 +641,23 @@ type Arch string // LinuxSeccompFlag is a flag to pass to seccomp(2). type LinuxSeccompFlag string +const ( + // LinuxSeccompFlagLog is a seccomp flag to request all returned + // actions except SECCOMP_RET_ALLOW to be logged. An administrator may + // override this filter flag by preventing specific actions from being + // logged via the /proc/sys/kernel/seccomp/actions_logged file. (since + // Linux 4.14) + LinuxSeccompFlagLog LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_LOG" + + // LinuxSeccompFlagSpecAllow can be used to disable Speculative Store + // Bypass mitigation. (since Linux 4.17) + LinuxSeccompFlagSpecAllow LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_SPEC_ALLOW" + + // LinuxSeccompFlagWaitKillableRecv can be used to switch to the wait + // killable semantics. 
(since Linux 5.19) + LinuxSeccompFlagWaitKillableRecv LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV" +) + // Additional architectures permitted to be used for system calls // By default only the native architecture of the kernel is permitted const ( diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go index 596af0c2fd..8ae4227b9b 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go @@ -6,12 +6,12 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 1 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 0 + VersionMinor = 1 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 2 + VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-dev" + VersionDev = "-rc.1" ) // Version is the specification version that the package types support. diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go index 9ba6e10a4a..b419c761ed 100644 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -49,7 +49,8 @@ var supportedKexAlgos = []string{ // P384 and P521 are not constant-time yet, but since we don't // reuse ephemeral keys, using them for ECDH should be OK. kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA256, kexAlgoDH14SHA1, kexAlgoDH1SHA1, + kexAlgoDH14SHA256, kexAlgoDH16SHA512, kexAlgoDH14SHA1, + kexAlgoDH1SHA1, } // serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden @@ -59,8 +60,9 @@ var serverForbiddenKexAlgos = map[string]struct{}{ kexAlgoDHGEXSHA256: {}, // server half implementation is only minimal to satisfy the automated tests } -// preferredKexAlgos specifies the default preference for key-exchange algorithms -// in preference order. +// preferredKexAlgos specifies the default preference for key-exchange +// algorithms in preference order. The diffie-hellman-group16-sha512 algorithm +// is disabled by default because it is a bit slower than the others. var preferredKexAlgos = []string{ kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, @@ -70,12 +72,12 @@ var preferredKexAlgos = []string{ // supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods // of authenticating servers) in preference order. var supportedHostKeyAlgos = []string{ - CertAlgoRSASHA512v01, CertAlgoRSASHA256v01, + CertAlgoRSASHA256v01, CertAlgoRSASHA512v01, CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, - KeyAlgoRSASHA512, KeyAlgoRSASHA256, + KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA, KeyAlgoDSA, KeyAlgoED25519, @@ -85,7 +87,7 @@ var supportedHostKeyAlgos = []string{ // This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed // because they have reached the end of their useful life. 
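// Illustrative sketch (not part of the vendored diff): the runtime-spec bump above adds
// Spec.Domainname, LinuxCPU.Burst and LinuxMemory.CheckBeforeUpdate. A minimal config
// using them could look like this; the concrete values are hypothetical.
package main

import (
	"encoding/json"
	"os"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	burst := uint64(50000)    // extra burstable CPU time per period, in usecs
	check := true             // reject a new memory limit lower than current usage
	limit := int64(256 << 20) // 256 MiB memory limit

	spec := specs.Spec{
		Version:    specs.Version,
		Hostname:   "demo",
		Domainname: "example.internal", // hypothetical domainname
		Linux: &specs.Linux{
			Resources: &specs.LinuxResources{
				CPU: &specs.LinuxCPU{
					Burst: &burst,
				},
				Memory: &specs.LinuxMemory{
					Limit:             &limit,
					CheckBeforeUpdate: &check,
				},
			},
		},
	}
	// Emit the config.json fragment for inspection.
	_ = json.NewEncoder(os.Stdout).Encode(spec)
}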
var supportedMACs = []string{ - "hmac-sha2-512-etm@openssh.com", "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha2-512", "hmac-sha1", "hmac-sha1-96", + "hmac-sha2-256-etm@openssh.com", "hmac-sha2-512-etm@openssh.com", "hmac-sha2-256", "hmac-sha2-512", "hmac-sha1", "hmac-sha1-96", } var supportedCompressions = []string{compressionNone} @@ -119,6 +121,13 @@ func algorithmsForKeyFormat(keyFormat string) []string { } } +// isRSA returns whether algo is a supported RSA algorithm, including certificate +// algorithms. +func isRSA(algo string) bool { + algos := algorithmsForKeyFormat(KeyAlgoRSA) + return contains(algos, underlyingAlgo(algo)) +} + // supportedPubKeyAuthAlgos specifies the supported client public key // authentication algorithms. Note that this doesn't include certificate types // since those use the underlying algorithm. This list is sent to the client if @@ -262,16 +271,16 @@ type Config struct { // unspecified, a size suitable for the chosen cipher is used. RekeyThreshold uint64 - // The allowed key exchanges algorithms. If unspecified then a - // default set of algorithms is used. + // The allowed key exchanges algorithms. If unspecified then a default set + // of algorithms is used. Unsupported values are silently ignored. KeyExchanges []string - // The allowed cipher algorithms. If unspecified then a sensible - // default is used. + // The allowed cipher algorithms. If unspecified then a sensible default is + // used. Unsupported values are silently ignored. Ciphers []string - // The allowed MAC algorithms. If unspecified then a sensible default - // is used. + // The allowed MAC algorithms. If unspecified then a sensible default is + // used. Unsupported values are silently ignored. MACs []string } @@ -288,7 +297,7 @@ func (c *Config) SetDefaults() { var ciphers []string for _, c := range c.Ciphers { if cipherModes[c] != nil { - // reject the cipher if we have no cipherModes definition + // Ignore the cipher if we have no cipherModes definition. ciphers = append(ciphers, c) } } @@ -297,10 +306,26 @@ func (c *Config) SetDefaults() { if c.KeyExchanges == nil { c.KeyExchanges = preferredKexAlgos } + var kexs []string + for _, k := range c.KeyExchanges { + if kexAlgoMap[k] != nil { + // Ignore the KEX if we have no kexAlgoMap definition. + kexs = append(kexs, k) + } + } + c.KeyExchanges = kexs if c.MACs == nil { c.MACs = supportedMACs } + var macs []string + for _, m := range c.MACs { + if macModes[m] != nil { + // Ignore the MAC if we have no macModes definition. + macs = append(macs, m) + } + } + c.MACs = macs if c.RekeyThreshold == 0 { // cipher specific default diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go index 927a90cd46..8a05f79902 100644 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ b/vendor/golang.org/x/crypto/ssh/kex.go @@ -23,6 +23,7 @@ const ( kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" kexAlgoDH14SHA256 = "diffie-hellman-group14-sha256" + kexAlgoDH16SHA512 = "diffie-hellman-group16-sha512" kexAlgoECDH256 = "ecdh-sha2-nistp256" kexAlgoECDH384 = "ecdh-sha2-nistp384" kexAlgoECDH521 = "ecdh-sha2-nistp521" @@ -430,6 +431,17 @@ func init() { hashFunc: crypto.SHA256, } + // This is the group called diffie-hellman-group16-sha512 in RFC + // 8268 and Oakley Group 16 in RFC 3526. 
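// Illustrative sketch (not part of the vendored diff): diffie-hellman-group16-sha512 is
// now supported by x/crypto/ssh but, per the comment above, kept out of the default
// preference list, so a client opts in via Config.KeyExchanges. Per the updated Config
// docs, unsupported names are silently dropped by SetDefaults rather than causing errors.
package main

import "golang.org/x/crypto/ssh"

func newClientConfig(user string, auth []ssh.AuthMethod, hostKeys ssh.HostKeyCallback) *ssh.ClientConfig {
	return &ssh.ClientConfig{
		User:            user,
		Auth:            auth,
		HostKeyCallback: hostKeys,
		Config: ssh.Config{
			// Prefer curve25519, but allow the new group16 exchange as a fallback.
			KeyExchanges: []string{
				"curve25519-sha256",
				"diffie-hellman-group16-sha512",
			},
		},
	}
}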
+ p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF", 16) + + kexAlgoMap[kexAlgoDH16SHA512] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + hashFunc: crypto.SHA512, + } + kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index 9e3870292f..b21322affa 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -370,6 +370,25 @@ func gssExchangeToken(gssapiConfig *GSSAPIWithMICConfig, firstToken []byte, s *c return authErr, perms, nil } +// isAlgoCompatible checks if the signature format is compatible with the +// selected algorithm taking into account edge cases that occur with old +// clients. +func isAlgoCompatible(algo, sigFormat string) bool { + // Compatibility for old clients. + // + // For certificate authentication with OpenSSH 7.2-7.7 signature format can + // be rsa-sha2-256 or rsa-sha2-512 for the algorithm + // ssh-rsa-cert-v01@openssh.com. + // + // With gpg-agent < 2.2.6 the algorithm can be rsa-sha2-256 or rsa-sha2-512 + // for signature format ssh-rsa. + if isRSA(algo) && isRSA(sigFormat) { + return true + } + // Standard case: the underlying algorithm must match the signature format. + return underlyingAlgo(algo) == sigFormat +} + // ServerAuthError represents server authentication errors and is // sometimes returned by NewServerConn. It appends any authentication // errors that may occur, and is returned if all of the authentication @@ -567,7 +586,7 @@ userAuthLoop: authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) break } - if underlyingAlgo(algo) != sig.Format { + if !isAlgoCompatible(algo, sig.Format) { authErr = fmt.Errorf("ssh: signature %q not compatible with selected algorithm %q", sig.Format, algo) break } diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go index a30a22bf20..9a2dfd33a7 100644 --- a/vendor/golang.org/x/mod/semver/semver.go +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -140,7 +140,7 @@ func Compare(v, w string) int { // Max canonicalizes its arguments and then returns the version string // that compares greater. // -// Deprecated: use Compare instead. In most cases, returning a canonicalized +// Deprecated: use [Compare] instead. In most cases, returning a canonicalized // version is not expected or desired. 
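// Illustrative sketch (not part of the vendored diff): the semver doc changes above point
// callers at Compare and ByVersion/Sort instead of the deprecated Max.
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	versions := []string{"v1.10.0", "v1.2.3", "v1.2.3-rc.1"}
	semver.Sort(versions)
	fmt.Println(versions) // [v1.2.3-rc.1 v1.2.3 v1.10.0]

	// Instead of the deprecated Max, compare and pick the newer version directly.
	if semver.Compare("v1.2.3", "v1.10.0") < 0 {
		fmt.Println("v1.10.0 is newer")
	}
}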
func Max(v, w string) string { v = Canonical(v) @@ -151,7 +151,7 @@ func Max(v, w string) string { return w } -// ByVersion implements sort.Interface for sorting semantic version strings. +// ByVersion implements [sort.Interface] for sorting semantic version strings. type ByVersion []string func (vs ByVersion) Len() int { return len(vs) } @@ -164,7 +164,7 @@ func (vs ByVersion) Less(i, j int) bool { return vs[i] < vs[j] } -// Sort sorts a list of semantic version strings using ByVersion. +// Sort sorts a list of semantic version strings using [ByVersion]. func Sort(list []string) { sort.Sort(ByVersion(list)) } diff --git a/vendor/golang.org/x/net/html/render.go b/vendor/golang.org/x/net/html/render.go index 8b28031905..e8c1233455 100644 --- a/vendor/golang.org/x/net/html/render.go +++ b/vendor/golang.org/x/net/html/render.go @@ -194,9 +194,8 @@ func render1(w writer, n *Node) error { } } - // Render any child nodes. - switch n.Data { - case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp": + // Render any child nodes + if childTextNodesAreLiteral(n) { for c := n.FirstChild; c != nil; c = c.NextSibling { if c.Type == TextNode { if _, err := w.WriteString(c.Data); err != nil { @@ -213,7 +212,7 @@ func render1(w writer, n *Node) error { // last element in the file, with no closing tag. return plaintextAbort } - default: + } else { for c := n.FirstChild; c != nil; c = c.NextSibling { if err := render1(w, c); err != nil { return err @@ -231,6 +230,27 @@ func render1(w writer, n *Node) error { return w.WriteByte('>') } +func childTextNodesAreLiteral(n *Node) bool { + // Per WHATWG HTML 13.3, if the parent of the current node is a style, + // script, xmp, iframe, noembed, noframes, or plaintext element, and the + // current node is a text node, append the value of the node's data + // literally. The specification is not explicit about it, but we only + // enforce this if we are in the HTML namespace (i.e. when the namespace is + // ""). + // NOTE: we also always include noscript elements, although the + // specification states that they should only be rendered as such if + // scripting is enabled for the node (which is not something we track). + if n.Namespace != "" { + return false + } + switch n.Data { + case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp": + return true + default: + return false + } +} + // writeQuoted writes s to w surrounded by quotes. Normally it will use double // quotes, but if s contains a double quote, it will use single quotes. // It is used for writing the identifiers in a doctype declaration. diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile deleted file mode 100644 index 8512245952..0000000000 --- a/vendor/golang.org/x/net/http2/Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -# -# This Dockerfile builds a recent curl with HTTP/2 client support, using -# a recent nghttp2 build. -# -# See the Makefile for how to tag it. If Docker and that image is found, the -# Go tests use this curl binary for integration tests. 
-# - -FROM ubuntu:trusty - -RUN apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y git-core build-essential wget - -RUN apt-get install -y --no-install-recommends \ - autotools-dev libtool pkg-config zlib1g-dev \ - libcunit1-dev libssl-dev libxml2-dev libevent-dev \ - automake autoconf - -# The list of packages nghttp2 recommends for h2load: -RUN apt-get install -y --no-install-recommends make binutils \ - autoconf automake autotools-dev \ - libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \ - libev-dev libevent-dev libjansson-dev libjemalloc-dev \ - cython python3.4-dev python-setuptools - -# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached: -ENV NGHTTP2_VER 895da9a -RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git - -WORKDIR /root/nghttp2 -RUN git reset --hard $NGHTTP2_VER -RUN autoreconf -i -RUN automake -RUN autoconf -RUN ./configure -RUN make -RUN make install - -WORKDIR /root -RUN wget https://curl.se/download/curl-7.45.0.tar.gz -RUN tar -zxvf curl-7.45.0.tar.gz -WORKDIR /root/curl-7.45.0 -RUN ./configure --with-ssl --with-nghttp2=/usr/local -RUN make -RUN make install -RUN ldconfig - -CMD ["-h"] -ENTRYPOINT ["/usr/local/bin/curl"] - diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile deleted file mode 100644 index 55fd826f77..0000000000 --- a/vendor/golang.org/x/net/http2/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -curlimage: - docker build -t gohttp2/curl . - diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 033b6e6db6..6d5e008874 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -1012,14 +1012,6 @@ func (sc *serverConn) serve() { } } -func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) { - select { - case <-sc.doneServing: - case <-sharedCh: - close(privateCh) - } -} - type serverMessage int // Message values sent to serveMsgCh. diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index b9632380e7..4515b22c4a 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -19,6 +19,7 @@ import ( "io/fs" "log" "math" + "math/bits" mathrand "math/rand" "net" "net/http" @@ -290,8 +291,7 @@ func (t *Transport) initConnPool() { // HTTP/2 server. type ClientConn struct { t *Transport - tconn net.Conn // usually *tls.Conn, except specialized impls - tconnClosed bool + tconn net.Conn // usually *tls.Conn, except specialized impls tlsState *tls.ConnectionState // nil only for specialized impls reused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request @@ -518,11 +518,14 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { func authorityAddr(scheme string, authority string) (addr string) { host, port, err := net.SplitHostPort(authority) if err != nil { // authority didn't have a port + host = authority + port = "" + } + if port == "" { // authority's port was empty port = "443" if scheme == "http" { port = "80" } - host = authority } if a, err := idna.ToASCII(host); err == nil { host = a @@ -1677,7 +1680,27 @@ func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int { return int(n) // doesn't truncate; max is 512K } -var bufPool sync.Pool // of *[]byte +// Seven bufPools manage different frame sizes. 
This helps to avoid scenarios where long-running +// streaming requests using small frame sizes occupy large buffers initially allocated for prior +// requests needing big buffers. The size ranges are as follows: +// {0 KB, 16 KB], {16 KB, 32 KB], {32 KB, 64 KB], {64 KB, 128 KB], {128 KB, 256 KB], +// {256 KB, 512 KB], {512 KB, infinity} +// In practice, the maximum scratch buffer size should not exceed 512 KB due to +// frameScratchBufferLen(maxFrameSize), thus the "infinity pool" should never be used. +// It exists mainly as a safety measure, for potential future increases in max buffer size. +var bufPools [7]sync.Pool // of *[]byte +func bufPoolIndex(size int) int { + if size <= 16384 { + return 0 + } + size -= 1 + bits := bits.Len(uint(size)) + index := bits - 14 + if index >= len(bufPools) { + return len(bufPools) - 1 + } + return index +} func (cs *clientStream) writeRequestBody(req *http.Request) (err error) { cc := cs.cc @@ -1695,12 +1718,13 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) { // Scratch buffer for reading into & writing from. scratchLen := cs.frameScratchBufferLen(maxFrameSize) var buf []byte - if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen { - defer bufPool.Put(bp) + index := bufPoolIndex(scratchLen) + if bp, ok := bufPools[index].Get().(*[]byte); ok && len(*bp) >= scratchLen { + defer bufPools[index].Put(bp) buf = *bp } else { buf = make([]byte, scratchLen) - defer bufPool.Put(&buf) + defer bufPools[index].Put(&buf) } var sawEOF bool diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 83f112c4c8..4756ad5f79 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -38,7 +38,7 @@ var X86 struct { HasAVX512F bool // Advanced vector extension 512 Foundation Instructions HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions - HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions Instructions + HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions @@ -54,6 +54,9 @@ var X86 struct { HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2 HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions + HasAMXTile bool // Advanced Matrix Extension Tile instructions + HasAMXInt8 bool // Advanced Matrix Extension Int8 instructions + HasAMXBF16 bool // Advanced Matrix Extension BFloat16 instructions HasBMI1 bool // Bit manipulation instruction set 1 HasBMI2 bool // Bit manipulation instruction set 2 HasCX16 bool // Compare and exchange 16 Bytes diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index f5aacfc825..2dcde8285d 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -37,6 +37,9 @@ func initOptions() { {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2}, {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG}, {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, + {Name: "amxtile", Feature: &X86.HasAMXTile}, + {Name: "amxint8", Feature: &X86.HasAMXInt8}, + {Name: 
"amxbf16", Feature: &X86.HasAMXBF16}, {Name: "bmi1", Feature: &X86.HasBMI1}, {Name: "bmi2", Feature: &X86.HasBMI2}, {Name: "cx16", Feature: &X86.HasCX16}, @@ -138,6 +141,10 @@ func archInit() { eax71, _, _, _ := cpuid(7, 1) X86.HasAVX512BF16 = isSet(5, eax71) } + + X86.HasAMXTile = isSet(24, edx7) + X86.HasAMXInt8 = isSet(25, edx7) + X86.HasAMXBF16 = isSet(22, edx7) } func isSet(bitpos uint, value uint32) bool { diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 0c4d14929a..47fa6a7ebd 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -583,6 +583,7 @@ ccflags="$@" $2 ~ /^PERF_/ || $2 ~ /^SECCOMP_MODE_/ || $2 ~ /^SEEK_/ || + $2 ~ /^SCHED_/ || $2 ~ /^SPLICE_/ || $2 ~ /^SYNC_FILE_RANGE_/ || $2 !~ /IOC_MAGIC/ && @@ -624,7 +625,7 @@ ccflags="$@" $2 ~ /^MEM/ || $2 ~ /^WG/ || $2 ~ /^FIB_RULE_/ || - $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} + $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE|IOMIN$|IOOPT$|ALIGNOFF$|DISCARD|ROTATIONAL$|ZEROOUT$|GETDISKSEQ$)/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^__WCOREFLAG$/ {next} $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go new file mode 100644 index 0000000000..ca0513632e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris +// +build aix darwin dragonfly freebsd openbsd solaris + +package unix + +var mapper = &mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, +} diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index 86213c05d6..fa93d0aa90 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build linux -// +build linux +//go:build linux || netbsd +// +build linux netbsd package unix @@ -14,8 +14,17 @@ type mremapMmapper struct { mremap func(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error) } +var mapper = &mremapMmapper{ + mmapper: mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, + }, + mremap: mremap, +} + func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { - if newLength <= 0 || len(oldData) == 0 || len(oldData) != cap(oldData) || flags&MREMAP_FIXED != 0 { + if newLength <= 0 || len(oldData) == 0 || len(oldData) != cap(oldData) || flags&mremapFixed != 0 { return nil, EINVAL } @@ -32,9 +41,13 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [ } bNew := unsafe.Slice((*byte)(unsafe.Pointer(newAddr)), newLength) pNew := &bNew[cap(bNew)-1] - if flags&MREMAP_DONTUNMAP == 0 { + if flags&mremapDontunmap == 0 { delete(m.active, pOld) } m.active[pNew] = bNew return bNew, nil } + +func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { + return mapper.Mremap(oldData, newLength, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index c406ae00f4..9a6e5acacb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -535,21 +535,6 @@ func Fsync(fd int) error { //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = nsendmsg //sys munmap(addr uintptr, length uintptr) (err error) - -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - //sys Madvise(b []byte, advice int) (err error) //sys Mprotect(b []byte, prot int) (err error) //sys Mlock(b []byte) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 7705c3270b..4217de518b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -601,20 +601,6 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { // Gethostuuid(uuid *byte, timeout *Timespec) (err error) // Ptrace(req int, pid int, addr uintptr, data int) (ret uintptr, err error) -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - //sys Madvise(b []byte, behav int) (err error) //sys Mlock(b []byte) (err error) //sys Mlockall(flags int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 206921504c..135cc3cd75 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -510,30 +510,36 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { return nil, err } - // Find size. 
- n := uintptr(0) - if err := sysctl(mib, nil, &n, nil, 0); err != nil { - return nil, err - } - if n == 0 { - return nil, nil - } - if n%SizeofKinfoProc != 0 { - return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) - } + for { + // Find size. + n := uintptr(0) + if err := sysctl(mib, nil, &n, nil, 0); err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + if n%SizeofKinfoProc != 0 { + return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) + } - // Read into buffer of that size. - buf := make([]KinfoProc, n/SizeofKinfoProc) - if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil { - return nil, err - } - if n%SizeofKinfoProc != 0 { - return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) - } + // Read into buffer of that size. + buf := make([]KinfoProc, n/SizeofKinfoProc) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil { + if err == ENOMEM { + // Process table grew. Try again. + continue + } + return nil, err + } + if n%SizeofKinfoProc != 0 { + return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) + } - // The actual call may return less than the original reported required - // size so ensure we deal with that. - return buf[:n/SizeofKinfoProc], nil + // The actual call may return less than the original reported required + // size so ensure we deal with that. + return buf[:n/SizeofKinfoProc], nil + } } //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 39de5f1430..0ba030197f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1885,7 +1885,7 @@ func Getpgrp() (pid int) { //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) -//sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 +//sys pselect6(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *sigset_argpack) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Removexattr(path string, attr string) (err error) //sys Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) @@ -2125,28 +2125,6 @@ func writevRacedetect(iovecs []Iovec, n int) { // mmap varies by architecture; see syscall_linux_*.go. 
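// Illustrative sketch (not part of the vendored diff): with the mapper refactor above,
// Mmap/Munmap are defined once in syscall_unix.go and Mremap gains an exported wrapper.
// Linux is assumed here; MREMAP_MAYMOVE is Linux-specific.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Anonymous read/write mapping of one page.
	mem, err := unix.Mmap(-1, 0, 4096, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_PRIVATE|unix.MAP_ANONYMOUS)
	if err != nil {
		panic(err)
	}
	mem[0] = 42

	// Grow the mapping to two pages, letting the kernel move it if needed.
	mem, err = unix.Mremap(mem, 8192, unix.MREMAP_MAYMOVE)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(mem), mem[0]) // 8192 42

	_ = unix.Munmap(mem)
}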
//sys munmap(addr uintptr, length uintptr) (err error) //sys mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error) - -var mapper = &mremapMmapper{ - mmapper: mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, - }, - mremap: mremap, -} - -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - -func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { - return mapper.Mremap(oldData, newLength, flags) -} - //sys Madvise(b []byte, advice int) (err error) //sys Mprotect(b []byte, prot int) (err error) //sys Mlock(b []byte) (err error) @@ -2155,6 +2133,12 @@ func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { //sys Munlock(b []byte) (err error) //sys Munlockall() (err error) +const ( + mremapFixed = MREMAP_FIXED + mremapDontunmap = MREMAP_DONTUNMAP + mremapMaymove = MREMAP_MAYMOVE +) + // Vmsplice splices user pages from a slice of Iovecs into a pipe specified by fd, // using the specified flags. func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) { @@ -2454,6 +2438,62 @@ func Getresgid() (rgid, egid, sgid int) { return int(r), int(e), int(s) } +// Pselect is a wrapper around the Linux pselect6 system call. +// This version does not modify the timeout argument. +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + // Per https://man7.org/linux/man-pages/man2/select.2.html#NOTES, + // The Linux pselect6() system call modifies its timeout argument. + // [Not modifying the argument] is the behavior required by POSIX.1-2001. + var mutableTimeout *Timespec + if timeout != nil { + mutableTimeout = new(Timespec) + *mutableTimeout = *timeout + } + + // The final argument of the pselect6() system call is not a + // sigset_t * pointer, but is instead a structure + var kernelMask *sigset_argpack + if sigmask != nil { + wordBits := 32 << (^uintptr(0) >> 63) // see math.intSize + + // A sigset stores one bit per signal, + // offset by 1 (because signal 0 does not exist). + // So the number of words needed is ⌈__C_NSIG - 1 / wordBits⌉. + sigsetWords := (_C__NSIG - 1 + wordBits - 1) / (wordBits) + + sigsetBytes := uintptr(sigsetWords * (wordBits / 8)) + kernelMask = &sigset_argpack{ + ss: sigmask, + ssLen: sigsetBytes, + } + } + + return pselect6(nfd, r, w, e, mutableTimeout, kernelMask) +} + +//sys schedSetattr(pid int, attr *SchedAttr, flags uint) (err error) +//sys schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) + +// SchedSetAttr is a wrapper for sched_setattr(2) syscall. +// https://man7.org/linux/man-pages/man2/sched_setattr.2.html +func SchedSetAttr(pid int, attr *SchedAttr, flags uint) error { + if attr == nil { + return EINVAL + } + attr.Size = SizeofSchedAttr + return schedSetattr(pid, attr, flags) +} + +// SchedGetAttr is a wrapper for sched_getattr(2) syscall. 
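// Illustrative sketch (not part of the vendored diff): using the new sched_setattr(2)/
// sched_getattr(2) wrappers above to move the calling process to SCHED_BATCH.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	attr, err := unix.SchedGetAttr(0, 0) // pid 0 means the calling thread
	if err != nil {
		panic(err)
	}
	attr.Policy = unix.SCHED_BATCH
	attr.Nice = 5
	// SchedSetAttr fills in attr.Size before issuing the syscall.
	if err := unix.SchedSetAttr(0, attr, 0); err != nil {
		panic(err)
	}
	fmt.Println("policy now:", attr.Policy)
}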
+// https://man7.org/linux/man-pages/man2/sched_getattr.2.html +func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { + attr := &SchedAttr{} + if err := schedGetattr(pid, attr, SizeofSchedAttr, flags); err != nil { + return nil, err + } + return attr, nil +} + /* * Unimplemented */ diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 5b21fcfd75..70601ce369 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -40,7 +40,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index a81f5742b8..f5266689af 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -33,7 +33,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 69d2d7c3db..f6ab02ec15 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -28,7 +28,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 76d564095e..93fe59d25d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -31,7 +31,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 35851ef70b..5e6ceee129 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -32,7 +32,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) @@ -177,3 +177,14 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return 
kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +//sys riscvHWProbe(pairs []RISCVHWProbePairs, cpuCount uintptr, cpus *CPUSet, flags uint) (err error) + +func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error) { + var setSize uintptr + + if set != nil { + setSize = uintptr(unsafe.Sizeof(*set)) + } + return riscvHWProbe(pairs, setSize, set, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 018d7d4782..ddd1ac8534 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -360,6 +360,18 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { //sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) +const ( + mremapFixed = MAP_FIXED + mremapDontunmap = 0 + mremapMaymove = 0 +) + +//sys mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) = SYS_MREMAP + +func mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (uintptr, error) { + return mremapNetBSD(oldaddr, oldlength, newaddr, newlength, flags) +} + /* * Unimplemented */ @@ -564,7 +576,6 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { // mq_timedreceive // mq_timedsend // mq_unlink -// mremap // msgget // msgrcv // msgsnd diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index b600a289d3..72d23575fa 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -716,20 +716,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { return } -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - // Event Ports type fileObjCookie struct { diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 8e48c29ec3..f6eda27050 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -147,6 +147,14 @@ func (m *mmapper) Munmap(data []byte) (err error) { return nil } +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { @@ -541,6 +549,9 @@ func SetNonblock(fd int, nonblocking bool) (err error) { if err != nil { return err } + if (flag&O_NONBLOCK != 0) == nonblocking { + return nil + } if nonblocking { flag |= O_NONBLOCK } else { diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index d3d49ec3ed..44e72edb42 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -285,25 +285,11 @@ func Close(fd int) (err error) { return } -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - // Dummy function: there are no semantics for Madvise on z/OS func Madvise(b []byte, advice int) (err 
error) { return } -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A //sysnb Getegid() (egid int) //sysnb Geteuid() (uid int) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 3784f402e5..0787a043be 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -2821,6 +2821,23 @@ const ( RWF_SUPPORTED = 0x1f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 + SCHED_BATCH = 0x3 + SCHED_DEADLINE = 0x6 + SCHED_FIFO = 0x1 + SCHED_FLAG_ALL = 0x7f + SCHED_FLAG_DL_OVERRUN = 0x4 + SCHED_FLAG_KEEP_ALL = 0x18 + SCHED_FLAG_KEEP_PARAMS = 0x10 + SCHED_FLAG_KEEP_POLICY = 0x8 + SCHED_FLAG_RECLAIM = 0x2 + SCHED_FLAG_RESET_ON_FORK = 0x1 + SCHED_FLAG_UTIL_CLAMP = 0x60 + SCHED_FLAG_UTIL_CLAMP_MAX = 0x40 + SCHED_FLAG_UTIL_CLAMP_MIN = 0x20 + SCHED_IDLE = 0x5 + SCHED_NORMAL = 0x0 + SCHED_RESET_ON_FORK = 0x40000000 + SCHED_RR = 0x2 SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index a46df0f1e5..cfb1430018 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80041270 BLKBSZSET = 0x40041271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80041272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 6cd4a3ea9d..df64f2d590 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index c7ebee24df..3025cd5b2d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80041270 BLKBSZSET = 0x40041271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 
BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80041272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 12a9a1389e..09e1ffbef9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index f26a164f4a..a457235407 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 890bc3c9b7..fee7dfb819 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40041270 BLKBSZSET = 0x80041271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40041272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 549f26ac64..a5b2373aea 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index e0365e32c1..5dde82c98a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index fdccce15ca..2e80ea6b33 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40041270 BLKBSZSET = 0x80041271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40041272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index b2205c83fa..a65dcd7cbe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -27,22 +27,31 @@ const ( B57600 = 0x10 B576000 = 0x15 B921600 = 0x16 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40041270 BLKBSZSET = 0x80041271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40041272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 
0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1f BS1 = 0x8000 BSDLY = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 81aa5ad0f6..cbd34e3d89 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -27,22 +27,31 @@ const ( B57600 = 0x10 B576000 = 0x15 B921600 = 0x16 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1f BS1 = 0x8000 BSDLY = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 76807a1fd4..e4afa7a317 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -27,22 +27,31 @@ const ( B57600 = 0x10 B576000 = 0x15 B921600 = 0x16 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1f BS1 = 0x8000 BSDLY = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index d4a5ab9e4e..44f45a039d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 66e65db951..74733e260f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 
B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 48984202c6..f5f3934b1a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -30,22 +30,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 7ceec233fb..14ab34a565 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1356,7 +1356,7 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { +func pselect6(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *sigset_argpack) (n int, err error) { r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) n = int(r0) if e1 != 0 { @@ -2197,3 +2197,23 @@ func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { RawSyscallNoError(SYS_GETRESGID, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func schedSetattr(pid int, attr *SchedAttr, flags uint) (err error) { + _, _, e1 := Syscall(SYS_SCHED_SETATTR, uintptr(pid), uintptr(unsafe.Pointer(attr)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) { + _, _, e1 := Syscall6(SYS_SCHED_GETATTR, uintptr(pid), uintptr(unsafe.Pointer(attr)), uintptr(size), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 0b29239583..0ab4f2ed72 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ 
b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -531,3 +531,19 @@ func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, f } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func riscvHWProbe(pairs []RISCVHWProbePairs, cpuCount uintptr, cpus *CPUSet, flags uint) (err error) { + var _p0 unsafe.Pointer + if len(pairs) > 0 { + _p0 = unsafe.Pointer(&pairs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_RISCV_HWPROBE, uintptr(_p0), uintptr(len(pairs)), uintptr(cpuCount), uintptr(unsafe.Pointer(cpus)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index cdb2af5ae0..35f499b32a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -1858,3 +1858,14 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 9d25f76b0b..3cda65b0da 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -1858,3 +1858,14 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index d3f8035169..1e1fea902b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -1858,3 +1858,14 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 887188a529..3b77da1107 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -1858,3 +1858,14 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) 
(xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 3e594a8c09..ef285c567b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -251,6 +251,8 @@ const ( SYS_ACCEPT4 = 242 SYS_RECVMMSG = 243 SYS_ARCH_SPECIFIC_SYSCALL = 244 + SYS_RISCV_HWPROBE = 258 + SYS_RISCV_FLUSH_ICACHE = 259 SYS_WAIT4 = 260 SYS_PRLIMIT64 = 261 SYS_FANOTIFY_INIT = 262 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 02e2462c8f..494493c78c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -866,6 +866,11 @@ const ( POLLNVAL = 0x20 ) +type sigset_argpack struct { + ss *Sigset_t + ssLen uintptr +} + type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -5863,3 +5868,18 @@ const ( VIRTIO_NET_HDR_GSO_UDP_L4 = 0x5 VIRTIO_NET_HDR_GSO_ECN = 0x80 ) + +type SchedAttr struct { + Size uint32 + Policy uint32 + Flags uint64 + Nice int32 + Priority uint32 + Runtime uint64 + Deadline uint64 + Period uint64 + Util_min uint32 + Util_max uint32 +} + +const SizeofSchedAttr = 0x38 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 9ea54b7b86..83c69c119f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -718,3 +718,26 @@ type SysvShmDesc struct { _ uint64 _ uint64 } + +type RISCVHWProbePairs struct { + Key int64 + Value uint64 +} + +const ( + RISCV_HWPROBE_KEY_MVENDORID = 0x0 + RISCV_HWPROBE_KEY_MARCHID = 0x1 + RISCV_HWPROBE_KEY_MIMPID = 0x2 + RISCV_HWPROBE_KEY_BASE_BEHAVIOR = 0x3 + RISCV_HWPROBE_BASE_BEHAVIOR_IMA = 0x1 + RISCV_HWPROBE_KEY_IMA_EXT_0 = 0x4 + RISCV_HWPROBE_IMA_FD = 0x1 + RISCV_HWPROBE_IMA_C = 0x2 + RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 + RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 + RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 + RISCV_HWPROBE_MISALIGNED_SLOW = 0x2 + RISCV_HWPROBE_MISALIGNED_FAST = 0x3 + RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 + RISCV_HWPROBE_MISALIGNED_MASK = 0x7 +) diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 9645900754..67bad0926a 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -135,14 +135,14 @@ func Getpagesize() int { return 4096 } // NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention. // This is useful when interoperating with Windows code requiring callbacks. -// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. +// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. func NewCallback(fn interface{}) uintptr { return syscall.NewCallback(fn) } // NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention. // This is useful when interoperating with Windows code requiring callbacks. 
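// Illustrative sketch of the stdcall callback contract documented in the
// syscall_windows.go hunk above: every parameter must be uintptr-sized and the
// callback must return a single uintptr. windows.NewCallback and EnumWindows
// are existing x/sys/windows APIs; the little counting program around them is
// an assumption made up for this example (Windows only).
package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	count := 0
	// Closures are allowed; NewCallback pins the function for the lifetime of
	// the process, so create callbacks sparingly.
	cb := windows.NewCallback(func(hwnd windows.HWND, lparam uintptr) uintptr {
		count++
		return 1 // non-zero return value tells EnumWindows to keep enumerating
	})
	if err := windows.EnumWindows(cb, nil); err != nil {
		fmt.Println("EnumWindows:", err)
		return
	}
	fmt.Println("top-level windows seen:", count)
}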
-// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. +// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. func NewCallbackCDecl(fn interface{}) uintptr { return syscall.NewCallbackCDecl(fn) } @@ -216,7 +216,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath //sys TerminateProcess(handle Handle, exitcode uint32) (err error) //sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) -//sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW +//sys getStartupInfo(startupInfo *StartupInfo) = GetStartupInfoW //sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) //sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) //sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] @@ -437,6 +437,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmGetWindowAttribute //sys DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmSetWindowAttribute +// Windows Multimedia API +//sys TimeBeginPeriod (period uint32) (err error) [failretval != 0] = winmm.timeBeginPeriod +//sys TimeEndPeriod (period uint32) (err error) [failretval != 0] = winmm.timeEndPeriod + // syscall interface implementation for other packages // GetCurrentProcess returns the handle for the current process. 
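// Minimal sketch for the new winmm.dll bindings declared above: bracket a
// latency-sensitive section with TimeBeginPeriod/TimeEndPeriod so Windows
// timers fire with (roughly) the requested granularity. The 1 ms value and the
// Sleep are illustrative assumptions (Windows only).
package main

import (
	"log"
	"time"

	"golang.org/x/sys/windows"
)

func main() {
	if err := windows.TimeBeginPeriod(1); err != nil {
		log.Fatalf("TimeBeginPeriod: %v", err)
	}
	// The Windows API contract requires a matching TimeEndPeriod with the same value.
	defer windows.TimeEndPeriod(1)

	time.Sleep(2 * time.Millisecond) // sleeps/timers now wake close to the requested 2 ms
}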
@@ -1624,6 +1628,11 @@ func SetConsoleCursorPosition(console Handle, position Coord) error { return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position)))) } +func GetStartupInfo(startupInfo *StartupInfo) error { + getStartupInfo(startupInfo) + return nil +} + func (s NTStatus) Errno() syscall.Errno { return rtlNtStatusToDosErrorNoTeb(s) } diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 566dd3e315..5c385580f6 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -55,6 +55,7 @@ var ( moduser32 = NewLazySystemDLL("user32.dll") moduserenv = NewLazySystemDLL("userenv.dll") modversion = NewLazySystemDLL("version.dll") + modwinmm = NewLazySystemDLL("winmm.dll") modwintrust = NewLazySystemDLL("wintrust.dll") modws2_32 = NewLazySystemDLL("ws2_32.dll") modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") @@ -468,6 +469,8 @@ var ( procGetFileVersionInfoSizeW = modversion.NewProc("GetFileVersionInfoSizeW") procGetFileVersionInfoW = modversion.NewProc("GetFileVersionInfoW") procVerQueryValueW = modversion.NewProc("VerQueryValueW") + proctimeBeginPeriod = modwinmm.NewProc("timeBeginPeriod") + proctimeEndPeriod = modwinmm.NewProc("timeEndPeriod") procWinVerifyTrustEx = modwintrust.NewProc("WinVerifyTrustEx") procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") @@ -2367,11 +2370,8 @@ func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uin return } -func GetStartupInfo(startupInfo *StartupInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } +func getStartupInfo(startupInfo *StartupInfo) { + syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) return } @@ -4017,6 +4017,22 @@ func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPoint return } +func TimeBeginPeriod(period uint32) (err error) { + r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func TimeEndPeriod(period uint32) (err error) { + r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) { r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) if r0 != 0 { diff --git a/vendor/golang.org/x/text/language/match.go b/vendor/golang.org/x/text/language/match.go index ee45f49474..1153baf291 100644 --- a/vendor/golang.org/x/text/language/match.go +++ b/vendor/golang.org/x/text/language/match.go @@ -434,7 +434,7 @@ func newMatcher(supported []Tag, options []MatchOption) *matcher { // (their canonicalization simply substitutes a different language code, but // nothing else), the match confidence is Exact, otherwise it is High. for i, lm := range language.AliasMap { - // If deprecated codes match and there is no fiddling with the script or + // If deprecated codes match and there is no fiddling with the script // or region, we consider it an exact match. 
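// Sketch of the matcher behaviour described by the x/text comment just above:
// a deprecated code ("iw") canonicalizes to its replacement ("he"), so matching
// it against a supported Hebrew tag should still be reported with Exact
// confidence. The particular tags are illustrative.
package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	supported := []language.Tag{language.English, language.Hebrew}
	m := language.NewMatcher(supported)

	desired := language.Make("iw") // legacy/deprecated code for Hebrew
	tag, index, conf := m.Match(desired)
	fmt.Println(tag, supported[index], conf) // expected: a Hebrew tag with Exact confidence
}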
conf := Exact if language.AliasTypes[i] != language.Macro { diff --git a/vendor/golang.org/x/text/unicode/norm/trie.go b/vendor/golang.org/x/text/unicode/norm/trie.go index 423386bf43..e4250ae22c 100644 --- a/vendor/golang.org/x/text/unicode/norm/trie.go +++ b/vendor/golang.org/x/text/unicode/norm/trie.go @@ -29,7 +29,7 @@ var ( nfkcData = newNfkcTrie(0) ) -// lookupValue determines the type of block n and looks up the value for b. +// lookup determines the type of block n and looks up the value for b. // For n < t.cutoff, the block is a simple lookup table. Otherwise, the block // is a list of ranges with an accompanying value. Given a matching range r, // the value for b is by r.value + (b - r.lo) * stride. diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index 3fbfebf369..1fc1de0bd1 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -64,8 +64,9 @@ type event struct { // depth-first order. It calls f(n) for each node n before it visits // n's children. // +// The complete traversal sequence is determined by ast.Inspect. // The types argument, if non-empty, enables type-based filtering of -// events. The function f if is called only for nodes whose type +// events. The function f is called only for nodes whose type // matches an element of the types slice. func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // Because it avoids postorder calls to f, and the pruning @@ -97,6 +98,7 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // of the non-nil children of the node, followed by a call of // f(n, false). // +// The complete traversal sequence is determined by ast.Inspect. // The types argument, if non-empty, enables type-based filtering of // events. The function f if is called only for nodes whose type // matches an element of the types slice. diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 165ede0f8f..03543bd4bb 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -128,15 +128,14 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // (from "version"). Select appropriate importer. 
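// Sketch of the inspector.Preorder API whose documentation is touched in the
// hunk above: build an Inspector once, then receive only the node types you
// ask for. The parsed source and file name are illustrative.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	fset := token.NewFileSet()
	src := `package p; func f() { println("hi") }`
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}

	in := inspector.New([]*ast.File{file})
	// Only *ast.CallExpr events reach the callback; everything else is filtered out.
	in.Preorder([]ast.Node{(*ast.CallExpr)(nil)}, func(n ast.Node) {
		call := n.(*ast.CallExpr)
		fmt.Println("call expression at", fset.Position(call.Pos()))
	})
}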
if len(data) > 0 { switch data[0] { - case 'i': - _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) - return pkg, err + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'v', 'c', 'd': - _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + case 'i': // indexed, till go1.19 + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) return pkg, err - case 'u': + case 'u': // unified, from go1.20 _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) return pkg, err diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go index 18a002f82a..0454cdd78e 100644 --- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -8,7 +8,6 @@ package packagesdriver import ( "context" "fmt" - "go/types" "strings" "golang.org/x/tools/internal/gocommand" @@ -16,7 +15,7 @@ import ( var debug = false -func GetSizesGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (types.Sizes, error) { +func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { inv.Verb = "list" inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) @@ -29,21 +28,21 @@ func GetSizesGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner * inv.Args = []string{"GOARCH"} envout, enverr := gocmdRunner.Run(ctx, inv) if enverr != nil { - return nil, enverr + return "", "", enverr } goarch = strings.TrimSpace(envout.String()) compiler = "gc" } else { - return nil, friendlyErr + return "", "", friendlyErr } } else { fields := strings.Fields(stdout.String()) if len(fields) < 2 { - return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", + return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", stdout.String(), stderr.String()) } goarch = fields[0] compiler = fields[1] } - return types.SizesFor(compiler, goarch), nil + return compiler, goarch, nil } diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 6bb7168d2e..b5de9cf9f2 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -9,7 +9,6 @@ import ( "context" "encoding/json" "fmt" - "go/types" "io/ioutil" "log" "os" @@ -153,10 +152,10 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { sizeswg.Add(1) go func() { - var sizes types.Sizes - sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) - // types.SizesFor always returns nil or a *types.StdSizes. 
- response.dr.Sizes, _ = sizes.(*types.StdSizes) + compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) + sizeserr = err + response.dr.Compiler = compiler + response.dr.Arch = arch sizeswg.Done() }() } @@ -625,7 +624,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse } if pkg.PkgPath == "unsafe" { - pkg.GoFiles = nil // ignore fake unsafe.go file + pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929) + } else if len(pkg.CompiledGoFiles) == 0 { + // Work around for pre-go.1.11 versions of go list. + // TODO(matloob): they should be handled by the fallback. + // Can we delete this? + pkg.CompiledGoFiles = pkg.GoFiles } // Assume go list emits only absolute paths for Dir. @@ -663,16 +667,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse response.Roots = append(response.Roots, pkg.ID) } - // Work around for pre-go.1.11 versions of go list. - // TODO(matloob): they should be handled by the fallback. - // Can we delete this? - if len(pkg.CompiledGoFiles) == 0 { - pkg.CompiledGoFiles = pkg.GoFiles - } - // Temporary work-around for golang/go#39986. Parse filenames out of // error messages. This happens if there are unrecoverable syntax // errors in the source, so we can't match on a specific error message. + // + // TODO(rfindley): remove this heuristic, in favor of considering + // InvalidGoFiles from the list driver. if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) { addFilenameFromPos := func(pos string) bool { split := strings.Split(pos, ":") @@ -891,6 +891,15 @@ func golistargs(cfg *Config, words []string, goVersion int) []string { // probably because you'd just get the TestMain. fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), } + + // golang/go#60456: with go1.21 and later, go list serves pgo variants, which + // can be costly to compute and may result in redundant processing for the + // caller. Disable these variants. If someone wants to add e.g. a NeedPGO + // mode flag, that should be a separate proposal. + if goVersion >= 21 { + fullargs = append(fullargs, "-pgo=off") + } + fullargs = append(fullargs, cfg.BuildFlags...) fullargs = append(fullargs, "--") fullargs = append(fullargs, words...) diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 0f1505b808..124a6fe143 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -220,8 +220,10 @@ type driverResponse struct { // lists of multiple drivers, go/packages will fall back to the next driver. NotHandled bool - // Sizes, if not nil, is the types.Sizes to use when type checking. - Sizes *types.StdSizes + // Compiler and Arch are the arguments pass of types.SizesFor + // to get a types.Sizes to use when type checking. + Compiler string + Arch string // Roots is the set of package IDs that make up the root packages. // We have to encode this separately because when we encode a single package @@ -262,7 +264,7 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) { if err != nil { return nil, err } - l.sizes = response.Sizes + l.sizes = types.SizesFor(response.Compiler, response.Arch) return l.refine(response) } @@ -308,6 +310,9 @@ type Package struct { TypeErrors []types.Error // GoFiles lists the absolute file paths of the package's Go source files. 
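// Sketch of driving go/packages with size information, which after the hunks
// above is derived from the driver's Compiler/Arch strings via types.SizesFor
// rather than from a pre-built types.Sizes value. The "./..." pattern is
// illustrative.
package main

import (
	"fmt"
	"go/types"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesSizes,
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		if p.TypesSizes == nil {
			continue
		}
		// TypesSizes is ultimately backed by types.SizesFor(compiler, arch).
		fmt.Println(p.PkgPath, "sizeof(int) =", p.TypesSizes.Sizeof(types.Typ[types.Int]))
	}
}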
+ // It may include files that should not be compiled, for example because + // they contain non-matching build tags, are documentary pseudo-files such as + // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing. GoFiles []string // CompiledGoFiles lists the absolute file paths of the package's source @@ -627,7 +632,7 @@ func newLoader(cfg *Config) *loader { return ld } -// refine connects the supplied packages into a graph and then adds type and +// refine connects the supplied packages into a graph and then adds type // and syntax information as requested by the LoadMode. func (ld *loader) refine(response *driverResponse) ([]*Package, error) { roots := response.Roots @@ -1040,6 +1045,9 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { Error: appendError, Sizes: ld.sizes, } + if lpkg.Module != nil && lpkg.Module.GoVersion != "" { + typesinternal.SetGoVersion(tc, "go"+lpkg.Module.GoVersion) + } if (ld.Mode & typecheckCgo) != 0 { if !typesinternal.SetUsesCgo(tc) { appendError(Error{ diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index aa7dfaccf5..fa5834baf7 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -29,10 +29,10 @@ import ( "sort" "strconv" "strings" + _ "unsafe" "golang.org/x/tools/internal/typeparams" - - _ "unsafe" // for go:linkname + "golang.org/x/tools/internal/typesinternal" ) // A Path is an opaque name that identifies a types.Object @@ -123,8 +123,20 @@ func For(obj types.Object) (Path, error) { // An Encoder amortizes the cost of encoding the paths of multiple objects. // The zero value of an Encoder is ready to use. type Encoder struct { - scopeNamesMemo map[*types.Scope][]string // memoization of Scope.Names() - namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() + scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects + namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() + skipMethodSorting bool +} + +// Expose back doors so that gopls can avoid method sorting, which can dominate +// analysis on certain repositories. +// +// TODO(golang/go#61443): remove this. +func init() { + typesinternal.SkipEncoderMethodSorting = func(enc interface{}) { + enc.(*Encoder).skipMethodSorting = true + } + typesinternal.ObjectpathObject = object } // For returns the path to an object relative to its package, @@ -139,6 +151,17 @@ type Encoder struct { // These objects are sufficient to define the API of their package. // The objects described by a package's export data are drawn from this set. // +// The set of objects accessible from a package's Scope depends on +// whether the package was produced by type-checking syntax, or +// reading export data; the latter may have a smaller Scope since +// export data trims objects that are not reachable from an exported +// declaration. For example, the For function will return a path for +// an exported method of an unexported type that is not reachable +// from any public declaration; this path will cause the Object +// function to fail if called on a package loaded from export data. +// TODO(adonovan): is this a bug or feature? Should this package +// compute accessibility in the same way? +// // For does not return a path for predeclared names, imported package // names, local names, and unexported package-level names (except // types). 
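// Round-trip sketch for the objectpath API shown above: encode a stable path
// for an exported object with the Encoder, then resolve it again with Object.
// The tiny in-memory package is an illustrative stand-in; the printed path
// format is an implementation detail.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

func main() {
	fset := token.NewFileSet()
	src := `package p; type T struct{}; func (T) M() {}`
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{file}, nil)
	if err != nil {
		panic(err)
	}

	method := pkg.Scope().Lookup("T").Type().(*types.Named).Method(0) // T.M

	var enc objectpath.Encoder // the zero value is ready to use
	path, err := enc.For(method)
	if err != nil {
		panic(err)
	}
	back, err := objectpath.Object(pkg, path)
	fmt.Println(path, back == method, err) // e.g. "T.M0" true <nil>
}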
@@ -257,15 +280,14 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // the best paths because non-types may // refer to types, but not the reverse. empty := make([]byte, 0, 48) // initial space - names := enc.scopeNames(scope) - for _, name := range names { - o := scope.Lookup(name) + objs := enc.scopeObjects(scope) + for _, o := range objs { tname, ok := o.(*types.TypeName) if !ok { continue // handle non-types in second pass } - path := append(empty, name...) + path := append(empty, o.Name()...) path = append(path, opType) T := o.Type() @@ -291,9 +313,8 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Then inspect everything else: // non-types, and declared methods of defined types. - for _, name := range names { - o := scope.Lookup(name) - path := append(empty, name...) + for _, o := range objs { + path := append(empty, o.Name()...) if _, ok := o.(*types.TypeName); !ok { if o.Exported() { // exported non-type (const, var, func) @@ -307,16 +328,31 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Inspect declared methods of defined types. if T, ok := o.Type().(*types.Named); ok { path = append(path, opType) - // Note that method index here is always with respect - // to canonical ordering of methods, regardless of how - // they appear in the underlying type. - for i, m := range enc.namedMethods(T) { - path2 := appendOpArg(path, opMethod, i) - if m == obj { - return Path(path2), nil // found declared method + if !enc.skipMethodSorting { + // Note that method index here is always with respect + // to canonical ordering of methods, regardless of how + // they appear in the underlying type. + for i, m := range enc.namedMethods(T) { + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return Path(path2), nil // found declared method + } + if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + return Path(r), nil + } } - if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { - return Path(r), nil + } else { + // This branch must match the logic in the branch above, using go/types + // APIs without sorting. + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return Path(path2), nil // found declared method + } + if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + return Path(r), nil + } } } } @@ -411,10 +447,23 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { path := make([]byte, 0, len(name)+8) path = append(path, name...) path = append(path, opType) - for i, m := range enc.namedMethods(named) { - if m == meth { - path = appendOpArg(path, opMethod, i) - return Path(path), true + + if !enc.skipMethodSorting { + for i, m := range enc.namedMethods(named) { + if m == meth { + path = appendOpArg(path, opMethod, i) + return Path(path), true + } + } + } else { + // This branch must match the logic of the branch above, using go/types + // APIs without sorting. + for i := 0; i < named.NumMethods(); i++ { + m := named.Method(i) + if m == meth { + path = appendOpArg(path, opMethod, i) + return Path(path), true + } } } @@ -527,11 +576,16 @@ func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte // Object returns the object denoted by path p within the package pkg. func Object(pkg *types.Package, p Path) (types.Object, error) { - if p == "" { + return object(pkg, string(p), false) +} + +// Note: the skipMethodSorting parameter must match the value of +// Encoder.skipMethodSorting used during encoding. 
+func object(pkg *types.Package, pathstr string, skipMethodSorting bool) (types.Object, error) { + if pathstr == "" { return nil, fmt.Errorf("empty path") } - pathstr := string(p) var pkgobj, suffix string if dot := strings.IndexByte(pathstr, opType); dot < 0 { pkgobj = pathstr @@ -690,11 +744,15 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { obj = t.Method(index) // Id-ordered case *types.Named: - methods := namedMethods(t) // (unmemoized) - if index >= len(methods) { - return nil, fmt.Errorf("method index %d out of range [0-%d)", index, len(methods)) + if index >= t.NumMethods() { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + if skipMethodSorting { + obj = t.Method(index) + } else { + methods := namedMethods(t) // (unmemoized) + obj = methods[index] // Id-ordered } - obj = methods[index] // Id-ordered default: return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) @@ -748,17 +806,22 @@ func (enc *Encoder) namedMethods(named *types.Named) []*types.Func { return methods } -// scopeNames is a memoization of scope.Names. Callers must not modify the result. -func (enc *Encoder) scopeNames(scope *types.Scope) []string { - m := enc.scopeNamesMemo +// scopeObjects is a memoization of scope objects. +// Callers must not modify the result. +func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object { + m := enc.scopeMemo if m == nil { - m = make(map[*types.Scope][]string) - enc.scopeNamesMemo = m + m = make(map[*types.Scope][]types.Object) + enc.scopeMemo = m } - names, ok := m[scope] + objs, ok := m[scope] if !ok { - names = scope.Names() // allocates and sorts - m[scope] = names + names := scope.Names() // allocates and sorts + objs = make([]types.Object, len(names)) + for i, name := range names { + objs[i] = scope.Lookup(name) + } + m[scope] = objs } - return names + return objs } diff --git a/vendor/golang.org/x/tools/internal/event/tag/tag.go b/vendor/golang.org/x/tools/internal/event/tag/tag.go new file mode 100644 index 0000000000..581b26c204 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/tag/tag.go @@ -0,0 +1,59 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tag provides the labels used for telemetry throughout gopls. 
+package tag + +import ( + "golang.org/x/tools/internal/event/keys" +) + +var ( + // create the label keys we use + Method = keys.NewString("method", "") + StatusCode = keys.NewString("status.code", "") + StatusMessage = keys.NewString("status.message", "") + RPCID = keys.NewString("id", "") + RPCDirection = keys.NewString("direction", "") + File = keys.NewString("file", "") + Directory = keys.New("directory", "") + URI = keys.New("URI", "") + Package = keys.NewString("package", "") // sorted comma-separated list of Package IDs + PackagePath = keys.NewString("package_path", "") + Query = keys.New("query", "") + Snapshot = keys.NewUInt64("snapshot", "") + Operation = keys.NewString("operation", "") + + Position = keys.New("position", "") + Category = keys.NewString("category", "") + PackageCount = keys.NewInt("packages", "") + Files = keys.New("files", "") + Port = keys.NewInt("port", "") + Type = keys.New("type", "") + HoverKind = keys.NewString("hoverkind", "") + + NewServer = keys.NewString("new_server", "A new server was added") + EndServer = keys.NewString("end_server", "A server was shut down") + + ServerID = keys.NewString("server", "The server ID an event is related to") + Logfile = keys.NewString("logfile", "") + DebugAddress = keys.NewString("debug_address", "") + GoplsPath = keys.NewString("gopls_path", "") + ClientID = keys.NewString("client_id", "") + + Level = keys.NewInt("level", "The logging level") +) + +var ( + // create the stats we measure + Started = keys.NewInt64("started", "Count of started RPCs.") + ReceivedBytes = keys.NewInt64("received_bytes", "Bytes received.") //, unit.Bytes) + SentBytes = keys.NewInt64("sent_bytes", "Bytes sent.") //, unit.Bytes) + Latency = keys.NewFloat64("latency_ms", "Elapsed time in milliseconds") //, unit.Milliseconds) +) + +const ( + Inbound = "in" + Outbound = "out" +) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/internal/gcimporter/bexport.go deleted file mode 100644 index 30582ed6d3..0000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/bexport.go +++ /dev/null @@ -1,852 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; -// see that file for specification of the format. - -package gcimporter - -import ( - "bytes" - "encoding/binary" - "fmt" - "go/constant" - "go/token" - "go/types" - "math" - "math/big" - "sort" - "strings" -) - -// If debugFormat is set, each integer and string value is preceded by a marker -// and position information in the encoding. This mechanism permits an importer -// to recognize immediately when it is out of sync. The importer recognizes this -// mode automatically (i.e., it can import export data produced with debugging -// support even if debugFormat is not set at the time of import). This mode will -// lead to massively larger export data (by a factor of 2 to 3) and should only -// be enabled during development and debugging. -// -// NOTE: This flag is the first flag to enable if importing dies because of -// (suspected) format errors, and whenever a change is made to the format. -const debugFormat = false // default: false - -// Current export format version. Increase with each format change. -// -// Note: The latest binary (non-indexed) export format is at version 6. 
-// This exporter is still at level 4, but it doesn't matter since -// the binary importer can handle older versions just fine. -// -// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE -// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE -// 4: type name objects support type aliases, uses aliasTag -// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) -// 2: removed unused bool in ODCL export (compiler only) -// 1: header format change (more regular), export package for _ struct fields -// 0: Go1.7 encoding -const exportVersion = 4 - -// trackAllTypes enables cycle tracking for all types, not just named -// types. The existing compiler invariants assume that unnamed types -// that are not completely set up are not used, or else there are spurious -// errors. -// If disabled, only named types are tracked, possibly leading to slightly -// less efficient encoding in rare cases. It also prevents the export of -// some corner-case type declarations (but those are not handled correctly -// with with the textual export format either). -// TODO(gri) enable and remove once issues caused by it are fixed -const trackAllTypes = false - -type exporter struct { - fset *token.FileSet - out bytes.Buffer - - // object -> index maps, indexed in order of serialization - strIndex map[string]int - pkgIndex map[*types.Package]int - typIndex map[types.Type]int - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - - // debugging support - written int // bytes written - indent int // for trace -} - -// internalError represents an error generated inside this package. -type internalError string - -func (e internalError) Error() string { return "gcimporter: " + string(e) } - -func internalErrorf(format string, args ...interface{}) error { - return internalError(fmt.Sprintf(format, args...)) -} - -// BExportData returns binary export data for pkg. -// If no file set is provided, position info will be missing. -func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { - if !debug { - defer func() { - if e := recover(); e != nil { - if ierr, ok := e.(internalError); ok { - err = ierr - return - } - // Not an internal error; panic again. - panic(e) - } - }() - } - - p := exporter{ - fset: fset, - strIndex: map[string]int{"": 0}, // empty string is mapped to 0 - pkgIndex: make(map[*types.Package]int), - typIndex: make(map[types.Type]int), - posInfoFormat: true, // TODO(gri) might become a flag, eventually - } - - // write version info - // The version string must start with "version %d" where %d is the version - // number. Additional debugging information may follow after a blank; that - // text is ignored by the importer. 
- p.rawStringln(fmt.Sprintf("version %d", exportVersion)) - var debug string - if debugFormat { - debug = "debug" - } - p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly - p.bool(trackAllTypes) - p.bool(p.posInfoFormat) - - // --- generic export data --- - - // populate type map with predeclared "known" types - for index, typ := range predeclared() { - p.typIndex[typ] = index - } - if len(p.typIndex) != len(predeclared()) { - return nil, internalError("duplicate entries in type map?") - } - - // write package data - p.pkg(pkg, true) - if trace { - p.tracef("\n") - } - - // write objects - objcount := 0 - scope := pkg.Scope() - for _, name := range scope.Names() { - if !token.IsExported(name) { - continue - } - if trace { - p.tracef("\n") - } - p.obj(scope.Lookup(name)) - objcount++ - } - - // indicate end of list - if trace { - p.tracef("\n") - } - p.tag(endTag) - - // for self-verification only (redundant) - p.int(objcount) - - if trace { - p.tracef("\n") - } - - // --- end of export data --- - - return p.out.Bytes(), nil -} - -func (p *exporter) pkg(pkg *types.Package, emptypath bool) { - if pkg == nil { - panic(internalError("unexpected nil pkg")) - } - - // if we saw the package before, write its index (>= 0) - if i, ok := p.pkgIndex[pkg]; ok { - p.index('P', i) - return - } - - // otherwise, remember the package, write the package tag (< 0) and package data - if trace { - p.tracef("P%d = { ", len(p.pkgIndex)) - defer p.tracef("} ") - } - p.pkgIndex[pkg] = len(p.pkgIndex) - - p.tag(packageTag) - p.string(pkg.Name()) - if emptypath { - p.string("") - } else { - p.string(pkg.Path()) - } -} - -func (p *exporter) obj(obj types.Object) { - switch obj := obj.(type) { - case *types.Const: - p.tag(constTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - p.value(obj.Val()) - - case *types.TypeName: - if obj.IsAlias() { - p.tag(aliasTag) - p.pos(obj) - p.qualifiedName(obj) - } else { - p.tag(typeTag) - } - p.typ(obj.Type()) - - case *types.Var: - p.tag(varTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - - case *types.Func: - p.tag(funcTag) - p.pos(obj) - p.qualifiedName(obj) - sig := obj.Type().(*types.Signature) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - - default: - panic(internalErrorf("unexpected object %v (%T)", obj, obj)) - } -} - -func (p *exporter) pos(obj types.Object) { - if !p.posInfoFormat { - return - } - - file, line := p.fileLine(obj) - if file == p.prevFile { - // common case: write line delta - // delta == 0 means different file or no line change - delta := line - p.prevLine - p.int(delta) - if delta == 0 { - p.int(-1) // -1 means no file change - } - } else { - // different file - p.int(0) - // Encode filename as length of common prefix with previous - // filename, followed by (possibly empty) suffix. Filenames - // frequently share path prefixes, so this can save a lot - // of space and make export data size less dependent on file - // path length. The suffix is unlikely to be empty because - // file names tend to end in ".go". 
- n := commonPrefixLen(p.prevFile, file) - p.int(n) // n >= 0 - p.string(file[n:]) // write suffix only - p.prevFile = file - p.int(line) - } - p.prevLine = line -} - -func (p *exporter) fileLine(obj types.Object) (file string, line int) { - if p.fset != nil { - pos := p.fset.Position(obj.Pos()) - file = pos.Filename - line = pos.Line - } - return -} - -func commonPrefixLen(a, b string) int { - if len(a) > len(b) { - a, b = b, a - } - // len(a) <= len(b) - i := 0 - for i < len(a) && a[i] == b[i] { - i++ - } - return i -} - -func (p *exporter) qualifiedName(obj types.Object) { - p.string(obj.Name()) - p.pkg(obj.Pkg(), false) -} - -func (p *exporter) typ(t types.Type) { - if t == nil { - panic(internalError("nil type")) - } - - // Possible optimization: Anonymous pointer types *T where - // T is a named type are common. We could canonicalize all - // such types *T to a single type PT = *T. This would lead - // to at most one *T entry in typIndex, and all future *T's - // would be encoded as the respective index directly. Would - // save 1 byte (pointerTag) per *T and reduce the typIndex - // size (at the cost of a canonicalization map). We can do - // this later, without encoding format change. - - // if we saw the type before, write its index (>= 0) - if i, ok := p.typIndex[t]; ok { - p.index('T', i) - return - } - - // otherwise, remember the type, write the type tag (< 0) and type data - if trackAllTypes { - if trace { - p.tracef("T%d = {>\n", len(p.typIndex)) - defer p.tracef("<\n} ") - } - p.typIndex[t] = len(p.typIndex) - } - - switch t := t.(type) { - case *types.Named: - if !trackAllTypes { - // if we don't track all types, track named types now - p.typIndex[t] = len(p.typIndex) - } - - p.tag(namedTag) - p.pos(t.Obj()) - p.qualifiedName(t.Obj()) - p.typ(t.Underlying()) - if !types.IsInterface(t) { - p.assocMethods(t) - } - - case *types.Array: - p.tag(arrayTag) - p.int64(t.Len()) - p.typ(t.Elem()) - - case *types.Slice: - p.tag(sliceTag) - p.typ(t.Elem()) - - case *dddSlice: - p.tag(dddTag) - p.typ(t.elem) - - case *types.Struct: - p.tag(structTag) - p.fieldList(t) - - case *types.Pointer: - p.tag(pointerTag) - p.typ(t.Elem()) - - case *types.Signature: - p.tag(signatureTag) - p.paramList(t.Params(), t.Variadic()) - p.paramList(t.Results(), false) - - case *types.Interface: - p.tag(interfaceTag) - p.iface(t) - - case *types.Map: - p.tag(mapTag) - p.typ(t.Key()) - p.typ(t.Elem()) - - case *types.Chan: - p.tag(chanTag) - p.int(int(3 - t.Dir())) // hack - p.typ(t.Elem()) - - default: - panic(internalErrorf("unexpected type %T: %s", t, t)) - } -} - -func (p *exporter) assocMethods(named *types.Named) { - // Sort methods (for determinism). 
- var methods []*types.Func - for i := 0; i < named.NumMethods(); i++ { - methods = append(methods, named.Method(i)) - } - sort.Sort(methodsByName(methods)) - - p.int(len(methods)) - - if trace && methods != nil { - p.tracef("associated methods {>\n") - } - - for i, m := range methods { - if trace && i > 0 { - p.tracef("\n") - } - - p.pos(m) - name := m.Name() - p.string(name) - if !exported(name) { - p.pkg(m.Pkg(), false) - } - - sig := m.Type().(*types.Signature) - p.paramList(types.NewTuple(sig.Recv()), false) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - p.int(0) // dummy value for go:nointerface pragma - ignored by importer - } - - if trace && methods != nil { - p.tracef("<\n} ") - } -} - -type methodsByName []*types.Func - -func (x methodsByName) Len() int { return len(x) } -func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } - -func (p *exporter) fieldList(t *types.Struct) { - if trace && t.NumFields() > 0 { - p.tracef("fields {>\n") - defer p.tracef("<\n} ") - } - - p.int(t.NumFields()) - for i := 0; i < t.NumFields(); i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.field(t.Field(i)) - p.string(t.Tag(i)) - } -} - -func (p *exporter) field(f *types.Var) { - if !f.IsField() { - panic(internalError("field expected")) - } - - p.pos(f) - p.fieldName(f) - p.typ(f.Type()) -} - -func (p *exporter) iface(t *types.Interface) { - // TODO(gri): enable importer to load embedded interfaces, - // then emit Embeddeds and ExplicitMethods separately here. - p.int(0) - - n := t.NumMethods() - if trace && n > 0 { - p.tracef("methods {>\n") - defer p.tracef("<\n} ") - } - p.int(n) - for i := 0; i < n; i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.method(t.Method(i)) - } -} - -func (p *exporter) method(m *types.Func) { - sig := m.Type().(*types.Signature) - if sig.Recv() == nil { - panic(internalError("method expected")) - } - - p.pos(m) - p.string(m.Name()) - if m.Name() != "_" && !token.IsExported(m.Name()) { - p.pkg(m.Pkg(), false) - } - - // interface method; no need to encode receiver. - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) -} - -func (p *exporter) fieldName(f *types.Var) { - name := f.Name() - - if f.Anonymous() { - // anonymous field - we distinguish between 3 cases: - // 1) field name matches base type name and is exported - // 2) field name matches base type name and is not exported - // 3) field name doesn't match base type name (alias name) - bname := basetypeName(f.Type()) - if name == bname { - if token.IsExported(name) { - name = "" // 1) we don't need to know the field name or package - } else { - name = "?" // 2) use unexported name "?" 
to force package export - } - } else { - // 3) indicate alias and export name as is - // (this requires an extra "@" but this is a rare case) - p.string("@") - } - } - - p.string(name) - if name != "" && !token.IsExported(name) { - p.pkg(f.Pkg(), false) - } -} - -func basetypeName(typ types.Type) string { - switch typ := deref(typ).(type) { - case *types.Basic: - return typ.Name() - case *types.Named: - return typ.Obj().Name() - default: - return "" // unnamed type - } -} - -func (p *exporter) paramList(params *types.Tuple, variadic bool) { - // use negative length to indicate unnamed parameters - // (look at the first parameter only since either all - // names are present or all are absent) - n := params.Len() - if n > 0 && params.At(0).Name() == "" { - n = -n - } - p.int(n) - for i := 0; i < params.Len(); i++ { - q := params.At(i) - t := q.Type() - if variadic && i == params.Len()-1 { - t = &dddSlice{t.(*types.Slice).Elem()} - } - p.typ(t) - if n > 0 { - name := q.Name() - p.string(name) - if name != "_" { - p.pkg(q.Pkg(), false) - } - } - p.string("") // no compiler-specific info - } -} - -func (p *exporter) value(x constant.Value) { - if trace { - p.tracef("= ") - } - - switch x.Kind() { - case constant.Bool: - tag := falseTag - if constant.BoolVal(x) { - tag = trueTag - } - p.tag(tag) - - case constant.Int: - if v, exact := constant.Int64Val(x); exact { - // common case: x fits into an int64 - use compact encoding - p.tag(int64Tag) - p.int64(v) - return - } - // uncommon case: large x - use float encoding - // (powers of 2 will be encoded efficiently with exponent) - p.tag(floatTag) - p.float(constant.ToFloat(x)) - - case constant.Float: - p.tag(floatTag) - p.float(x) - - case constant.Complex: - p.tag(complexTag) - p.float(constant.Real(x)) - p.float(constant.Imag(x)) - - case constant.String: - p.tag(stringTag) - p.string(constant.StringVal(x)) - - case constant.Unknown: - // package contains type errors - p.tag(unknownTag) - - default: - panic(internalErrorf("unexpected value %v (%T)", x, x)) - } -} - -func (p *exporter) float(x constant.Value) { - if x.Kind() != constant.Float { - panic(internalErrorf("unexpected constant %v, want float", x)) - } - // extract sign (there is no -0) - sign := constant.Sign(x) - if sign == 0 { - // x == 0 - p.int(0) - return - } - // x != 0 - - var f big.Float - if v, exact := constant.Float64Val(x); exact { - // float64 - f.SetFloat64(v) - } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { - // TODO(gri): add big.Rat accessor to constant.Value. - r := valueToRat(num) - f.SetRat(r.Quo(r, valueToRat(denom))) - } else { - // Value too large to represent as a fraction => inaccessible. - // TODO(gri): add big.Float accessor to constant.Value. - f.SetFloat64(math.MaxFloat64) // FIXME - } - - // extract exponent such that 0.5 <= m < 1.0 - var m big.Float - exp := f.MantExp(&m) - - // extract mantissa as *big.Int - // - set exponent large enough so mant satisfies mant.IsInt() - // - get *big.Int from mant - m.SetMantExp(&m, int(m.MinPrec())) - mant, acc := m.Int(nil) - if acc != big.Exact { - panic(internalError("internal error")) - } - - p.int(sign) - p.int(exp) - p.string(string(mant.Bytes())) -} - -func valueToRat(x constant.Value) *big.Rat { - // Convert little-endian to big-endian. - // I can't believe this is necessary. 
- bytes := constant.Bytes(x) - for i := 0; i < len(bytes)/2; i++ { - bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] - } - return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) -} - -func (p *exporter) bool(b bool) bool { - if trace { - p.tracef("[") - defer p.tracef("= %v] ", b) - } - - x := 0 - if b { - x = 1 - } - p.int(x) - return b -} - -// ---------------------------------------------------------------------------- -// Low-level encoders - -func (p *exporter) index(marker byte, index int) { - if index < 0 { - panic(internalError("invalid index < 0")) - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%c%d ", marker, index) - } - p.rawInt64(int64(index)) -} - -func (p *exporter) tag(tag int) { - if tag >= 0 { - panic(internalError("invalid tag >= 0")) - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%s ", tagString[-tag]) - } - p.rawInt64(int64(tag)) -} - -func (p *exporter) int(x int) { - p.int64(int64(x)) -} - -func (p *exporter) int64(x int64) { - if debugFormat { - p.marker('i') - } - if trace { - p.tracef("%d ", x) - } - p.rawInt64(x) -} - -func (p *exporter) string(s string) { - if debugFormat { - p.marker('s') - } - if trace { - p.tracef("%q ", s) - } - // if we saw the string before, write its index (>= 0) - // (the empty string is mapped to 0) - if i, ok := p.strIndex[s]; ok { - p.rawInt64(int64(i)) - return - } - // otherwise, remember string and write its negative length and bytes - p.strIndex[s] = len(p.strIndex) - p.rawInt64(-int64(len(s))) - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } -} - -// marker emits a marker byte and position information which makes -// it easy for a reader to detect if it is "out of sync". Used for -// debugFormat format only. -func (p *exporter) marker(m byte) { - p.rawByte(m) - // Enable this for help tracking down the location - // of an incorrect marker when running in debugFormat. - if false && trace { - p.tracef("#%d ", p.written) - } - p.rawInt64(int64(p.written)) -} - -// rawInt64 should only be used by low-level encoders. -func (p *exporter) rawInt64(x int64) { - var tmp [binary.MaxVarintLen64]byte - n := binary.PutVarint(tmp[:], x) - for i := 0; i < n; i++ { - p.rawByte(tmp[i]) - } -} - -// rawStringln should only be used to emit the initial version string. -func (p *exporter) rawStringln(s string) { - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } - p.rawByte('\n') -} - -// rawByte is the bottleneck interface to write to p.out. -// rawByte escapes b as follows (any encoding does that -// hides '$'): -// -// '$' => '|' 'S' -// '|' => '|' '|' -// -// Necessary so other tools can find the end of the -// export data by searching for "$$". -// rawByte should only be used by low-level encoders. -func (p *exporter) rawByte(b byte) { - switch b { - case '$': - // write '$' as '|' 'S' - b = 'S' - fallthrough - case '|': - // write '|' as '|' '|' - p.out.WriteByte('|') - p.written++ - } - p.out.WriteByte(b) - p.written++ -} - -// tracef is like fmt.Printf but it rewrites the format string -// to take care of indentation. -func (p *exporter) tracef(format string, args ...interface{}) { - if strings.ContainsAny(format, "<>\n") { - var buf bytes.Buffer - for i := 0; i < len(format); i++ { - // no need to deal with runes - ch := format[i] - switch ch { - case '>': - p.indent++ - continue - case '<': - p.indent-- - continue - } - buf.WriteByte(ch) - if ch == '\n' { - for j := p.indent; j > 0; j-- { - buf.WriteString(". 
") - } - } - } - format = buf.String() - } - fmt.Printf(format, args...) -} - -// Debugging support. -// (tagString is only used when tracing is enabled) -var tagString = [...]string{ - // Packages - -packageTag: "package", - - // Types - -namedTag: "named type", - -arrayTag: "array", - -sliceTag: "slice", - -dddTag: "ddd", - -structTag: "struct", - -pointerTag: "pointer", - -signatureTag: "signature", - -interfaceTag: "interface", - -mapTag: "map", - -chanTag: "chan", - - // Values - -falseTag: "false", - -trueTag: "true", - -int64Tag: "int64", - -floatTag: "float", - -fractionTag: "fraction", - -complexTag: "complex", - -stringTag: "string", - -unknownTag: "unknown", - - // Type aliases - -aliasTag: "alias", -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index b85de01470..d98b0db2a9 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -2,340 +2,24 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. +// This file contains the remaining vestiges of +// $GOROOT/src/go/internal/gcimporter/bimport.go. package gcimporter import ( - "encoding/binary" "fmt" - "go/constant" "go/token" "go/types" - "sort" - "strconv" - "strings" "sync" - "unicode" - "unicode/utf8" ) -type importer struct { - imports map[string]*types.Package - data []byte - importpath string - buf []byte // for reading strings - version int // export format version - - // object lists - strList []string // in order of appearance - pathList []string // in order of appearance - pkgList []*types.Package // in order of appearance - typList []types.Type // in order of appearance - interfaceList []*types.Interface // for delayed completion only - trackAllTypes bool - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - fake fakeFileSet - - // debugging support - debugFormat bool - read int // bytes read -} - -// BImportData imports a package from the serialized package data -// and returns the number of bytes consumed and a reference to the package. -// If the export data version is not recognized or the format is otherwise -// compromised, an error is returned. -func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - // catch panics and return them as errors - const currentVersion = 6 - version := -1 // unknown version - defer func() { - if e := recover(); e != nil { - // Return a (possibly nil or incomplete) package unchanged (see #16088). - if version > currentVersion { - err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) - } else { - err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) - } - } - }() - - p := importer{ - imports: imports, - data: data, - importpath: path, - version: version, - strList: []string{""}, // empty string is mapped to 0 - pathList: []string{""}, // empty string is mapped to 0 - fake: fakeFileSet{ - fset: fset, - files: make(map[string]*fileInfo), - }, - } - defer p.fake.setLines() // set lines for files in fset - - // read version info - var versionstr string - if b := p.rawByte(); b == 'c' || b == 'd' { - // Go1.7 encoding; first byte encodes low-level - // encoding format (compact vs debug). 
- // For backward-compatibility only (avoid problems with - // old installed packages). Newly compiled packages use - // the extensible format string. - // TODO(gri) Remove this support eventually; after Go1.8. - if b == 'd' { - p.debugFormat = true - } - p.trackAllTypes = p.rawByte() == 'a' - p.posInfoFormat = p.int() != 0 - versionstr = p.string() - if versionstr == "v1" { - version = 0 - } - } else { - // Go1.8 extensible encoding - // read version string and extract version number (ignore anything after the version number) - versionstr = p.rawStringln(b) - if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { - if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { - version = v - } - } - } - p.version = version - - // read version specific flags - extend as necessary - switch p.version { - // case currentVersion: - // ... - // fallthrough - case currentVersion, 5, 4, 3, 2, 1: - p.debugFormat = p.rawStringln(p.rawByte()) == "debug" - p.trackAllTypes = p.int() != 0 - p.posInfoFormat = p.int() != 0 - case 0: - // Go1.7 encoding format - nothing to do here - default: - errorf("unknown bexport format version %d (%q)", p.version, versionstr) - } - - // --- generic export data --- - - // populate typList with predeclared "known" types - p.typList = append(p.typList, predeclared()...) - - // read package data - pkg = p.pkg() - - // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) - objcount := 0 - for { - tag := p.tagOrIndex() - if tag == endTag { - break - } - p.obj(tag) - objcount++ - } - - // self-verification - if count := p.int(); count != objcount { - errorf("got %d objects; want %d", objcount, count) - } - - // ignore compiler-specific import data - - // complete interfaces - // TODO(gri) re-investigate if we still need to do this in a delayed fashion - for _, typ := range p.interfaceList { - typ.Complete() - } - - // record all referenced packages as imports - list := append(([]*types.Package)(nil), p.pkgList[1:]...) 
- sort.Sort(byPath(list)) - pkg.SetImports(list) - - // package was imported completely and without errors - pkg.MarkComplete() - - return p.read, pkg, nil -} - func errorf(format string, args ...interface{}) { panic(fmt.Sprintf(format, args...)) } -func (p *importer) pkg() *types.Package { - // if the package was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.pkgList[i] - } - - // otherwise, i is the package tag (< 0) - if i != packageTag { - errorf("unexpected package tag %d version %d", i, p.version) - } - - // read package data - name := p.string() - var path string - if p.version >= 5 { - path = p.path() - } else { - path = p.string() - } - if p.version >= 6 { - p.int() // package height; unused by go/types - } - - // we should never see an empty package name - if name == "" { - errorf("empty package name in import") - } - - // an empty path denotes the package we are currently importing; - // it must be the first package we see - if (path == "") != (len(p.pkgList) == 0) { - errorf("package path %q for pkg index %d", path, len(p.pkgList)) - } - - // if the package was imported before, use that one; otherwise create a new one - if path == "" { - path = p.importpath - } - pkg := p.imports[path] - if pkg == nil { - pkg = types.NewPackage(path, name) - p.imports[path] = pkg - } else if pkg.Name() != name { - errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) - } - p.pkgList = append(p.pkgList, pkg) - - return pkg -} - -// objTag returns the tag value for each object kind. -func objTag(obj types.Object) int { - switch obj.(type) { - case *types.Const: - return constTag - case *types.TypeName: - return typeTag - case *types.Var: - return varTag - case *types.Func: - return funcTag - default: - errorf("unexpected object: %v (%T)", obj, obj) // panics - panic("unreachable") - } -} - -func sameObj(a, b types.Object) bool { - // Because unnamed types are not canonicalized, we cannot simply compare types for - // (pointer) identity. - // Ideally we'd check equality of constant values as well, but this is good enough. - return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) -} - -func (p *importer) declare(obj types.Object) { - pkg := obj.Pkg() - if alt := pkg.Scope().Insert(obj); alt != nil { - // This can only trigger if we import a (non-type) object a second time. - // Excluding type aliases, this cannot happen because 1) we only import a package - // once; and b) we ignore compiler-specific export data which may contain - // functions whose inlined function bodies refer to other functions that - // were already imported. - // However, type aliases require reexporting the original type, so we need - // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, - // method importer.obj, switch case importing functions). - // TODO(gri) review/update this comment once the gc compiler handles type aliases. 
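The sameObj helper removed above compares objects by tag plus types.Identical because, as its comment notes, unnamed types are not canonicalized and so cannot be compared by pointer. A minimal standalone illustration of that property, using only the standard library (nothing below is part of the vendored code):

package main

import (
	"fmt"
	"go/types"
)

func main() {
	// Two structurally equal unnamed types are distinct values in go/types,
	// so pointer comparison reports "different" while types.Identical
	// reports "same".
	a := types.NewSlice(types.Typ[types.Int])
	b := types.NewSlice(types.Typ[types.Int])
	fmt.Println(a == b)                // false
	fmt.Println(types.Identical(a, b)) // true
}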
- if !sameObj(obj, alt) { - errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) - } - } -} - -func (p *importer) obj(tag int) { - switch tag { - case constTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - val := p.value() - p.declare(types.NewConst(pos, pkg, name, typ, val)) - - case aliasTag: - // TODO(gri) verify type alias hookup is correct - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - p.declare(types.NewTypeName(pos, pkg, name, typ)) - - case typeTag: - p.typ(nil, nil) - - case varTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - p.declare(types.NewVar(pos, pkg, name, typ)) - - case funcTag: - pos := p.pos() - pkg, name := p.qualifiedName() - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(nil, params, result, isddd) - p.declare(types.NewFunc(pos, pkg, name, sig)) - - default: - errorf("unexpected object tag %d", tag) - } -} - const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go -func (p *importer) pos() token.Pos { - if !p.posInfoFormat { - return token.NoPos - } - - file := p.prevFile - line := p.prevLine - delta := p.int() - line += delta - if p.version >= 5 { - if delta == deltaNewFile { - if n := p.int(); n >= 0 { - // file changed - file = p.path() - line = n - } - } - } else { - if delta == 0 { - if n := p.int(); n >= 0 { - // file changed - file = p.prevFile[:n] + p.string() - line = p.int() - } - } - } - p.prevFile = file - p.prevLine = line - - return p.fake.pos(file, line, 0) -} - // Synthesize a token.Pos type fakeFileSet struct { fset *token.FileSet @@ -389,205 +73,6 @@ var ( fakeLinesOnce sync.Once ) -func (p *importer) qualifiedName() (pkg *types.Package, name string) { - name = p.string() - pkg = p.pkg() - return -} - -func (p *importer) record(t types.Type) { - p.typList = append(p.typList, t) -} - -// A dddSlice is a types.Type representing ...T parameters. -// It only appears for parameter types and does not escape -// the importer. -type dddSlice struct { - elem types.Type -} - -func (t *dddSlice) Underlying() types.Type { return t } -func (t *dddSlice) String() string { return "..." + t.elem.String() } - -// parent is the package which declared the type; parent == nil means -// the package currently imported. The parent package is needed for -// exported struct fields and interface methods which don't contain -// explicit package information in the export data. -// -// A non-nil tname is used as the "owner" of the result type; i.e., -// the result type is the underlying type of tname. tname is used -// to give interface methods a named receiver type where possible. 
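The dddSlice type removed above is only an importer-internal stand-in for ...T parameters; in the public go/types API a variadic parameter surfaces as an ordinary slice on a signature whose Variadic method reports true. A small self-contained sketch of that (standard library only; the source snippet and names are made up):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	src := `package p; func F(xs ...int) {}`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	sig := pkg.Scope().Lookup("F").Type().(*types.Signature)
	last := sig.Params().At(sig.Params().Len() - 1)
	fmt.Println(sig.Variadic(), last.Type()) // true []int
}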
-func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { - // if the type was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.typList[i] - } - - // otherwise, i is the type tag (< 0) - switch i { - case namedTag: - // read type object - pos := p.pos() - parent, name := p.qualifiedName() - scope := parent.Scope() - obj := scope.Lookup(name) - - // if the object doesn't exist yet, create and insert it - if obj == nil { - obj = types.NewTypeName(pos, parent, name, nil) - scope.Insert(obj) - } - - if _, ok := obj.(*types.TypeName); !ok { - errorf("pkg = %s, name = %s => %s", parent, name, obj) - } - - // associate new named type with obj if it doesn't exist yet - t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) - - // but record the existing type, if any - tname := obj.Type().(*types.Named) // tname is either t0 or the existing type - p.record(tname) - - // read underlying type - t0.SetUnderlying(p.typ(parent, t0)) - - // interfaces don't have associated methods - if types.IsInterface(t0) { - return tname - } - - // read associated methods - for i := p.int(); i > 0; i-- { - // TODO(gri) replace this with something closer to fieldName - pos := p.pos() - name := p.string() - if !exported(name) { - p.pkg() - } - - recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? - params, isddd := p.paramList() - result, _ := p.paramList() - p.int() // go:nointerface pragma - discarded - - sig := types.NewSignature(recv.At(0), params, result, isddd) - t0.AddMethod(types.NewFunc(pos, parent, name, sig)) - } - - return tname - - case arrayTag: - t := new(types.Array) - if p.trackAllTypes { - p.record(t) - } - - n := p.int64() - *t = *types.NewArray(p.typ(parent, nil), n) - return t - - case sliceTag: - t := new(types.Slice) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewSlice(p.typ(parent, nil)) - return t - - case dddTag: - t := new(dddSlice) - if p.trackAllTypes { - p.record(t) - } - - t.elem = p.typ(parent, nil) - return t - - case structTag: - t := new(types.Struct) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewStruct(p.fieldList(parent)) - return t - - case pointerTag: - t := new(types.Pointer) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewPointer(p.typ(parent, nil)) - return t - - case signatureTag: - t := new(types.Signature) - if p.trackAllTypes { - p.record(t) - } - - params, isddd := p.paramList() - result, _ := p.paramList() - *t = *types.NewSignature(nil, params, result, isddd) - return t - - case interfaceTag: - // Create a dummy entry in the type list. This is safe because we - // cannot expect the interface type to appear in a cycle, as any - // such cycle must contain a named type which would have been - // first defined earlier. - // TODO(gri) Is this still true now that we have type aliases? - // See issue #23225. 
- n := len(p.typList) - if p.trackAllTypes { - p.record(nil) - } - - var embeddeds []types.Type - for n := p.int(); n > 0; n-- { - p.pos() - embeddeds = append(embeddeds, p.typ(parent, nil)) - } - - t := newInterface(p.methodList(parent, tname), embeddeds) - p.interfaceList = append(p.interfaceList, t) - if p.trackAllTypes { - p.typList[n] = t - } - return t - - case mapTag: - t := new(types.Map) - if p.trackAllTypes { - p.record(t) - } - - key := p.typ(parent, nil) - val := p.typ(parent, nil) - *t = *types.NewMap(key, val) - return t - - case chanTag: - t := new(types.Chan) - if p.trackAllTypes { - p.record(t) - } - - dir := chanDir(p.int()) - val := p.typ(parent, nil) - *t = *types.NewChan(dir, val) - return t - - default: - errorf("unexpected type tag %d", i) // panics - panic("unreachable") - } -} - func chanDir(d int) types.ChanDir { // tag values must match the constants in cmd/compile/internal/gc/go.go switch d { @@ -603,394 +88,6 @@ func chanDir(d int) types.ChanDir { } } -func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { - if n := p.int(); n > 0 { - fields = make([]*types.Var, n) - tags = make([]string, n) - for i := range fields { - fields[i], tags[i] = p.field(parent) - } - } - return -} - -func (p *importer) field(parent *types.Package) (*types.Var, string) { - pos := p.pos() - pkg, name, alias := p.fieldName(parent) - typ := p.typ(parent, nil) - tag := p.string() - - anonymous := false - if name == "" { - // anonymous field - typ must be T or *T and T must be a type name - switch typ := deref(typ).(type) { - case *types.Basic: // basic types are named types - pkg = nil // // objects defined in Universe scope have no package - name = typ.Name() - case *types.Named: - name = typ.Obj().Name() - default: - errorf("named base type expected") - } - anonymous = true - } else if alias { - // anonymous field: we have an explicit name because it's an alias - anonymous = true - } - - return types.NewField(pos, pkg, name, typ, anonymous), tag -} - -func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { - if n := p.int(); n > 0 { - methods = make([]*types.Func, n) - for i := range methods { - methods[i] = p.method(parent, baseType) - } - } - return -} - -func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { - pos := p.pos() - pkg, name, _ := p.fieldName(parent) - // If we don't have a baseType, use a nil receiver. - // A receiver using the actual interface type (which - // we don't know yet) will be filled in when we call - // types.Interface.Complete. 
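Both the removed importer's interfaceList and the comment above about types.Interface.Complete reflect a general go/types rule: an interface built by hand must be completed before it is used. A minimal sketch of constructing and completing one (standard library only; it uses the same types.NewSignature constructor the surrounding code does):

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	// Build interface{ Error() string } by hand.
	results := types.NewTuple(types.NewVar(token.NoPos, nil, "", types.Typ[types.String]))
	sig := types.NewSignature(nil, nil, results, false)
	method := types.NewFunc(token.NoPos, nil, "Error", sig)
	iface := types.NewInterfaceType([]*types.Func{method}, nil)
	iface.Complete() // required before the interface is used
	fmt.Println(iface) // interface{Error() string}
}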
- var recv *types.Var - if baseType != nil { - recv = types.NewVar(token.NoPos, parent, "", baseType) - } - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(recv, params, result, isddd) - return types.NewFunc(pos, pkg, name, sig) -} - -func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { - name = p.string() - pkg = parent - if pkg == nil { - // use the imported package instead - pkg = p.pkgList[0] - } - if p.version == 0 && name == "_" { - // version 0 didn't export a package for _ fields - return - } - switch name { - case "": - // 1) field name matches base type name and is exported: nothing to do - case "?": - // 2) field name matches base type name and is not exported: need package - name = "" - pkg = p.pkg() - case "@": - // 3) field name doesn't match type name (alias) - name = p.string() - alias = true - fallthrough - default: - if !exported(name) { - pkg = p.pkg() - } - } - return -} - -func (p *importer) paramList() (*types.Tuple, bool) { - n := p.int() - if n == 0 { - return nil, false - } - // negative length indicates unnamed parameters - named := true - if n < 0 { - n = -n - named = false - } - // n > 0 - params := make([]*types.Var, n) - isddd := false - for i := range params { - params[i], isddd = p.param(named) - } - return types.NewTuple(params...), isddd -} - -func (p *importer) param(named bool) (*types.Var, bool) { - t := p.typ(nil, nil) - td, isddd := t.(*dddSlice) - if isddd { - t = types.NewSlice(td.elem) - } - - var pkg *types.Package - var name string - if named { - name = p.string() - if name == "" { - errorf("expected named parameter") - } - if name != "_" { - pkg = p.pkg() - } - if i := strings.Index(name, "·"); i > 0 { - name = name[:i] // cut off gc-specific parameter numbering - } - } - - // read and discard compiler-specific info - p.string() - - return types.NewVar(token.NoPos, pkg, name, t), isddd -} - -func exported(name string) bool { - ch, _ := utf8.DecodeRuneInString(name) - return unicode.IsUpper(ch) -} - -func (p *importer) value() constant.Value { - switch tag := p.tagOrIndex(); tag { - case falseTag: - return constant.MakeBool(false) - case trueTag: - return constant.MakeBool(true) - case int64Tag: - return constant.MakeInt64(p.int64()) - case floatTag: - return p.float() - case complexTag: - re := p.float() - im := p.float() - return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) - case stringTag: - return constant.MakeString(p.string()) - case unknownTag: - return constant.MakeUnknown() - default: - errorf("unexpected value tag %d", tag) // panics - panic("unreachable") - } -} - -func (p *importer) float() constant.Value { - sign := p.int() - if sign == 0 { - return constant.MakeInt64(0) - } - - exp := p.int() - mant := []byte(p.string()) // big endian - - // remove leading 0's if any - for len(mant) > 0 && mant[0] == 0 { - mant = mant[1:] - } - - // convert to little endian - // TODO(gri) go/constant should have a more direct conversion function - // (e.g., once it supports a big.Float based implementation) - for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { - mant[i], mant[j] = mant[j], mant[i] - } - - // adjust exponent (constant.MakeFromBytes creates an integer value, - // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) - exp -= len(mant) << 3 - if len(mant) > 0 { - for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { - exp++ - } - } - - x := constant.MakeFromBytes(mant) - switch { - case exp < 0: - d := 
constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) - x = constant.BinaryOp(x, token.QUO, d) - case exp > 0: - x = constant.Shift(x, token.SHL, uint(exp)) - } - - if sign < 0 { - x = constant.UnaryOp(token.SUB, x, 0) - } - return x -} - -// ---------------------------------------------------------------------------- -// Low-level decoders - -func (p *importer) tagOrIndex() int { - if p.debugFormat { - p.marker('t') - } - - return int(p.rawInt64()) -} - -func (p *importer) int() int { - x := p.int64() - if int64(int(x)) != x { - errorf("exported integer too large") - } - return int(x) -} - -func (p *importer) int64() int64 { - if p.debugFormat { - p.marker('i') - } - - return p.rawInt64() -} - -func (p *importer) path() string { - if p.debugFormat { - p.marker('p') - } - // if the path was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.pathList[i] - } - // otherwise, i is the negative path length (< 0) - a := make([]string, -i) - for n := range a { - a[n] = p.string() - } - s := strings.Join(a, "/") - p.pathList = append(p.pathList, s) - return s -} - -func (p *importer) string() string { - if p.debugFormat { - p.marker('s') - } - // if the string was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.strList[i] - } - // otherwise, i is the negative string length (< 0) - if n := int(-i); n <= cap(p.buf) { - p.buf = p.buf[:n] - } else { - p.buf = make([]byte, n) - } - for i := range p.buf { - p.buf[i] = p.rawByte() - } - s := string(p.buf) - p.strList = append(p.strList, s) - return s -} - -func (p *importer) marker(want byte) { - if got := p.rawByte(); got != want { - errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) - } - - pos := p.read - if n := int(p.rawInt64()); n != pos { - errorf("incorrect position: got %d; want %d", n, pos) - } -} - -// rawInt64 should only be used by low-level decoders. -func (p *importer) rawInt64() int64 { - i, err := binary.ReadVarint(p) - if err != nil { - errorf("read error: %v", err) - } - return i -} - -// rawStringln should only be used to read the initial version string. -func (p *importer) rawStringln(b byte) string { - p.buf = p.buf[:0] - for b != '\n' { - p.buf = append(p.buf, b) - b = p.rawByte() - } - return string(p.buf) -} - -// needed for binary.ReadVarint in rawInt64 -func (p *importer) ReadByte() (byte, error) { - return p.rawByte(), nil -} - -// byte is the bottleneck interface for reading p.data. -// It unescapes '|' 'S' to '$' and '|' '|' to '|'. -// rawByte should only be used by low-level decoders. -func (p *importer) rawByte() byte { - b := p.data[0] - r := 1 - if b == '|' { - b = p.data[1] - r = 2 - switch b { - case 'S': - b = '$' - case '|': - // nothing to do - default: - errorf("unexpected escape sequence in export data") - } - } - p.data = p.data[r:] - p.read += r - return b - -} - -// ---------------------------------------------------------------------------- -// Export format - -// Tags. Must be < 0. 
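The rawByte decoder removed above undoes a two-character escape in the legacy export stream: '|' 'S' stands for '$' and '|' '|' stands for '|'. A tiny standalone decoder for just that scheme, for illustration only:

package main

import "fmt"

// unescape reverses the escaping described for rawByte: "|S" -> '$', "||" -> '|'.
func unescape(data []byte) []byte {
	var out []byte
	for i := 0; i < len(data); i++ {
		b := data[i]
		if b == '|' && i+1 < len(data) {
			i++
			switch data[i] {
			case 'S':
				b = '$'
			case '|':
				// keep '|'
			default:
				panic("unexpected escape sequence")
			}
		}
		out = append(out, b)
	}
	return out
}

func main() {
	fmt.Printf("%q\n", unescape([]byte("a|Sb||c"))) // "a$b|c"
}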
-const ( - // Objects - packageTag = -(iota + 1) - constTag - typeTag - varTag - funcTag - endTag - - // Types - namedTag - arrayTag - sliceTag - dddTag - structTag - pointerTag - signatureTag - interfaceTag - mapTag - chanTag - - // Values - falseTag - trueTag - int64Tag - floatTag - fractionTag // not used by gc - complexTag - stringTag - nilTag // only used by gc (appears in exported inlined function bodies) - unknownTag // not used by gc (only appears in packages with errors) - - // Type aliases - aliasTag -) - var predeclOnce sync.Once var predecl []types.Type // initialized lazily diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index a973dece93..b1223713b9 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -230,20 +230,17 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func // Or, define a new standard go/types/gcexportdata package. fset := token.NewFileSet() - // The indexed export format starts with an 'i'; the older - // binary export format starts with a 'c', 'd', or 'v' - // (from "version"). Select appropriate importer. + // Select appropriate importer. if len(data) > 0 { switch data[0] { - case 'i': - _, pkg, err := IImportData(fset, packages, data[1:], id) - return pkg, err + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'v', 'c', 'd': - _, pkg, err := BImportData(fset, packages, data, id) + case 'i': // indexed, till go1.19 + _, pkg, err := IImportData(fset, packages, data[1:], id) return pkg, err - case 'u': + case 'u': // unified, from go1.20 _, pkg, err := UImportData(fset, packages, data[1:size], id) return pkg, err diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index a0dc0b5e27..6103dd7102 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -22,17 +22,23 @@ import ( "strconv" "strings" + "golang.org/x/tools/go/types/objectpath" "golang.org/x/tools/internal/tokeninternal" "golang.org/x/tools/internal/typeparams" ) // IExportShallow encodes "shallow" export data for the specified package. // -// No promises are made about the encoding other than that it can be -// decoded by the same version of IIExportShallow. If you plan to save -// export data in the file system, be sure to include a cryptographic -// digest of the executable in the key to avoid version skew. -func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) { +// No promises are made about the encoding other than that it can be decoded by +// the same version of IIExportShallow. If you plan to save export data in the +// file system, be sure to include a cryptographic digest of the executable in +// the key to avoid version skew. +// +// If the provided reportf func is non-nil, it will be used for reporting bugs +// encountered during export. +// TODO(rfindley): remove reportf when we are confident enough in the new +// objectpath encoding. +func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) { // In principle this operation can only fail if out.Write fails, // but that's impossible for bytes.Buffer---and as a matter of // fact iexportCommon doesn't even check for I/O errors. 
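With the binary reader gone, the rewritten Import in gcimporter.go above selects a decoder purely from the first byte of the export data and rejects the legacy binary form. A minimal standalone classifier mirroring that switch (standard library only; the version ranges come from the comments in the hunk):

package main

import "fmt"

func classify(data []byte) string {
	if len(data) == 0 {
		return "empty export data"
	}
	switch data[0] {
	case 'v', 'c', 'd':
		return "binary format (used until go1.10; now rejected)"
	case 'i':
		return "indexed format (used until go1.19)"
	case 'u':
		return "unified format (used from go1.20)"
	default:
		return "unknown format"
	}
}

func main() {
	fmt.Println(classify([]byte{'u'})) // unified format (used from go1.20)
}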
@@ -47,19 +53,27 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) { // IImportShallow decodes "shallow" types.Package data encoded by // IExportShallow in the same executable. This function cannot import data from // cmd/compile or gcexportdata.Write. -func IImportShallow(fset *token.FileSet, getPackage GetPackageFunc, data []byte, path string, insert InsertType) (*types.Package, error) { +// +// The importer calls getPackages to obtain package symbols for all +// packages mentioned in the export data, including the one being +// decoded. +// +// If the provided reportf func is non-nil, it will be used for reporting bugs +// encountered during import. +// TODO(rfindley): remove reportf when we are confident enough in the new +// objectpath encoding. +func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, path string, reportf ReportFunc) (*types.Package, error) { const bundle = false - pkgs, err := iimportCommon(fset, getPackage, data, bundle, path, insert) + const shallow = true + pkgs, err := iimportCommon(fset, getPackages, data, bundle, path, shallow, reportf) if err != nil { return nil, err } return pkgs[0], nil } -// InsertType is the type of a function that creates a types.TypeName -// object for a named type and inserts it into the scope of the -// specified Package. -type InsertType = func(pkg *types.Package, name string) +// ReportFunc is the type of a function used to report formatted bugs. +type ReportFunc = func(string, ...interface{}) // Current bundled export format version. Increase with each format change. // 0: initial implementation @@ -313,8 +327,9 @@ type iexporter struct { out *bytes.Buffer version int - shallow bool // don't put types from other packages in the index - localpkg *types.Package // (nil in bundle mode) + shallow bool // don't put types from other packages in the index + objEncoder *objectpath.Encoder // encodes objects from other packages in shallow mode; lazily allocated + localpkg *types.Package // (nil in bundle mode) // allPkgs tracks all packages that have been referenced by // the export data, so we can ensure to include them in the @@ -354,6 +369,17 @@ func (p *iexporter) trace(format string, args ...interface{}) { fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) } +// objectpathEncoder returns the lazily allocated objectpath.Encoder to use +// when encoding objects in other packages during shallow export. +// +// Using a shared Encoder amortizes some of cost of objectpath search. +func (p *iexporter) objectpathEncoder() *objectpath.Encoder { + if p.objEncoder == nil { + p.objEncoder = new(objectpath.Encoder) + } + return p.objEncoder +} + // stringOff returns the offset of s within the string section. // If not already present, it's added to the end. func (p *iexporter) stringOff(s string) uint64 { @@ -413,7 +439,6 @@ type exportWriter struct { p *iexporter data intWriter - currPkg *types.Package prevFile string prevLine int64 prevColumn int64 @@ -436,7 +461,6 @@ func (p *iexporter) doDecl(obj types.Object) { }() } w := p.newWriter() - w.setPkg(obj.Pkg(), false) switch obj := obj.(type) { case *types.Var: @@ -673,6 +697,9 @@ func (w *exportWriter) qualifiedType(obj *types.TypeName) { w.pkg(obj.Pkg()) } +// TODO(rfindley): what does 'pkg' even mean here? It would be better to pass +// it in explicitly into signatures and structs that may use it for +// constructing fields. 
func (w *exportWriter) typ(t types.Type, pkg *types.Package) { w.data.uint64(w.p.typOff(t, pkg)) } @@ -764,30 +791,53 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { case *types.Signature: w.startType(signatureType) - w.setPkg(pkg, true) + w.pkg(pkg) w.signature(t) case *types.Struct: w.startType(structType) n := t.NumFields() + // Even for struct{} we must emit some qualifying package, because that's + // what the compiler does, and thus that's what the importer expects. + fieldPkg := pkg if n > 0 { - w.setPkg(t.Field(0).Pkg(), true) // qualifying package for field objects - } else { - w.setPkg(pkg, true) + fieldPkg = t.Field(0).Pkg() + } + if fieldPkg == nil { + // TODO(rfindley): improve this very hacky logic. + // + // The importer expects a package to be set for all struct types, even + // those with no fields. A better encoding might be to set NumFields + // before pkg. setPkg panics with a nil package, which may be possible + // to reach with invalid packages (and perhaps valid packages, too?), so + // (arbitrarily) set the localpkg if available. + // + // Alternatively, we may be able to simply guarantee that pkg != nil, by + // reconsidering the encoding of constant values. + if w.p.shallow { + fieldPkg = w.p.localpkg + } else { + panic(internalErrorf("no package to set for empty struct")) + } } + w.pkg(fieldPkg) w.uint64(uint64(n)) + for i := 0; i < n; i++ { f := t.Field(i) + if w.p.shallow { + w.objectPath(f) + } w.pos(f.Pos()) w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg - w.typ(f.Type(), pkg) + w.typ(f.Type(), fieldPkg) w.bool(f.Anonymous()) w.string(t.Tag(i)) // note (or tag) } case *types.Interface: w.startType(interfaceType) - w.setPkg(pkg, true) + w.pkg(pkg) n := t.NumEmbeddeds() w.uint64(uint64(n)) @@ -802,10 +852,16 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { w.typ(ft, tPkg) } + // See comment for struct fields. In shallow mode we change the encoding + // for interface methods that are promoted from other packages. + n = t.NumExplicitMethods() w.uint64(uint64(n)) for i := 0; i < n; i++ { m := t.ExplicitMethod(i) + if w.p.shallow { + w.objectPath(m) + } w.pos(m.Pos()) w.string(m.Name()) sig, _ := m.Type().(*types.Signature) @@ -827,12 +883,61 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { } } -func (w *exportWriter) setPkg(pkg *types.Package, write bool) { - if write { - w.pkg(pkg) +// objectPath writes the package and objectPath to use to look up obj in a +// different package, when encoding in "shallow" mode. +// +// When doing a shallow import, the importer creates only the local package, +// and requests package symbols for dependencies from the client. +// However, certain types defined in the local package may hold objects defined +// (perhaps deeply) within another package. +// +// For example, consider the following: +// +// package a +// func F() chan * map[string] struct { X int } +// +// package b +// import "a" +// var B = a.F() +// +// In this example, the type of b.B holds fields defined in package a. +// In order to have the correct canonical objects for the field defined in the +// type of B, they are encoded as objectPaths and later looked up in the +// importer. The same problem applies to interface methods. +func (w *exportWriter) objectPath(obj types.Object) { + if obj.Pkg() == nil || obj.Pkg() == w.p.localpkg { + // obj.Pkg() may be nil for the builtin error.Error. + // In this case, or if obj is declared in the local package, no need to + // encode. 
+ w.string("") + return } - - w.currPkg = pkg + objectPath, err := w.p.objectpathEncoder().For(obj) + if err != nil { + // Fall back to the empty string, which will cause the importer to create a + // new object, which matches earlier behavior. Creating a new object is + // sufficient for many purposes (such as type checking), but causes certain + // references algorithms to fail (golang/go#60819). However, we didn't + // notice this problem during months of gopls@v0.12.0 testing. + // + // TODO(golang/go#61674): this workaround is insufficient, as in the case + // where the field forwarded from an instantiated type that may not appear + // in the export data of the original package: + // + // // package a + // type A[P any] struct{ F P } + // + // // package b + // type B a.A[int] + // + // We need to update references algorithms not to depend on this + // de-duplication, at which point we may want to simply remove the + // workaround here. + w.string("") + return + } + w.string(string(objectPath)) + w.pkg(obj.Pkg()) } func (w *exportWriter) signature(sig *types.Signature) { @@ -913,6 +1018,17 @@ func (w *exportWriter) value(typ types.Type, v constant.Value) { w.int64(int64(v.Kind())) } + if v.Kind() == constant.Unknown { + // golang/go#60605: treat unknown constant values as if they have invalid type + // + // This loses some fidelity over the package type-checked from source, but that + // is acceptable. + // + // TODO(rfindley): we should switch on the recorded constant kind rather + // than the constant type + return + } + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { case types.IsBoolean: w.bool(constant.BoolVal(v)) @@ -969,6 +1085,16 @@ func constantToFloat(x constant.Value) *big.Float { return &f } +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. + bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + // mpint exports a multi-precision integer. // // For unsigned types, small values are written out as a single @@ -1178,3 +1304,19 @@ func (q *objQueue) popHead() types.Object { q.head++ return obj } + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +// TODO(adonovan): make this call panic, so that it's symmetric with errorf. +// Otherwise it's easy to forget to do anything with the error. +// +// TODO(adonovan): also, consider switching the names "errorf" and +// "internalErrorf" as the former is used for bugs, whose cause is +// internal inconsistency, whereas the latter is used for ordinary +// situations like bad input, whose cause is external. 
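The new valueToRat above has to reverse the byte slice because go/constant.Bytes yields a little-endian encoding while math/big.Int.SetBytes expects big-endian. A short standalone check of that mismatch (standard library only):

package main

import (
	"fmt"
	"go/constant"
	"math/big"
)

func main() {
	x := constant.MakeInt64(258) // 0x0102
	bytes := constant.Bytes(x)   // little-endian: [0x02 0x01]
	// Reverse in place, exactly as valueToRat does, before handing the
	// big-endian form to math/big.
	for i, j := 0, len(bytes)-1; i < j; i, j = i+1, j-1 {
		bytes[i], bytes[j] = bytes[j], bytes[i]
	}
	fmt.Println(new(big.Int).SetBytes(bytes)) // 258
}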
+func internalErrorf(format string, args ...interface{}) error { + return internalError(fmt.Sprintf(format, args...)) +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index be6dace153..8e64cf644f 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -21,6 +21,7 @@ import ( "sort" "strings" + "golang.org/x/tools/go/types/objectpath" "golang.org/x/tools/internal/typeparams" ) @@ -85,7 +86,7 @@ const ( // If the export data version is not recognized or the format is otherwise // compromised, an error is returned. func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { - pkgs, err := iimportCommon(fset, GetPackageFromMap(imports), data, false, path, nil) + pkgs, err := iimportCommon(fset, GetPackagesFromMap(imports), data, false, path, false, nil) if err != nil { return 0, nil, err } @@ -94,33 +95,49 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data [] // IImportBundle imports a set of packages from the serialized package bundle. func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { - return iimportCommon(fset, GetPackageFromMap(imports), data, true, "", nil) + return iimportCommon(fset, GetPackagesFromMap(imports), data, true, "", false, nil) } -// A GetPackageFunc is a function that gets the package with the given path -// from the importer state, creating it (with the specified name) if necessary. -// It is an abstraction of the map historically used to memoize package creation. +// A GetPackagesFunc function obtains the non-nil symbols for a set of +// packages, creating and recursively importing them as needed. An +// implementation should store each package symbol is in the Pkg +// field of the items array. // -// Two calls with the same path must return the same package. -// -// If the given getPackage func returns nil, the import will fail. -type GetPackageFunc = func(path, name string) *types.Package +// Any error causes importing to fail. This can be used to quickly read +// the import manifest of an export data file without fully decoding it. +type GetPackagesFunc = func(items []GetPackagesItem) error + +// A GetPackagesItem is a request from the importer for the package +// symbol of the specified name and path. +type GetPackagesItem struct { + Name, Path string + Pkg *types.Package // to be filled in by GetPackagesFunc call + + // private importer state + pathOffset uint64 + nameIndex map[string]uint64 +} -// GetPackageFromMap returns a GetPackageFunc that retrieves packages from the -// given map of package path -> package. +// GetPackagesFromMap returns a GetPackagesFunc that retrieves +// packages from the given map of package path to package. // -// The resulting func may mutate m: if a requested package is not found, a new -// package will be inserted into m. -func GetPackageFromMap(m map[string]*types.Package) GetPackageFunc { - return func(path, name string) *types.Package { - if _, ok := m[path]; !ok { - m[path] = types.NewPackage(path, name) +// The returned function may mutate m: each requested package that is not +// found is created with types.NewPackage and inserted into m. 
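The GetPackagesFunc documentation above notes that any error from the callback aborts decoding, which is what makes it possible to read just the import manifest of an export data file without decoding the rest. A hedged sketch of that idea; the getPackagesItem/getPackagesFunc declarations below are local mirrors of the exported types (so the example compiles outside this internal package) and manifestOnly is a made-up helper name:

package main

import (
	"errors"
	"fmt"
	"go/types"
)

// Local stand-ins with the same shape as GetPackagesItem / GetPackagesFunc.
type getPackagesItem struct {
	Name, Path string
	Pkg        *types.Package
}

type getPackagesFunc = func(items []getPackagesItem) error

// manifestOnly records the requested import paths and then fails on purpose,
// relying on the rule that any error from the callback stops the import.
func manifestOnly(paths *[]string) getPackagesFunc {
	return func(items []getPackagesItem) error {
		for _, item := range items {
			*paths = append(*paths, item.Path)
		}
		return errors.New("manifest read; stop decoding")
	}
}

func main() {
	var paths []string
	get := manifestOnly(&paths)
	_ = get([]getPackagesItem{{Name: "fmt", Path: "fmt"}, {Name: "types", Path: "go/types"}})
	fmt.Println(paths) // [fmt go/types]
}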
+func GetPackagesFromMap(m map[string]*types.Package) GetPackagesFunc { + return func(items []GetPackagesItem) error { + for i, item := range items { + pkg, ok := m[item.Path] + if !ok { + pkg = types.NewPackage(item.Path, item.Name) + m[item.Path] = pkg + } + items[i].Pkg = pkg } - return m[path] + return nil } } -func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) { +func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, bundle bool, path string, shallow bool, reportf ReportFunc) (pkgs []*types.Package, err error) { const currentVersion = iexportVersionCurrent version := int64(-1) if !debug { @@ -131,7 +148,7 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, } else if version > currentVersion { err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) } else { - err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e) } } }() @@ -140,11 +157,8 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, r := &intReader{bytes.NewReader(data), path} if bundle { - bundleVersion := r.uint64() - switch bundleVersion { - case bundleVersion: - default: - errorf("unknown bundle format version %d", bundleVersion) + if v := r.uint64(); v != bundleVersion { + errorf("unknown bundle format version %d", v) } } @@ -162,7 +176,7 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, sLen := int64(r.uint64()) var fLen int64 var fileOffset []uint64 - if insert != nil { + if shallow { // Shallow mode uses a different position encoding. fLen = int64(r.uint64()) fileOffset = make([]uint64, r.uint64()) @@ -181,7 +195,8 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, p := iimporter{ version: int(version), ipath: path, - insert: insert, + shallow: shallow, + reportf: reportf, stringData: stringData, stringCache: make(map[uint64]string), @@ -208,8 +223,9 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, p.typCache[uint64(i)] = pt } - pkgList := make([]*types.Package, r.uint64()) - for i := range pkgList { + // Gather the relevant packages from the manifest. + items := make([]GetPackagesItem, r.uint64()) + for i := range items { pkgPathOff := r.uint64() pkgPath := p.stringAt(pkgPathOff) pkgName := p.stringAt(r.uint64()) @@ -218,29 +234,42 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, if pkgPath == "" { pkgPath = path } - pkg := getPackage(pkgPath, pkgName) - if pkg == nil { - errorf("internal error: getPackage returned nil package for %s", pkgPath) - } else if pkg.Name() != pkgName { - errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) - } - if i == 0 && !bundle { - p.localpkg = pkg - } - - p.pkgCache[pkgPathOff] = pkg + items[i].Name = pkgName + items[i].Path = pkgPath + items[i].pathOffset = pkgPathOff // Read index for package. nameIndex := make(map[string]uint64) nSyms := r.uint64() - // In shallow mode we don't expect an index for other packages. - assert(nSyms == 0 || p.localpkg == pkg || p.insert == nil) + // In shallow mode, only the current package (i=0) has an index. 
+ assert(!(shallow && i > 0 && nSyms != 0)) for ; nSyms > 0; nSyms-- { name := p.stringAt(r.uint64()) nameIndex[name] = r.uint64() } - p.pkgIndex[pkg] = nameIndex + items[i].nameIndex = nameIndex + } + + // Request packages all at once from the client, + // enabling a parallel implementation. + if err := getPackages(items); err != nil { + return nil, err // don't wrap this error + } + + // Check the results and complete the index. + pkgList := make([]*types.Package, len(items)) + for i, item := range items { + pkg := item.Pkg + if pkg == nil { + errorf("internal error: getPackages returned nil package for %q", item.Path) + } else if pkg.Path() != item.Path { + errorf("internal error: getPackages returned wrong path %q, want %q", pkg.Path(), item.Path) + } else if pkg.Name() != item.Name { + errorf("internal error: getPackages returned wrong name %s for package %q, want %s", pkg.Name(), item.Path, item.Name) + } + p.pkgCache[item.pathOffset] = pkg + p.pkgIndex[pkg] = item.nameIndex pkgList[i] = pkg } @@ -299,6 +328,13 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, typ.Complete() } + // Workaround for golang/go#61561. See the doc for instanceList for details. + for _, typ := range p.instanceList { + if iface, _ := typ.Underlying().(*types.Interface); iface != nil { + iface.Complete() + } + } + return pkgs, nil } @@ -311,8 +347,8 @@ type iimporter struct { version int ipath string - localpkg *types.Package - insert func(pkg *types.Package, name string) // "shallow" mode only + shallow bool + reportf ReportFunc // if non-nil, used to report bugs stringData []byte stringCache map[uint64]string @@ -329,6 +365,12 @@ type iimporter struct { fake fakeFileSet interfaceList []*types.Interface + // Workaround for the go/types bug golang/go#61561: instances produced during + // instantiation may contain incomplete interfaces. Here we only complete the + // underlying type of the instance, which is the most common case but doesn't + // handle parameterized interface literals defined deeper in the type. + instanceList []types.Type // instances for later completion (see golang/go#61561) + // Arguments for calls to SetConstraint that are deferred due to recursive types later []setConstraintArgs @@ -360,13 +402,9 @@ func (p *iimporter) doDecl(pkg *types.Package, name string) { off, ok := p.pkgIndex[pkg][name] if !ok { - // In "shallow" mode, call back to the application to - // find the object and insert it into the package scope. - if p.insert != nil { - assert(pkg != p.localpkg) - p.insert(pkg, name) // "can't fail" - return - } + // In deep mode, the index should be complete. In shallow + // mode, we should have already recursively loaded necessary + // dependencies so the above Lookup succeeds. 
errorf("%v.%v not in index", pkg, name) } @@ -733,7 +771,8 @@ func (r *importReader) qualifiedIdent() (*types.Package, string) { } func (r *importReader) pos() token.Pos { - if r.p.insert != nil { // shallow mode + if r.p.shallow { + // precise offsets are encoded only in shallow mode return r.posv2() } if r.p.version >= iexportVersionPosCol { @@ -834,13 +873,28 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { fields := make([]*types.Var, r.uint64()) tags := make([]string, len(fields)) for i := range fields { + var field *types.Var + if r.p.shallow { + field, _ = r.objectPathObject().(*types.Var) + } + fpos := r.pos() fname := r.ident() ftyp := r.typ() emb := r.bool() tag := r.string() - fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + // Either this is not a shallow import, the field is local, or the + // encoded objectPath failed to produce an object (a bug). + // + // Even in this last, buggy case, fall back on creating a new field. As + // discussed in iexport.go, this is not correct, but mostly works and is + // preferable to failing (for now at least). + if field == nil { + field = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + } + + fields[i] = field tags[i] = tag } return types.NewStruct(fields, tags) @@ -856,6 +910,11 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { methods := make([]*types.Func, r.uint64()) for i := range methods { + var method *types.Func + if r.p.shallow { + method, _ = r.objectPathObject().(*types.Func) + } + mpos := r.pos() mname := r.ident() @@ -865,9 +924,12 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { if base != nil { recv = types.NewVar(token.NoPos, r.currPkg, "", base) } - msig := r.signature(recv, nil, nil) - methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig) + + if method == nil { + method = types.NewFunc(mpos, r.currPkg, mname, msig) + } + methods[i] = method } typ := newInterface(methods, embeddeds) @@ -905,6 +967,9 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { // we must always use the methods of the base (orig) type. // TODO provide a non-nil *Environment t, _ := typeparams.Instantiate(nil, baseType, targs, false) + + // Workaround for golang/go#61561. See the doc for instanceList for details. + r.p.instanceList = append(r.p.instanceList, t) return t case unionType: @@ -923,6 +988,26 @@ func (r *importReader) kind() itag { return itag(r.uint64()) } +// objectPathObject is the inverse of exportWriter.objectPath. +// +// In shallow mode, certain fields and methods may need to be looked up in an +// imported package. See the doc for exportWriter.objectPath for a full +// explanation. 
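objectPathObject below is the reader-side inverse of exportWriter.objectPath: it turns a stored string back into a canonical object via the public objectpath package. A self-contained round trip over a made-up single-file package, showing both directions (go/types plus golang.org/x/tools/go/types/objectpath):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

func main() {
	src := `package a; type S struct{ X int }`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "a.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := new(types.Config).Check("a", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	field := pkg.Scope().Lookup("S").Type().Underlying().(*types.Struct).Field(0)

	// Writer side: a shared Encoder, as in objectpathEncoder above,
	// amortizes repeated path searches within one export.
	enc := new(objectpath.Encoder)
	path, err := enc.For(field)
	if err != nil {
		panic(err)
	}

	// Reader side: resolve the path back to the canonical field object.
	obj, err := objectpath.Object(pkg, path)
	if err != nil {
		panic(err)
	}
	// Prints the path (its exact format is an implementation detail) and true.
	fmt.Println(path, obj == field)
}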
+func (r *importReader) objectPathObject() types.Object { + objPath := objectpath.Path(r.string()) + if objPath == "" { + return nil + } + pkg := r.pkg() + obj, err := objectpath.Object(pkg, objPath) + if err != nil { + if r.p.reportf != nil { + r.p.reportf("failed to find object for objectPath %q: %v", objPath, err) + } + } + return obj +} + func (r *importReader) signature(recv *types.Var, rparams []*typeparams.TypeParam, tparams []*typeparams.TypeParam) *types.Signature { params := r.paramList() results := r.paramList() diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 34fc783f82..b977435f62 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -10,6 +10,7 @@ package gcimporter import ( + "fmt" "go/token" "go/types" "sort" @@ -63,6 +64,14 @@ type typeInfo struct { } func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + if !debug { + defer func() { + if x := recover(); x != nil { + err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x) + } + }() + } + s := string(data) s = s[:strings.LastIndex(s, "\n$$\n")] input := pkgbits.NewPkgDecoder(path, s) diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index 3c0afe723b..53cf66da01 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -24,6 +24,9 @@ import ( exec "golang.org/x/sys/execabs" "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" + "golang.org/x/tools/internal/event/tag" ) // An Runner will run go command invocations and serialize @@ -53,9 +56,19 @@ func (runner *Runner) initialize() { // 1.14: go: updating go.mod: existing contents have changed since last read var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`) +// verb is an event label for the go command verb. +var verb = keys.NewString("verb", "go command verb") + +func invLabels(inv Invocation) []label.Label { + return []label.Label{verb.Of(inv.Verb), tag.Directory.Of(inv.WorkingDir)} +} + // Run is a convenience wrapper around RunRaw. // It returns only stdout and a "friendly" error. func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...) + defer done() + stdout, _, friendly, _ := runner.RunRaw(ctx, inv) return stdout, friendly } @@ -63,6 +76,9 @@ func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, e // RunPiped runs the invocation serially, always waiting for any concurrent // invocations to complete first. func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error { + ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...) + defer done() + _, err := runner.runPiped(ctx, inv, stdout, stderr) return err } @@ -70,6 +86,8 @@ func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stde // RunRaw runs the invocation, serializing requests only if they fight over // go.mod changes. 
func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...) + defer done() // Make sure the runner is always initialized. runner.initialize() @@ -301,7 +319,7 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { // Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close // should cause the Read call in io.Copy to unblock and return // immediately, but we still need to receive from stdoutErr to confirm - // that that has happened. + // that it has happened. <-stdoutErr err2 = ctx.Err() } @@ -315,7 +333,7 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { // one goroutine at a time will call Write.” // // Since we're starting a goroutine that writes to cmd.Stdout, we must - // also update cmd.Stderr so that that still holds. + // also update cmd.Stderr so that it still holds. func() { defer func() { recover() }() if cmd.Stderr == prevStdout { diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index cfba8189f1..d0d0649fe2 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -23,6 +23,7 @@ package typeparams import ( + "fmt" "go/ast" "go/token" "go/types" @@ -105,6 +106,31 @@ func OriginMethod(fn *types.Func) *types.Func { } orig := NamedTypeOrigin(named) gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name()) + + // This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In: + // package p + // type T *int + // func (*T) f() {} + // LookupFieldOrMethod(T, true, p, f)=nil, but NewMethodSet(*T)={(*T).f}. + // Here we make them consistent by force. + // (The go/types bug is general, but this workaround is reached only + // for generic T thanks to the early return above.) + if gfn == nil { + mset := types.NewMethodSet(types.NewPointer(orig)) + for i := 0; i < mset.Len(); i++ { + m := mset.At(i) + if m.Obj().Id() == fn.Id() { + gfn = m.Obj() + break + } + } + } + + // In golang/go#61196, we observe another crash, this time inexplicable. + if gfn == nil { + panic(fmt.Sprintf("missing origin method for %s.%s; named == origin: %t, named.NumMethods(): %d, origin.NumMethods(): %d", named, fn, named == orig, named.NumMethods(), orig.NumMethods())) + } + return gfn.(*types.Func) } diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go index 993135ec90..71248209ee 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -81,13 +81,13 @@ func CoreType(T types.Type) types.Type { // restrictions may be arbitrarily complex. 
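The OriginMethod change above works around golang/go#60634 by falling back from types.LookupFieldOrMethod to scanning types.NewMethodSet(types.NewPointer(orig)). For ordinary, valid code the two routes agree; the bug itself involves invalid receivers and is not reproduced here. A standalone sketch of both lookups (standard library only; the source snippet is made up):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	src := `package p; type T struct{}; func (*T) F() {}`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	T := pkg.Scope().Lookup("T").Type()

	// Route 1: direct lookup, which OriginMethod tries first.
	obj, _, _ := types.LookupFieldOrMethod(T, true, pkg, "F")
	fmt.Println("LookupFieldOrMethod:", obj)

	// Route 2: the fallback, walking the method set of *T and matching by name.
	mset := types.NewMethodSet(types.NewPointer(T))
	for i := 0; i < mset.Len(); i++ {
		if m := mset.At(i); m.Obj().Name() == "F" {
			fmt.Println("NewMethodSet:", m.Obj())
		}
	}
}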
For example, consider the // following: // -// type A interface{ ~string|~[]byte } +// type A interface{ ~string|~[]byte } // -// type B interface{ int|string } +// type B interface{ int|string } // -// type C interface { ~string|~int } +// type C interface { ~string|~int } // -// type T[P interface{ A|B; C }] int +// type T[P interface{ A|B; C }] int // // In this example, the structural type restriction of P is ~string|int: A|B // expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go index 933106a23d..cbd12f8013 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/termlist.go +++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go @@ -30,7 +30,7 @@ func (xl termlist) String() string { var buf bytes.Buffer for i, x := range xl { if i > 0 { - buf.WriteString(" ∪ ") + buf.WriteString(" | ") } buf.WriteString(x.String()) } diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go index b4788978ff..7ed86e1711 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go +++ b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go @@ -129,7 +129,7 @@ func NamedTypeArgs(*types.Named) *TypeList { } // NamedTypeOrigin is the identity method at this Go version. -func NamedTypeOrigin(named *types.Named) types.Type { +func NamedTypeOrigin(named *types.Named) *types.Named { return named } diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go index 114a36b866..cf301af1db 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go +++ b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go @@ -103,7 +103,7 @@ func NamedTypeArgs(named *types.Named) *TypeList { } // NamedTypeOrigin returns named.Orig(). -func NamedTypeOrigin(named *types.Named) types.Type { +func NamedTypeOrigin(named *types.Named) *types.Named { return named.Origin() } diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go index 7ddee28d98..7350bb702a 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go +++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go @@ -10,11 +10,10 @@ import "go/types" // A term describes elementary type sets: // -// ∅: (*term)(nil) == ∅ // set of no types (empty set) -// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) -// T: &term{false, T} == {T} // set of type T -// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t -// +// ∅: (*term)(nil) == ∅ // set of no types (empty set) +// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) +// T: &term{false, T} == {T} // set of type T +// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t type term struct { tilde bool // valid if typ != nil typ types.Type diff --git a/vendor/golang.org/x/tools/internal/typesinternal/objectpath.go b/vendor/golang.org/x/tools/internal/typesinternal/objectpath.go new file mode 100644 index 0000000000..5e96e89557 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/objectpath.go @@ -0,0 +1,24 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package typesinternal + +import "go/types" + +// This file contains back doors that allow gopls to avoid method sorting when +// using the objectpath package. +// +// This is performance-critical in certain repositories, but changing the +// behavior of the objectpath package is still being discussed in +// golang/go#61443. If we decide to remove the sorting in objectpath we can +// simply delete these back doors. Otherwise, we should add a new API to +// objectpath that allows controlling the sorting. + +// SkipEncoderMethodSorting marks enc (which must be an *objectpath.Encoder) as +// not requiring sorted methods. +var SkipEncoderMethodSorting func(enc interface{}) + +// ObjectpathObject is like objectpath.Object, but allows suppressing method +// sorting. +var ObjectpathObject func(pkg *types.Package, p string, skipMethodSorting bool) (types.Object, error) diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index 3c53fbc63b..ce7d4351b2 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -11,8 +11,6 @@ import ( "go/types" "reflect" "unsafe" - - "golang.org/x/tools/go/types/objectpath" ) func SetUsesCgo(conf *types.Config) bool { @@ -52,10 +50,3 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, } var SetGoVersion = func(conf *types.Config, version string) bool { return false } - -// NewObjectpathEncoder returns a function closure equivalent to -// objectpath.For but amortized for multiple (sequential) calls. -// It is a temporary workaround, pending the approval of proposal 58668. -// -//go:linkname NewObjectpathFunc golang.org/x/tools/go/types/objectpath.newEncoderFor -func NewObjectpathFunc() func(types.Object) (objectpath.Path, error) diff --git a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_amd64.s b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_amd64.s index e6b4e56a95..c38f1cb661 100644 --- a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_amd64.s +++ b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_amd64.s @@ -16,28 +16,28 @@ #include "textflag.h" -TEXT ·andUint32(SB),NOSPLIT,$0-12 +TEXT ·andUint32(SB),NOSPLIT|NOFRAME,$0-12 MOVQ addr+0(FP), BX MOVL val+8(FP), AX LOCK ANDL AX, 0(BX) RET -TEXT ·orUint32(SB),NOSPLIT,$0-12 +TEXT ·orUint32(SB),NOSPLIT|NOFRAME,$0-12 MOVQ addr+0(FP), BX MOVL val+8(FP), AX LOCK ORL AX, 0(BX) RET -TEXT ·xorUint32(SB),NOSPLIT,$0-12 +TEXT ·xorUint32(SB),NOSPLIT|NOFRAME,$0-12 MOVQ addr+0(FP), BX MOVL val+8(FP), AX LOCK XORL AX, 0(BX) RET -TEXT ·compareAndSwapUint32(SB),NOSPLIT,$0-20 +TEXT ·compareAndSwapUint32(SB),NOSPLIT|NOFRAME,$0-20 MOVQ addr+0(FP), DI MOVL old+8(FP), AX MOVL new+12(FP), DX @@ -46,28 +46,28 @@ TEXT ·compareAndSwapUint32(SB),NOSPLIT,$0-20 MOVL AX, ret+16(FP) RET -TEXT ·andUint64(SB),NOSPLIT,$0-16 +TEXT ·andUint64(SB),NOSPLIT|NOFRAME,$0-16 MOVQ addr+0(FP), BX MOVQ val+8(FP), AX LOCK ANDQ AX, 0(BX) RET -TEXT ·orUint64(SB),NOSPLIT,$0-16 +TEXT ·orUint64(SB),NOSPLIT|NOFRAME,$0-16 MOVQ addr+0(FP), BX MOVQ val+8(FP), AX LOCK ORQ AX, 0(BX) RET -TEXT ·xorUint64(SB),NOSPLIT,$0-16 +TEXT ·xorUint64(SB),NOSPLIT|NOFRAME,$0-16 MOVQ addr+0(FP), BX MOVQ val+8(FP), AX LOCK XORQ AX, 0(BX) RET -TEXT ·compareAndSwapUint64(SB),NOSPLIT,$0-32 +TEXT ·compareAndSwapUint64(SB),NOSPLIT|NOFRAME,$0-32 MOVQ addr+0(FP), DI MOVQ old+8(FP), AX MOVQ new+16(FP), DX diff --git a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_arm64.s 
b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_arm64.s index b2b950c741..cf922117d8 100644 --- a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_arm64.s +++ b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_arm64.s @@ -17,7 +17,7 @@ #include "textflag.h" TEXT ·andUint32(SB),NOSPLIT,$0-12 - MOVD ptr+0(FP), R0 + MOVD addr+0(FP), R0 MOVW val+8(FP), R1 MOVBU ·arm64HasATOMICS(SB), R4 CBZ R4, load_store_loop @@ -32,7 +32,7 @@ load_store_loop: RET TEXT ·orUint32(SB),NOSPLIT,$0-12 - MOVD ptr+0(FP), R0 + MOVD addr+0(FP), R0 MOVW val+8(FP), R1 MOVBU ·arm64HasATOMICS(SB), R4 CBZ R4, load_store_loop @@ -46,7 +46,7 @@ load_store_loop: RET TEXT ·xorUint32(SB),NOSPLIT,$0-12 - MOVD ptr+0(FP), R0 + MOVD addr+0(FP), R0 MOVW val+8(FP), R1 MOVBU ·arm64HasATOMICS(SB), R4 CBZ R4, load_store_loop @@ -66,7 +66,7 @@ TEXT ·compareAndSwapUint32(SB),NOSPLIT,$0-20 MOVBU ·arm64HasATOMICS(SB), R4 CBZ R4, load_store_loop CASALW R1, (R0), R2 - MOVW R1, prev+16(FP) + MOVW R1, ret+16(FP) RET load_store_loop: LDAXRW (R0), R3 @@ -75,11 +75,11 @@ load_store_loop: STLXRW R2, (R0), R4 CBNZ R4, load_store_loop ok: - MOVW R3, prev+16(FP) + MOVW R3, ret+16(FP) RET TEXT ·andUint64(SB),NOSPLIT,$0-16 - MOVD ptr+0(FP), R0 + MOVD addr+0(FP), R0 MOVD val+8(FP), R1 MOVBU ·arm64HasATOMICS(SB), R4 CBZ R4, load_store_loop @@ -94,7 +94,7 @@ load_store_loop: RET TEXT ·orUint64(SB),NOSPLIT,$0-16 - MOVD ptr+0(FP), R0 + MOVD addr+0(FP), R0 MOVD val+8(FP), R1 MOVBU ·arm64HasATOMICS(SB), R4 CBZ R4, load_store_loop @@ -108,7 +108,7 @@ load_store_loop: RET TEXT ·xorUint64(SB),NOSPLIT,$0-16 - MOVD ptr+0(FP), R0 + MOVD addr+0(FP), R0 MOVD val+8(FP), R1 MOVBU ·arm64HasATOMICS(SB), R4 CBZ R4, load_store_loop @@ -128,7 +128,7 @@ TEXT ·compareAndSwapUint64(SB),NOSPLIT,$0-32 MOVBU ·arm64HasATOMICS(SB), R4 CBZ R4, load_store_loop CASALD R1, (R0), R2 - MOVD R1, prev+24(FP) + MOVD R1, ret+24(FP) RET load_store_loop: LDAXR (R0), R3 @@ -137,5 +137,5 @@ load_store_loop: STLXR R2, (R0), R4 CBNZ R4, load_store_loop ok: - MOVD R3, prev+24(FP) + MOVD R3, ret+24(FP) RET diff --git a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_float64.go b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_float64.go new file mode 100644 index 0000000000..22e8e3b0a8 --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_float64.go @@ -0,0 +1,105 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package atomicbitops + +import ( + "math" + + "gvisor.dev/gvisor/pkg/sync" +) + +// Float64 is an atomic 64-bit floating-point number. +// +// +stateify savable +type Float64 struct { + _ sync.NoCopy + // bits stores the bit of a 64-bit floating point number. + // It is not (and should not be interpreted as) a real uint64. + bits Uint64 +} + +// FromFloat64 returns a Float64 initialized to value v. +// +//go:nosplit +func FromFloat64(v float64) Float64 { + return Float64{bits: FromUint64(math.Float64bits(v))} +} + +// Load loads the floating-point value. 
+// +//go:nosplit +func (f *Float64) Load() float64 { + return math.Float64frombits(f.bits.Load()) +} + +// RacyLoad is analogous to reading an atomic value without using +// synchronization. +// +// It may be helpful to document why a racy operation is permitted. +// +//go:nosplit +func (f *Float64) RacyLoad() float64 { + return math.Float64frombits(f.bits.RacyLoad()) +} + +// Store stores the given floating-point value in the Float64. +// +//go:nosplit +func (f *Float64) Store(v float64) { + f.bits.Store(math.Float64bits(v)) +} + +// RacyStore is analogous to setting an atomic value without using +// synchronization. +// +// It may be helpful to document why a racy operation is permitted. +// +//go:nosplit +func (f *Float64) RacyStore(v float64) { + f.bits.RacyStore(math.Float64bits(v)) +} + +// Swap stores the given value and returns the previously-stored one. +// +//go:nosplit +func (f *Float64) Swap(v float64) float64 { + return math.Float64frombits(f.bits.Swap(math.Float64bits(v))) +} + +// CompareAndSwap does a compare-and-swap operation on the float64 value. +// Note that unlike typical IEEE 754 semantics, this function will treat NaN +// as equal to itself if all of its bits exactly match. +// +//go:nosplit +func (f *Float64) CompareAndSwap(oldVal, newVal float64) bool { + return f.bits.CompareAndSwap(math.Float64bits(oldVal), math.Float64bits(newVal)) +} + +// Add increments the float by the given value. +// Note that unlike an atomic integer, this requires spin-looping until we win +// the compare-and-swap race, so this may take an indeterminate amount of time. +// +//go:nosplit +func (f *Float64) Add(v float64) { + // We do a racy load here because we optimistically think it may pass the + // compare-and-swap operation. If it doesn't, we'll load it safely, so this + // is OK and not a race for the overall intent of the user to add a number. 
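// Illustrative sketch (not part of the vendored diff): the optimistic racy
// load plus compare-and-swap retry described in the comment above (and
// completed just below) can be expressed with the standard library alone.
// addFloat64 and u are hypothetical names; the real type keeps the bits in the
// package's own Uint64 with race annotations.
//
// import ("math"; "sync/atomic")

// addFloat64 adds v to the float64 whose IEEE 754 bits are stored in u,
// retrying until its compare-and-swap wins, mirroring Float64.Add.
func addFloat64(u *atomic.Uint64, v float64) {
	for {
		oldBits := u.Load()
		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
		if u.CompareAndSwap(oldBits, newBits) {
			return
		}
	}
}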
+ sync.RaceDisable() + oldVal := f.RacyLoad() + for !f.CompareAndSwap(oldVal, oldVal+v) { + oldVal = f.Load() + } + sync.RaceEnable() +} diff --git a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_state_autogen.go index 9035323311..bc8fc35d22 100644 --- a/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/atomicbitops/atomicbitops_state_autogen.go @@ -11,6 +11,31 @@ import ( "gvisor.dev/gvisor/pkg/state" ) +func (f *Float64) StateTypeName() string { + return "pkg/atomicbitops.Float64" +} + +func (f *Float64) StateFields() []string { + return []string{ + "bits", + } +} + +func (f *Float64) beforeSave() {} + +// +checklocksignore +func (f *Float64) StateSave(stateSinkObject state.Sink) { + f.beforeSave() + stateSinkObject.Save(0, &f.bits) +} + +func (f *Float64) afterLoad() {} + +// +checklocksignore +func (f *Float64) StateLoad(stateSourceObject state.Source) { + stateSourceObject.Load(0, &f.bits) +} + func (b *Bool) StateTypeName() string { return "pkg/atomicbitops.Bool" } @@ -37,5 +62,6 @@ func (b *Bool) StateLoad(stateSourceObject state.Source) { } func init() { + state.Register((*Float64)(nil)) state.Register((*Bool)(nil)) } diff --git a/vendor/gvisor.dev/gvisor/pkg/bufferv2/buffer.go b/vendor/gvisor.dev/gvisor/pkg/buffer/buffer.go similarity index 97% rename from vendor/gvisor.dev/gvisor/pkg/bufferv2/buffer.go rename to vendor/gvisor.dev/gvisor/pkg/buffer/buffer.go index a7459a376a..cc663ae786 100644 --- a/vendor/gvisor.dev/gvisor/pkg/bufferv2/buffer.go +++ b/vendor/gvisor.dev/gvisor/pkg/buffer/buffer.go @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package bufferv2 provides the implementation of a non-contiguous buffer that +// Package buffer provides the implementation of a non-contiguous buffer that // is reference counted, pooled, and copy-on-write. It allows O(1) append, // and prepend operations. -package bufferv2 +package buffer import ( "fmt" @@ -414,6 +414,16 @@ func (b *Buffer) Clone() Buffer { return other } +// DeepClone creates a deep clone of b, copying data such that no bytes are +// shared with any other Buffers. +func (b *Buffer) DeepClone() Buffer { + newBuf := Buffer{} + buf := b.Clone() + reader := buf.AsBufferReader() + newBuf.WriteFromReader(&reader, b.size) + return newBuf +} + // Apply applies the given function across all valid data. func (b *Buffer) Apply(fn func(*View)) { for v := b.data.Front(); v != nil; v = v.Next() { @@ -468,13 +478,11 @@ func (b *Buffer) Checksum(offset int) uint16 { // The other Buffer will be appended to v, and other will be empty after this // operation completes. func (b *Buffer) Merge(other *Buffer) { - // Copy over all buffers. - for v := other.data.Front(); v != nil; v = other.data.Front() { - b.Append(v.Clone()) - other.removeView(v) - } + b.data.PushBackList(&other.data) + other.data = viewList{} // Adjust sizes. 
+ b.size += other.size other.size = 0 } diff --git a/vendor/gvisor.dev/gvisor/pkg/bufferv2/buffer_state.go b/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state.go similarity index 97% rename from vendor/gvisor.dev/gvisor/pkg/bufferv2/buffer_state.go rename to vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state.go index 378611c827..8b8e15ea26 100644 --- a/vendor/gvisor.dev/gvisor/pkg/bufferv2/buffer_state.go +++ b/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package bufferv2 +package buffer // saveData is invoked by stateify. func (b *Buffer) saveData() []byte { diff --git a/vendor/gvisor.dev/gvisor/pkg/bufferv2/bufferv2_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state_autogen.go similarity index 94% rename from vendor/gvisor.dev/gvisor/pkg/bufferv2/bufferv2_state_autogen.go rename to vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state_autogen.go index 408c476997..7587787cf5 100644 --- a/vendor/gvisor.dev/gvisor/pkg/bufferv2/bufferv2_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_state_autogen.go @@ -1,13 +1,13 @@ // automatically generated by stateify. -package bufferv2 +package buffer import ( "gvisor.dev/gvisor/pkg/state" ) func (b *Buffer) StateTypeName() string { - return "pkg/bufferv2.Buffer" + return "pkg/buffer.Buffer" } func (b *Buffer) StateFields() []string { @@ -37,7 +37,7 @@ func (b *Buffer) StateLoad(stateSourceObject state.Source) { } func (c *chunk) StateTypeName() string { - return "pkg/bufferv2.chunk" + return "pkg/buffer.chunk" } func (c *chunk) StateFields() []string { @@ -65,7 +65,7 @@ func (c *chunk) StateLoad(stateSourceObject state.Source) { } func (r *chunkRefs) StateTypeName() string { - return "pkg/bufferv2.chunkRefs" + return "pkg/buffer.chunkRefs" } func (r *chunkRefs) StateFields() []string { @@ -89,7 +89,7 @@ func (r *chunkRefs) StateLoad(stateSourceObject state.Source) { } func (v *View) StateTypeName() string { - return "pkg/bufferv2.View" + return "pkg/buffer.View" } func (v *View) StateFields() []string { @@ -120,7 +120,7 @@ func (v *View) StateLoad(stateSourceObject state.Source) { } func (l *viewList) StateTypeName() string { - return "pkg/bufferv2.viewList" + return "pkg/buffer.viewList" } func (l *viewList) StateFields() []string { @@ -148,7 +148,7 @@ func (l *viewList) StateLoad(stateSourceObject state.Source) { } func (e *viewEntry) StateTypeName() string { - return "pkg/bufferv2.viewEntry" + return "pkg/buffer.viewEntry" } func (e *viewEntry) StateFields() []string { diff --git a/vendor/gvisor.dev/gvisor/pkg/bufferv2/bufferv2_unsafe_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_unsafe_state_autogen.go similarity index 70% rename from vendor/gvisor.dev/gvisor/pkg/bufferv2/bufferv2_unsafe_state_autogen.go rename to vendor/gvisor.dev/gvisor/pkg/buffer/buffer_unsafe_state_autogen.go index 78185e6e9f..5a5c407227 100644 --- a/vendor/gvisor.dev/gvisor/pkg/bufferv2/bufferv2_unsafe_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/buffer/buffer_unsafe_state_autogen.go @@ -1,3 +1,3 @@ // automatically generated by stateify. 
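// Illustrative usage sketch (not part of the diff): the rewritten Merge above
// now splices the other buffer's view list with one PushBackList call instead
// of cloning and removing each view, so merging is O(1) in the number of
// views. Size() is assumed to be the package's exported accessor for the size
// field adjusted in that hunk.
//
// import "gvisor.dev/gvisor/pkg/buffer"

func mergeSketch(dst, src *buffer.Buffer) {
	before := dst.Size() + src.Size()
	dst.Merge(src)           // src's views are moved onto dst without copying
	_ = before == dst.Size() // sizes add up; src is left empty afterwards
}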
-package bufferv2 +package buffer diff --git a/vendor/gvisor.dev/gvisor/pkg/bufferv2/chunk.go b/vendor/gvisor.dev/gvisor/pkg/buffer/chunk.go similarity index 99% rename from vendor/gvisor.dev/gvisor/pkg/bufferv2/chunk.go rename to vendor/gvisor.dev/gvisor/pkg/buffer/chunk.go index 8a078acf5c..551f06dbd7 100644 --- a/vendor/gvisor.dev/gvisor/pkg/bufferv2/chunk.go +++ b/vendor/gvisor.dev/gvisor/pkg/buffer/chunk.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package bufferv2 +package buffer import ( "fmt" diff --git a/vendor/gvisor.dev/gvisor/pkg/bufferv2/chunk_refs.go b/vendor/gvisor.dev/gvisor/pkg/buffer/chunk_refs.go similarity index 99% rename from vendor/gvisor.dev/gvisor/pkg/bufferv2/chunk_refs.go rename to vendor/gvisor.dev/gvisor/pkg/buffer/chunk_refs.go index cf082532aa..4d2d3c3da3 100644 --- a/vendor/gvisor.dev/gvisor/pkg/bufferv2/chunk_refs.go +++ b/vendor/gvisor.dev/gvisor/pkg/buffer/chunk_refs.go @@ -1,4 +1,4 @@ -package bufferv2 +package buffer import ( "fmt" @@ -43,7 +43,8 @@ type chunkRefs struct { // InitRefs initializes r with one reference and, if enabled, activates leak // checking. func (r *chunkRefs) InitRefs() { - r.refCount.Store(1) + + r.refCount.RacyStore(1) refs.Register(r) } diff --git a/vendor/gvisor.dev/gvisor/pkg/bufferv2/view.go b/vendor/gvisor.dev/gvisor/pkg/buffer/view.go similarity index 99% rename from vendor/gvisor.dev/gvisor/pkg/bufferv2/view.go rename to vendor/gvisor.dev/gvisor/pkg/buffer/view.go index 8e45f220eb..d7eb2f1183 100644 --- a/vendor/gvisor.dev/gvisor/pkg/bufferv2/view.go +++ b/vendor/gvisor.dev/gvisor/pkg/buffer/view.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package bufferv2 +package buffer import ( "fmt" diff --git a/vendor/gvisor.dev/gvisor/pkg/bufferv2/view_list.go b/vendor/gvisor.dev/gvisor/pkg/buffer/view_list.go similarity index 99% rename from vendor/gvisor.dev/gvisor/pkg/bufferv2/view_list.go rename to vendor/gvisor.dev/gvisor/pkg/buffer/view_list.go index 69cef7f715..eb9aa9a77d 100644 --- a/vendor/gvisor.dev/gvisor/pkg/bufferv2/view_list.go +++ b/vendor/gvisor.dev/gvisor/pkg/buffer/view_list.go @@ -1,4 +1,4 @@ -package bufferv2 +package buffer // ElementMapper provides an identity mapping by default. // diff --git a/vendor/gvisor.dev/gvisor/pkg/bufferv2/view_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/buffer/view_unsafe.go similarity index 97% rename from vendor/gvisor.dev/gvisor/pkg/bufferv2/view_unsafe.go rename to vendor/gvisor.dev/gvisor/pkg/buffer/view_unsafe.go index d2b6d618e0..cef7e7ed8a 100644 --- a/vendor/gvisor.dev/gvisor/pkg/bufferv2/view_unsafe.go +++ b/vendor/gvisor.dev/gvisor/pkg/buffer/view_unsafe.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package bufferv2 +package buffer import ( "reflect" diff --git a/vendor/gvisor.dev/gvisor/pkg/context/context.go b/vendor/gvisor.dev/gvisor/pkg/context/context.go index 469e254ca6..7f94da2478 100644 --- a/vendor/gvisor.dev/gvisor/pkg/context/context.go +++ b/vendor/gvisor.dev/gvisor/pkg/context/context.go @@ -24,6 +24,7 @@ package context import ( "context" + "errors" "sync" "time" @@ -49,6 +50,11 @@ type Blocker interface { // result of the requested event (versus an external interrupt). BlockOn(waiter.Waitable, waiter.EventMask) bool + // Block blocks until an event is received from C, or some external + // interrupt. 
It returns nil if an event is received from C and an err if t + // is interrupted. + Block(C <-chan struct{}) error + // BlockWithTimeoutOn blocks until either the conditions of Block are // satisfied, or the timeout is hit. Note that deadlines are not supported // since the notion of "with respect to what clock" is not resolved. @@ -88,6 +94,19 @@ func (nt *NoTask) Interrupted() bool { return nt.cancel != nil && len(nt.cancel) > 0 } +// Block implements Blocker.Block. +func (nt *NoTask) Block(C <-chan struct{}) error { + if nt.cancel == nil { + nt.cancel = make(chan struct{}, 1) + } + select { + case <-nt.cancel: + return errors.New("interrupted system call") // Interrupted. + case <-C: + return nil + } +} + // BlockOn implements Blocker.BlockOn. func (nt *NoTask) BlockOn(w waiter.Waitable, mask waiter.EventMask) bool { if nt.cancel == nil { diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid.go b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid.go index 585a9a7d2f..413b849301 100644 --- a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid.go +++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid.go @@ -28,8 +28,14 @@ package cpuid import ( + "encoding/binary" "fmt" + "os" + "runtime" "strings" + + "gvisor.dev/gvisor/pkg/log" + "gvisor.dev/gvisor/pkg/sync" ) // contextID is the package for context.Context.Value keys. @@ -38,6 +44,11 @@ type contextID int const ( // CtxFeatureSet is the FeatureSet for the context. CtxFeatureSet contextID = iota + + // hardware capability bit vector. + _AT_HWCAP = 16 + // hardware capability bit vector 2. + _AT_HWCAP2 = 26 ) // context represents context.Context. @@ -171,3 +182,83 @@ func (fs FeatureSet) CheckHostCompatible() error { // Make arch-specific checks. return fs.archCheckHostCompatible(hfs) } + +// +stateify savable +type hwCap struct { + // hwCap1 stores HWCAP bits exposed through the elf auxiliary vector. + hwCap1 uint64 + // hwCap2 stores HWCAP2 bits exposed through the elf auxiliary vector. + hwCap2 uint64 +} + +// The auxiliary vector of a process on the Linux system can be read +// from /proc/self/auxv, and tags and values are stored as 8-bytes +// decimal key-value pairs on the 64-bit system. +// +// $ od -t d8 /proc/self/auxv +// +// 0000000 33 140734615224320 +// 0000020 16 3219913727 +// 0000040 6 4096 +// 0000060 17 100 +// 0000100 3 94665627353152 +// 0000120 4 56 +// 0000140 5 9 +// 0000160 7 140425502162944 +// 0000200 8 0 +// 0000220 9 94665627365760 +// 0000240 11 1000 +// 0000260 12 1000 +// 0000300 13 1000 +// 0000320 14 1000 +// 0000340 23 0 +// 0000360 25 140734614619513 +// 0000400 26 0 +// 0000420 31 140734614626284 +// 0000440 15 140734614619529 +// 0000460 0 0 +func readHWCap(auxvFilepath string) (hwCap, error) { + c := hwCap{} + if runtime.GOOS != "linux" { + // Don't try to read Linux-specific /proc files. 
+ return c, fmt.Errorf("readHwCap only supported on linux, not %s", runtime.GOOS) + } + + auxv, err := os.ReadFile(auxvFilepath) + if err != nil { + return c, fmt.Errorf("failed to read file %s: %w", auxvFilepath, err) + } + + l := len(auxv) / 16 + for i := 0; i < l; i++ { + tag := binary.LittleEndian.Uint64(auxv[i*16:]) + val := binary.LittleEndian.Uint64(auxv[i*16+8:]) + if tag == _AT_HWCAP { + c.hwCap1 = val + } else if tag == _AT_HWCAP2 { + c.hwCap2 = val + } + + if (c.hwCap1 != 0) && (c.hwCap2 != 0) { + break + } + } + return c, nil +} + +func initHWCap() { + c, err := readHWCap("/proc/self/auxv") + if err != nil { + log.Warningf("cpuid HWCap not initialized: %w", err) + } else { + hostFeatureSet.hwCap = c + } +} + +var initOnce sync.Once + +// Initialize initializes the global data structures used by this package. +// Must be called prior to using anything else in this package. +func Initialize() { + initOnce.Do(archInitialize) +} diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64.go b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64.go index b315107ac4..044eed0796 100644 --- a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64.go +++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64.go @@ -23,6 +23,9 @@ import ( ) // FeatureSet defines features in terms of CPUID leaves and bits. +// The kernel also exposes the presence of features to userspace through +// a set of flags(HWCAP/HWCAP2) bits, exposed in the auxiliary vector, which +// are necessary to read for some features (e.g. FSGSBASE). // // Common references: // @@ -40,6 +43,8 @@ type FeatureSet struct { // This is exported to allow direct calls of the underlying CPUID // function, where required. Function `state:".(Static)"` + // hwCap stores HWCAP1/2 exposed from the elf auxiliary vector. + hwCap hwCap } // saveFunction saves the function as a static query. @@ -355,22 +360,23 @@ func (fs FeatureSet) Intel() bool { // // If xSaveInfo isn't supported, cpuid will not fault but will // return bogus values. -var maxXsaveSize = native(In{Eax: uint32(xSaveInfo)}).Ecx +var ( + xsaveSize = native(In{Eax: uint32(xSaveInfo)}).Ebx + maxXsaveSize = native(In{Eax: uint32(xSaveInfo)}).Ecx +) // ExtendedStateSize returns the number of bytes needed to save the "extended -// state" for this processor and the boundary it must be aligned to. Extended -// state includes floating point registers, and other cpu state that's not -// associated with the normal task context. +// state" for the enabled features and the boundary it must be aligned to. +// Extended state includes floating point registers, and other cpu state that's +// not associated with the normal task context. // -// Note: We can save some space here with an optimization where we use a -// smaller chunk of memory depending on features that are actually enabled. -// Currently we just use the largest possible size for simplicity (which is -// about 2.5K worst case, with avx512). +// Note: the return value matches the size of signal FP state frames. +// Look at check_xstate_in_sigframe() in the kernel sources for more details. // //go:nosplit func (fs FeatureSet) ExtendedStateSize() (size, align uint) { if fs.UseXsave() { - return uint(maxXsaveSize), 64 + return uint(xsaveSize), 64 } // If we don't support xsave, we fall back to fxsave, which requires @@ -403,6 +409,19 @@ func (fs FeatureSet) UseXsaveopt() bool { return fs.UseXsave() && fs.HasFeature(X86FeatureXSAVEOPT) } +// UseXsavec returns true if 'fs' supports the "xsavec" instruction. 
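// Illustrative note (not part of the diff): CPUID leaf 0xD (the xSaveInfo leaf
// queried in the var block above) reports two sizes. EBX is the save-area size
// for the features currently enabled in XCR0, while ECX is the worst case over
// everything the CPU supports, so switching ExtendedStateSize to xsaveSize
// (EBX) matches what the kernel writes into signal FP frames. A sketch inside
// package cpuid, reusing the names shown above (native, In and the Out fields
// are assumed to keep their shown shapes):

func xsaveSizes() (enabled, worstCase uint32) {
	info := native(In{Eax: uint32(xSaveInfo)}) // same query as xsaveSize/maxXsaveSize
	return info.Ebx, info.Ecx
}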
+// +//go:nosplit +func (fs FeatureSet) UseXsavec() bool { + return fs.UseXsaveopt() && fs.HasFeature(X86FeatureXSAVEC) +} + +// UseFSGSBASE returns true if 'fs' supports the (RD|WR)(FS|GS)BASE instructions. +func (fs FeatureSet) UseFSGSBASE() bool { + HWCAP2_FSGSBASE := uint64(1) << 1 + return fs.HasFeature(X86FeatureFSGSBase) && ((fs.hwCap.hwCap2 & HWCAP2_FSGSBASE) != 0) +} + // archCheckHostCompatible checks for compatibility. func (fs FeatureSet) archCheckHostCompatible(hfs FeatureSet) error { // The size of a cache line must match, as it is critical to correctly diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64_state_autogen.go index d97e11fa8a..8b7c0e1ba1 100644 --- a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_amd64_state_autogen.go @@ -16,6 +16,7 @@ func (fs *FeatureSet) StateTypeName() string { func (fs *FeatureSet) StateFields() []string { return []string{ "Function", + "hwCap", } } @@ -27,12 +28,14 @@ func (fs *FeatureSet) StateSave(stateSinkObject state.Sink) { var FunctionValue Static FunctionValue = fs.saveFunction() stateSinkObject.SaveValue(0, FunctionValue) + stateSinkObject.Save(1, &fs.hwCap) } func (fs *FeatureSet) afterLoad() {} // +checklocksignore func (fs *FeatureSet) StateLoad(stateSourceObject state.Source) { + stateSourceObject.Load(1, &fs.hwCap) stateSourceObject.LoadValue(0, new(Static), func(y any) { fs.loadFunction(y.(Static)) }) } diff --git a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_arm64.go b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_arm64.go index a7c24c7658..7a22e98b3e 100644 --- a/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_arm64.go +++ b/vendor/gvisor.dev/gvisor/pkg/cpuid/cpuid_arm64.go @@ -34,7 +34,7 @@ import ( // // +stateify savable type FeatureSet struct { - hwCap uint + hwCap hwCap cpuFreqMHz float64 cpuImplHex uint64 cpuArchDec uint64 @@ -79,15 +79,15 @@ func (fs FeatureSet) ExtendedStateSize() (size, align uint) { // struct user_fpsimd_state { // __uint128_t vregs[32]; // __u32 fpsr; - // __u32 fpcr; - // __u32 __reserved[2]; + // __u32 fpcr; + // __u32 __reserved[2]; // }; return 528, 16 } // HasFeature checks for the presence of a feature. func (fs FeatureSet) HasFeature(feature Feature) bool { - return fs.hwCap&(1<. -// -//go:nosplit -func Memmove(to, from unsafe.Pointer, n uintptr) { - memmove(to, from, n) -} - -//go:linkname memmove runtime.memmove -//go:noescape -func memmove(to, from unsafe.Pointer, n uintptr) - -// Nanotime is runtime.nanotime. -// -//go:nosplit -func Nanotime() int64 { - return nanotime() -} - -//go:linkname nanotime runtime.nanotime -//go:noescape -func nanotime() int64 diff --git a/vendor/gvisor.dev/gvisor/pkg/gohacks/linkname_go113_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/gohacks/linkname_go113_unsafe.go new file mode 100644 index 0000000000..2e8c465294 --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/gohacks/linkname_go113_unsafe.go @@ -0,0 +1,51 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.13 + +// //go:linkname directives type-checked by checklinkname. Any other +// non-linkname assumptions outside the Go 1 compatibility guarantee should +// have an accompanied vet check or version guard build tag. + +// Package gohacks contains utilities for subverting the Go compiler. +package gohacks + +import ( + "unsafe" +) + +// Note that go:linkname silently doesn't work if the local name is exported, +// necessitating an indirection for exported functions. + +// Memmove is runtime.memmove, exported for SeqAtomicLoad/SeqAtomicTryLoad. +// +//go:nosplit +func Memmove(to, from unsafe.Pointer, n uintptr) { + memmove(to, from, n) +} + +//go:linkname memmove runtime.memmove +//go:noescape +func memmove(to, from unsafe.Pointer, n uintptr) + +// Nanotime is runtime.nanotime. +// +//go:nosplit +func Nanotime() int64 { + return nanotime() +} + +//go:linkname nanotime runtime.nanotime +//go:noescape +func nanotime() int64 diff --git a/vendor/gvisor.dev/gvisor/pkg/gohacks/noescape_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/gohacks/noescape_unsafe.go new file mode 100644 index 0000000000..e6470e33de --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/gohacks/noescape_unsafe.go @@ -0,0 +1,34 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gohacks + +import ( + "unsafe" +) + +// Noescape hides a pointer from escape analysis. Noescape is the identity +// function but escape analysis doesn't think the output depends on the input. +// Noescape is inlined and currently compiles down to zero instructions. +// USE CAREFULLY! +// +// Noescape is copy/pasted from Go's runtime/stubs.go:noescape(), and is valid +// as of Go 1.20. It is possible that this approach stops working in future +// versions of the toolchain, at which point `p` may still escape. +// +//go:nosplit +func Noescape(p unsafe.Pointer) unsafe.Pointer { + x := uintptr(p) + return unsafe.Pointer(x ^ 0) +} diff --git a/vendor/gvisor.dev/gvisor/pkg/gohacks/slice_go113_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/gohacks/slice_go113_unsafe.go new file mode 100644 index 0000000000..8ee39f560f --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/gohacks/slice_go113_unsafe.go @@ -0,0 +1,45 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build go1.13 && !go1.20 +// +build go1.13,!go1.20 + +// TODO(go.dev/issue/8422): Remove this once Go 1.19 is no longer supported, +// and update callers to use unsafe.Slice directly. + +package gohacks + +import ( + "unsafe" +) + +// sliceHeader is equivalent to reflect.SliceHeader, but represents the pointer +// to the underlying array as unsafe.Pointer rather than uintptr, allowing +// sliceHeaders to be directly converted to slice objects. +type sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int +} + +// Slice returns a slice whose underlying array starts at ptr an which length +// and capacity are len. +func Slice[T any](ptr *T, length int) []T { + var s []T + hdr := (*sliceHeader)(unsafe.Pointer(&s)) + hdr.Data = unsafe.Pointer(ptr) + hdr.Len = length + hdr.Cap = length + return s +} diff --git a/vendor/gvisor.dev/gvisor/pkg/gohacks/slice_go120_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/gohacks/slice_go120_unsafe.go new file mode 100644 index 0000000000..9778db863a --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/gohacks/slice_go120_unsafe.go @@ -0,0 +1,30 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.20 + +package gohacks + +import ( + "unsafe" +) + +// Slice returns a slice whose underlying array starts at ptr an which length +// and capacity are len. +// +// Slice is a wrapper around unsafe.Slice. Prefer to use unsafe.Slice directly +// if possible. +func Slice[T any](ptr *T, length int) []T { + return unsafe.Slice(ptr, length) +} diff --git a/vendor/gvisor.dev/gvisor/pkg/gohacks/string_go113_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/gohacks/string_go113_unsafe.go new file mode 100644 index 0000000000..dceeaf5763 --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/gohacks/string_go113_unsafe.go @@ -0,0 +1,51 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.13 && !go1.20 +// +build go1.13,!go1.20 + +// TODO(go.dev/issue/8422): Remove this file once Go 1.19 is no longer +// supported. + +package gohacks + +import ( + "unsafe" +) + +// stringHeader is equivalent to reflect.StringHeader, but represents the +// pointer to the underlying array as unsafe.Pointer rather than uintptr, +// allowing StringHeaders to be directly converted to strings. 
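// Illustrative usage sketch (not part of the diff): both build-tagged variants
// of Slice above return a []T aliasing the memory that starts at ptr, which is
// what unsafe.Slice does directly on Go 1.20+. sliceAliasesArray is a
// hypothetical caller.
//
// import "gvisor.dev/gvisor/pkg/gohacks"

func sliceAliasesArray() {
	var arr [4]uint32
	s := gohacks.Slice(&arr[0], len(arr)) // s shares arr's backing storage
	s[0] = 42                             // now arr[0] == 42 as well
}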
+type stringHeader struct { + Data unsafe.Pointer + Len int +} + +// ImmutableBytesFromString is equivalent to []byte(s), except that it uses the +// same memory backing s instead of making a heap-allocated copy. This is only +// valid if the returned slice is never mutated. +func ImmutableBytesFromString(s string) []byte { + shdr := (*stringHeader)(unsafe.Pointer(&s)) + return Slice((*byte)(shdr.Data), shdr.Len) +} + +// StringFromImmutableBytes is equivalent to string(bs), except that it uses +// the same memory backing bs instead of making a heap-allocated copy. This is +// only valid if bs is never mutated after StringFromImmutableBytes returns. +func StringFromImmutableBytes(bs []byte) string { + // This is cheaper than messing with StringHeader and SliceHeader, which as + // of this writing produces many dead stores of zeroes. Compare + // strings.Builder.String(). + return *(*string)(unsafe.Pointer(&bs)) +} diff --git a/vendor/gvisor.dev/gvisor/pkg/gohacks/string_go120_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/gohacks/string_go120_unsafe.go new file mode 100644 index 0000000000..9005efd6a8 --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/gohacks/string_go120_unsafe.go @@ -0,0 +1,39 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.20 + +package gohacks + +import ( + "unsafe" +) + +// ImmutableBytesFromString is equivalent to []byte(s), except that it uses the +// same memory backing s instead of making a heap-allocated copy. This is only +// valid if the returned slice is never mutated. +func ImmutableBytesFromString(s string) []byte { + b := unsafe.StringData(s) + return unsafe.Slice(b, len(s)) +} + +// StringFromImmutableBytes is equivalent to string(bs), except that it uses +// the same memory backing bs instead of making a heap-allocated copy. This is +// only valid if bs is never mutated after StringFromImmutableBytes returns. +func StringFromImmutableBytes(bs []byte) string { + if len(bs) == 0 { + return "" + } + return unsafe.String(&bs[0], len(bs)) +} diff --git a/vendor/gvisor.dev/gvisor/pkg/goid/goid.go b/vendor/gvisor.dev/gvisor/pkg/goid/goid.go index c4eacdf8c6..1531761593 100644 --- a/vendor/gvisor.dev/gvisor/pkg/goid/goid.go +++ b/vendor/gvisor.dev/gvisor/pkg/goid/goid.go @@ -12,61 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build go1.12 && !go1.21 -// +build go1.12,!go1.21 - -// Check type signatures when updating Go version. - // Package goid provides the Get function. package goid -// Get returns the ID of the current goroutine. -func Get() int64 { - return getg().goid -} - -// Structs from Go runtime. These may change in the future and require -// updating. These structs are currently the same on both AMD64 and ARM64, -// but may diverge in the future. 
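// Illustrative usage sketch (not part of the diff) for the string_go113/go120
// helpers above: both directions alias memory rather than copying, so the
// immutability caveats in their doc comments apply. zeroCopyStrings is a
// hypothetical caller.
//
// import "gvisor.dev/gvisor/pkg/gohacks"

func zeroCopyStrings() {
	b := gohacks.ImmutableBytesFromString("checksum") // aliases the string; never mutate b
	s := gohacks.StringFromImmutableBytes(b)          // aliases b; b must stay unchanged
	_ = s
}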
- -type stack struct { - lo uintptr - hi uintptr -} - -type gobuf struct { - sp uintptr - pc uintptr - g uintptr - ctxt uintptr - ret uint64 - lr uintptr - bp uintptr -} - -type g struct { - stack stack - stackguard0 uintptr - stackguard1 uintptr +import ( + _ "runtime" // For facts in assembly files. +) - _panic uintptr - _defer uintptr - m uintptr - sched gobuf - syscallsp uintptr - syscallpc uintptr - stktopsp uintptr - param uintptr - atomicstatus uint32 - stackLock uint32 - goid int64 +// goid returns the current goid, it is defined in assembly. +func goid() int64 - // More fields... - // - // We only use goid and the fields before it are only listed to - // calculate the correct offset. +// Get returns the ID of the current goroutine. +func Get() int64 { + return goid() } - -// Defined in assembly. This can't use go:linkname since runtime.getg() isn't a -// real function, it's a compiler intrinsic. -func getg() *g diff --git a/vendor/gvisor.dev/gvisor/pkg/goid/goid_amd64.s b/vendor/gvisor.dev/gvisor/pkg/goid/goid_amd64.s index d9f5cd2a35..8d0e9b318f 100644 --- a/vendor/gvisor.dev/gvisor/pkg/goid/goid_amd64.s +++ b/vendor/gvisor.dev/gvisor/pkg/goid/goid_amd64.s @@ -14,8 +14,11 @@ #include "textflag.h" -// func getg() *g -TEXT ·getg(SB),NOSPLIT,$0-8 - MOVQ (TLS), R14 - MOVQ R14, ret+0(FP) - RET +#define GOID_OFFSET 152 // +checkoffset runtime g.goid + +// func goid() int64 +TEXT ·goid(SB),NOSPLIT|NOFRAME,$0-8 + MOVQ (TLS), R14 + MOVQ GOID_OFFSET(R14), R14 + MOVQ R14, ret+0(FP) + RET diff --git a/vendor/gvisor.dev/gvisor/pkg/goid/goid_arm64.s b/vendor/gvisor.dev/gvisor/pkg/goid/goid_arm64.s index a7465b75d9..07b04a2442 100644 --- a/vendor/gvisor.dev/gvisor/pkg/goid/goid_arm64.s +++ b/vendor/gvisor.dev/gvisor/pkg/goid/goid_arm64.s @@ -14,8 +14,11 @@ #include "textflag.h" -// func getg() *g -TEXT ·getg(SB),NOSPLIT,$0-8 +#define GOID_OFFSET 152 // +checkoffset runtime g.goid + +// func goid() int64 +TEXT ·goid(SB),NOSPLIT,$0-8 MOVD g, R0 // g + MOVD GOID_OFFSET(R0), R0 MOVD R0, ret+0(FP) RET diff --git a/vendor/gvisor.dev/gvisor/pkg/state/decode.go b/vendor/gvisor.dev/gvisor/pkg/state/decode.go index cb25d7fbcb..777d77689a 100644 --- a/vendor/gvisor.dev/gvisor/pkg/state/decode.go +++ b/vendor/gvisor.dev/gvisor/pkg/state/decode.go @@ -21,7 +21,6 @@ import ( "math" "reflect" - "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/state/wire" ) @@ -660,9 +659,9 @@ func (ds *decodeState) Load(obj reflect.Value) { numDeferred++ if s, ok := encoded.(*wire.Struct); ok && s.TypeID != 0 { typ := ds.types.LookupType(typeID(s.TypeID)) - log.Warningf("unused deferred object: ID %d, type %v", id, typ) + Failf("unused deferred object: ID %d, type %v", id, typ) } else { - log.Warningf("unused deferred object: ID %d, %#v", id, encoded) + Failf("unused deferred object: ID %d, %#v", id, encoded) } } if numDeferred != 0 { diff --git a/vendor/gvisor.dev/gvisor/pkg/state/types.go b/vendor/gvisor.dev/gvisor/pkg/state/types.go index 8df2ac64a5..b96423e140 100644 --- a/vendor/gvisor.dev/gvisor/pkg/state/types.go +++ b/vendor/gvisor.dev/gvisor/pkg/state/types.go @@ -324,6 +324,14 @@ var globalTypeDatabase = map[string]reflect.Type{} // reverseTypeDatabase is a reverse mapping. var reverseTypeDatabase = map[reflect.Type]string{} +// Release releases references to global type databases. +// Must only be called in contexts where they will definitely never be used, +// in order to save memory. +func Release() { + globalTypeDatabase = nil + reverseTypeDatabase = nil +} + // Register registers a type. 
// // This must be called on init and only done once. diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/locking/atomicptrmap_ancestors_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/sync/locking/atomicptrmap_ancestors_unsafe.go index 10261274ee..5e4ec51c13 100644 --- a/vendor/gvisor.dev/gvisor/pkg/sync/locking/atomicptrmap_ancestors_unsafe.go +++ b/vendor/gvisor.dev/gvisor/pkg/sync/locking/atomicptrmap_ancestors_unsafe.go @@ -324,8 +324,7 @@ func (shard *ancestorsapmShard) rehash(oldSlots unsafe.Pointer) { } newSlotsSlice := make([]ancestorsapmSlot, newSize) - newSlotsHeader := (*gohacks.SliceHeader)(unsafe.Pointer(&newSlotsSlice)) - newSlots := newSlotsHeader.Data + newSlots := unsafe.Pointer(&newSlotsSlice[0]) newMask := newSize - 1 shard.dirtyMu.Lock() diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/locking/atomicptrmap_goroutine_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/sync/locking/atomicptrmap_goroutine_unsafe.go index f778138dcf..8ac8d98c46 100644 --- a/vendor/gvisor.dev/gvisor/pkg/sync/locking/atomicptrmap_goroutine_unsafe.go +++ b/vendor/gvisor.dev/gvisor/pkg/sync/locking/atomicptrmap_goroutine_unsafe.go @@ -324,8 +324,7 @@ func (shard *goroutineLocksapmShard) rehash(oldSlots unsafe.Pointer) { } newSlotsSlice := make([]goroutineLocksapmSlot, newSize) - newSlotsHeader := (*gohacks.SliceHeader)(unsafe.Pointer(&newSlotsSlice)) - newSlots := newSlotsHeader.Data + newSlots := unsafe.Pointer(&newSlotsSlice[0]) newMask := newSize - 1 shard.dirtyMu.Lock() diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/locking/lockdep.go b/vendor/gvisor.dev/gvisor/pkg/sync/locking/lockdep.go index 092a5765ea..871466c36b 100644 --- a/vendor/gvisor.dev/gvisor/pkg/sync/locking/lockdep.go +++ b/vendor/gvisor.dev/gvisor/pkg/sync/locking/lockdep.go @@ -72,9 +72,23 @@ type goroutineLocks map[*MutexClass]bool var routineLocks goroutineLocksAtomicPtrMap +// maxChainLen is the maximum length of a lock chain. +const maxChainLen = 32 + // checkLock checks that class isn't in the ancestors of prevClass. func checkLock(class *MutexClass, prevClass *MutexClass, chain []*MutexClass) { chain = append(chain, prevClass) + if len(chain) >= maxChainLen { + // It can be a race condition with another thread that added + // the lock to the graph but don't complete the validation. + var b strings.Builder + fmt.Fprintf(&b, "WARNING: The maximum lock depth has been reached: %s", chain[0]) + for i := 1; i < len(chain); i++ { + fmt.Fprintf(&b, "-> %s", chain[i]) + } + log.Warningf("%s", b.String()) + return + } if c := prevClass.ancestors.Load(class); c != nil { var b strings.Builder fmt.Fprintf(&b, "WARNING: circular locking detected: %s -> %s:\n%s\n", @@ -118,10 +132,14 @@ func AddGLock(class *MutexClass, lockNameIndex int) { return } + if (*currentLocks)[class] { + panic(fmt.Sprintf("nested locking: %s:\n%s", class, log.LocalStack(2))) + } + (*currentLocks)[class] = true // Check dependencies and add locked mutexes to the ancestors list. for prevClass := range *currentLocks { if prevClass == class { - panic(fmt.Sprintf("nested locking: %s:\n%s", class, log.LocalStack(2))) + continue } checkLock(class, prevClass, nil) @@ -130,7 +148,6 @@ func AddGLock(class *MutexClass, lockNameIndex int) { class.ancestors.Store(prevClass, &stacks) } } - (*currentLocks)[class] = true } // DelGLock deletes a lock from the current goroutine. 
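// Illustrative sketch (not the package's code): AddGLock above now records the
// class and rejects same-class reacquisition before walking ancestor classes,
// and checkLock gives up once a dependency chain reaches maxChainLen (32)
// instead of recursing through a graph another goroutine may still be
// populating. The reordered guard reduces to roughly this shape, with `held`
// standing in for the goroutine's lock set:

func addLockSketch(held map[string]bool, class string) {
	if held[class] {
		panic("nested locking: " + class) // same-class reacquire fails fast now
	}
	held[class] = true // recorded before the ancestor walk, per the hunk above
	for prev := range held {
		if prev == class {
			continue // mirrors the new `continue` that replaced the panic
		}
		// checkLock(class, prev, nil) runs here in the real code, bounded by
		// maxChainLen ancestors.
	}
}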
diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/race_amd64.s b/vendor/gvisor.dev/gvisor/pkg/sync/race_amd64.s index 1996023878..c99481401d 100644 --- a/vendor/gvisor.dev/gvisor/pkg/sync/race_amd64.s +++ b/vendor/gvisor.dev/gvisor/pkg/sync/race_amd64.s @@ -18,7 +18,7 @@ #include "textflag.h" // func RaceUncheckedAtomicCompareAndSwapUintptr(ptr *uintptr, old, new uintptr) bool -TEXT ·RaceUncheckedAtomicCompareAndSwapUintptr(SB),NOSPLIT,$0-25 +TEXT ·RaceUncheckedAtomicCompareAndSwapUintptr(SB),NOSPLIT|NOFRAME,$0-25 MOVQ ptr+0(FP), DI MOVQ old+8(FP), AX MOVQ new+16(FP), SI diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/runtime.go b/vendor/gvisor.dev/gvisor/pkg/sync/runtime.go new file mode 100644 index 0000000000..e4604e83ed --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/sync/runtime.go @@ -0,0 +1,22 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sync + +import ( + "runtime" +) + +// Dummy reference for facts. +const _ = runtime.Compiler diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_amd64.go b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_amd64.go index a9d883e4c0..dad10bfef2 100644 --- a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_amd64.go +++ b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_amd64.go @@ -3,8 +3,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build amd64 && go1.8 && !go1.21 && !goexperiment.staticlockranking -// +build amd64,go1.8,!go1.21,!goexperiment.staticlockranking +//go:build amd64 package sync diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_constants.go b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_constants.go new file mode 100644 index 0000000000..9a5a47a824 --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_constants.go @@ -0,0 +1,29 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sync + +// Values for the reason argument to gopark, from Go's src/runtime/runtime2.go. +const ( + WaitReasonSelect uint8 = 9 // +checkconst runtime waitReasonSelect + WaitReasonChanReceive uint8 = 14 // +checkconst runtime waitReasonChanReceive + WaitReasonSemacquire uint8 = 18 // +checkconst runtime waitReasonSemacquire +) + +// Values for the traceEv argument to gopark, from Go's src/runtime/trace.go. 
+const ( + TraceEvGoBlockRecv byte = 23 // +checkconst runtime traceEvGoBlockRecv + TraceEvGoBlockSelect byte = 24 // +checkconst runtime traceEvGoBlockSelect + TraceEvGoBlockSync byte = 25 // +checkconst runtime traceEvGoBlockSync +) diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_amd64.s b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_spinning_amd64.s similarity index 72% rename from vendor/gvisor.dev/gvisor/pkg/sync/runtime_amd64.s rename to vendor/gvisor.dev/gvisor/pkg/sync/runtime_spinning_amd64.s index 252dda1bb8..37f69471fd 100644 --- a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_amd64.s +++ b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_spinning_amd64.s @@ -12,14 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build amd64 && go1.14 && !go1.21 && !goexperiment.staticlockranking -// +build amd64,go1.14,!go1.21,!goexperiment.staticlockranking +//go:build amd64 #include "textflag.h" -TEXT ·addrOfSpinning(SB),NOSPLIT,$0-8 - // The offset specified here is the nmspinning value in sched. +#define NMSPINNING_OFFSET 92 // +checkoffset runtime schedt.nmspinning + +TEXT ·addrOfSpinning(SB),NOSPLIT|NOFRAME,$0-8 LEAQ runtime·sched(SB), AX - ADDQ $92, AX + ADDQ $NMSPINNING_OFFSET, AX MOVQ AX, ret+0(FP) RET diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_spinning_other.s b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_spinning_other.s new file mode 100644 index 0000000000..85501e54c2 --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_spinning_other.s @@ -0,0 +1,18 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !amd64 + +// This file is intentionally left blank. Other arches don't use +// addrOfSpinning, but we still need an input to the nogo temlate rule. diff --git a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_unsafe.go index 05239282bc..91cda67bb0 100644 --- a/vendor/gvisor.dev/gvisor/pkg/sync/runtime_unsafe.go +++ b/vendor/gvisor.dev/gvisor/pkg/sync/runtime_unsafe.go @@ -3,8 +3,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.13 && !go1.21 -// +build go1.13,!go1.21 +//go:build go1.18 && !go1.22 +// +build go1.18,!go1.22 // //go:linkname directives type-checked by checklinkname. Any other // non-linkname assumptions outside the Go 1 compatibility guarantee should @@ -78,20 +78,6 @@ func Goready(gp uintptr, traceskip int, wakep bool) { } } -// Values for the reason argument to gopark, from Go's src/runtime/runtime2.go. -const ( - WaitReasonSelect uint8 = 9 - WaitReasonChanReceive uint8 = 14 - WaitReasonSemacquire uint8 = 18 -) - -// Values for the traceEv argument to gopark, from Go's src/runtime/trace.go. -const ( - TraceEvGoBlockRecv byte = 23 - TraceEvGoBlockSelect byte = 24 - TraceEvGoBlockSync byte = 25 -) - // Rand32 returns a non-cryptographically-secure random uint32. 
func Rand32() uint32 { return fastrand() diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/adapters/gonet/gonet.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/adapters/gonet/gonet.go index 6c4f3d2d0d..5e91f3dd5a 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/adapters/gonet/gonet.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/adapters/gonet/gonet.go @@ -24,7 +24,6 @@ import ( "net" "time" - "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/stack" @@ -454,7 +453,6 @@ func (c *TCPConn) LocalAddr() net.Addr { func (c *TCPConn) RemoteAddr() net.Addr { a, err := c.ep.GetRemoteAddress() if err != nil { - log.Warningf("ep.GetRemoteAddress() failed: %v", err) return nil } return fullToTCPAddr(a) @@ -471,11 +469,11 @@ func (c *TCPConn) newOpError(op string, err error) *net.OpError { } func fullToTCPAddr(addr tcpip.FullAddress) *net.TCPAddr { - return &net.TCPAddr{IP: net.IP(addr.Addr), Port: int(addr.Port)} + return &net.TCPAddr{IP: net.IP(addr.Addr.AsSlice()), Port: int(addr.Port)} } func fullToUDPAddr(addr tcpip.FullAddress) *net.UDPAddr { - return &net.UDPAddr{IP: net.IP(addr.Addr), Port: int(addr.Port)} + return &net.UDPAddr{IP: net.IP(addr.Addr.AsSlice()), Port: int(addr.Port)} } // DialTCP creates a new TCPConn connected to the specified address. @@ -623,7 +621,6 @@ func (c *UDPConn) newRemoteOpError(op string, remote net.Addr, err error) *net.O func (c *UDPConn) RemoteAddr() net.Addr { a, err := c.ep.GetRemoteAddress() if err != nil { - log.Warningf("ep.GetRemoteAddress() failed: %v", err) return nil } return fullToUDPAddr(a) @@ -667,7 +664,7 @@ func (c *UDPConn) WriteTo(b []byte, addr net.Addr) (int, error) { if addr != nil { ua := addr.(*net.UDPAddr) writeOptions.To = &tcpip.FullAddress{ - Addr: tcpip.Address(ua.IP), + Addr: tcpip.AddrFromSlice(ua.IP), Port: uint16(ua.Port), } } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum.go index d2e0191516..fa6e8bc3a7 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum.go @@ -30,30 +30,8 @@ func Put(b []byte, xsum uint16) { binary.BigEndian.PutUint16(b, xsum) } -func calculateChecksum(buf []byte, odd bool, initial uint32) (uint16, bool) { - v := initial - - if odd { - v += uint32(buf[0]) - buf = buf[1:] - } - - l := len(buf) - odd = l&1 != 0 - if odd { - l-- - v += uint32(buf[l]) << 8 - } - - for i := 0; i < l; i += 2 { - v += (uint32(buf[i]) << 8) + uint32(buf[i+1]) - } - - return Combine(uint16(v), uint16(v>>16)), odd -} - -func unrolledCalculateChecksum(buf []byte, odd bool, initial uint32) (uint16, bool) { - v := initial +func unrolledCalculateChecksum(buf []byte, odd bool, initial uint16) (uint16, bool) { + v := uint32(initial) if odd { v += uint32(buf[0]) @@ -167,24 +145,13 @@ func unrolledCalculateChecksum(buf []byte, odd bool, initial uint32) (uint16, bo return Combine(uint16(v), uint16(v>>16)), odd } -// Old calculates the checksum (as defined in RFC 1071) of the bytes in -// the given byte array. This function uses a non-optimized implementation. Its -// only retained for reference and to use as a benchmark/test. Most code should -// use the header.Checksum function. -// -// The initial checksum must have been computed on an even number of bytes. 
-func Old(buf []byte, initial uint16) uint16 { - s, _ := calculateChecksum(buf, false, uint32(initial)) - return s -} - // Checksum calculates the checksum (as defined in RFC 1071) of the bytes in the -// given byte array. This function uses an optimized unrolled version of the -// checksum algorithm. +// given byte array. This function uses an optimized version of the checksum +// algorithm. // // The initial checksum must have been computed on an even number of bytes. func Checksum(buf []byte, initial uint16) uint16 { - s, _ := unrolledCalculateChecksum(buf, false, uint32(initial)) + s, _ := calculateChecksum(buf, false, initial) return s } @@ -197,7 +164,7 @@ type Checksumer struct { // Add adds b to checksum. func (c *Checksumer) Add(b []byte) { if len(b) > 0 { - c.sum, c.odd = unrolledCalculateChecksum(b, c.odd, uint32(c.sum)) + c.sum, c.odd = calculateChecksum(b, c.odd, c.sum) } } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_amd64.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_amd64.go new file mode 100644 index 0000000000..21ecd12a87 --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_amd64.go @@ -0,0 +1,24 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build amd64 +// +build amd64 + +package checksum + +// Note: odd indicates whether initial is a partial checksum over an odd number +// of bytes. +// +// calculateChecksum is defined in assembly. +func calculateChecksum(buf []byte, odd bool, initial uint16) (uint16, bool) diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_amd64.s b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_amd64.s new file mode 100644 index 0000000000..c2a97a647b --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_amd64.s @@ -0,0 +1,138 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build amd64 +// +build amd64 + +#include "textflag.h" + +// calculateChecksum computes the checksum of a slice, taking into account a +// previously computed initial value and whether the first byte is a lower or +// upper byte. +// +// It utilizes byte order independence and parallel summation as described in +// RFC 1071 1.2. +// +// The best way to understand this function is to understand +// checksum_noasm_unsafe.go first, which implements largely the same logic. +// Using assembly speeds things up via ADC (add with carry). 
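// Illustrative usage sketch (not part of the diff): the odd-byte bookkeeping
// that calculateChecksum returns (and that the assembly below preserves in its
// second result) is what lets Checksumer.Add accept arbitrarily sized chunks.
// Feeding data incrementally should give the same RFC 1071 sum as one shot
// over the concatenation; Checksum() is assumed to be the Checksumer's
// accessor in the upstream package.
//
// import "gvisor.dev/gvisor/pkg/tcpip/checksum"

func incrementalMatchesOneShot(hdr, payload []byte) bool {
	var c checksum.Checksumer
	c.Add(hdr)     // hdr may be odd-length; the carry byte is tracked internally
	c.Add(payload) // continues from the partial sum
	all := append(append([]byte(nil), hdr...), payload...)
	return c.Checksum() == checksum.Checksum(all, 0)
}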
+TEXT ·calculateChecksum(SB),NOSPLIT|NOFRAME,$0-35 + // Store arguments in registers. + MOVW initial+26(FP), AX + MOVQ buf_len+8(FP), BX + MOVQ buf_base+0(FP), CX + XORQ R8, R8 + MOVB odd+24(FP), R8 + + // Account for a previous odd number of bytes. + // + // if odd { + // initial += buf[0] + // buf = buf[1:] + // } + CMPB R8, $0 + JE newlyodd + XORQ R9, R9 + MOVB (CX), R9 + ADDW R9, AX + ADCW $0, AX + INCQ CX + DECQ BX + + // See whether we're checksumming an odd number of bytes. If so, the final + // byte is a big endian most significant byte, and so needs to be shifted. + // + // odd = buf_len%2 != 0 + // if odd { + // buf_len-- + // initial += buf[buf_len]<<8 + // } +newlyodd: + XORQ R8, R8 + TESTQ $1, BX + JZ swaporder + MOVB $1, R8 + DECQ BX + XORQ R10, R10 + MOVB (CX)(BX*1), R10 + SHLQ $8, R10 + ADDW R10, AX + ADCW $0, AX + +swaporder: + // Load initial in network byte order. + BSWAPQ AX + SHRQ $48, AX + + // Accumulate 8 bytes at a time. + // + // while buf_len >= 8 { + // acc, carry = acc + *(uint64 *)(buf) + carry + // buf_len -= 8 + // buf = buf[8:] + // } + // acc += carry + JMP addcond +addloop: + ADDQ (CX), AX + ADCQ $0, AX + SUBQ $8, BX + ADDQ $8, CX +addcond: + CMPQ BX, $8 + JAE addloop + + // TODO(krakauer): We can do 4 byte accumulation too. + + // Accumulate the rest 2 bytes at a time. + // + // while buf_len > 0 { + // acc, carry = acc + *(uint16 *)(buf) + // buf_len -= 2 + // buf = buf[2:] + // } + JMP slowaddcond +slowaddloop: + XORQ DX, DX + MOVW (CX), DX + ADDQ DX, AX + ADCQ $0, AX + SUBQ $2, BX + ADDQ $2, CX +slowaddcond: + CMPQ BX, $2 + JAE slowaddloop + + // Fold into 16 bits. + // + // for acc > math.MaxUint16 { + // acc = (acc & 0xffff) + acc>>16 + // } + JMP foldcond +foldloop: + MOVQ AX, DX + ANDQ $0xffff, DX + SHRQ $16, AX + ADDQ DX, AX + // We don't need ADC because folding will take care of it +foldcond: + CMPQ AX, $0xffff + JA foldloop + + // Return the checksum in host byte order. + BSWAPQ AX + SHRQ $48, AX + MOVW AX, ret+32(FP) + MOVB R8, ret1+34(FP) + RET diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_amd64_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_amd64_state_autogen.go new file mode 100644 index 0000000000..4d4b3d5780 --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_amd64_state_autogen.go @@ -0,0 +1,6 @@ +// automatically generated by stateify. + +//go:build amd64 +// +build amd64 + +package checksum diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_noasm_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_noasm_unsafe.go new file mode 100644 index 0000000000..dee24e1963 --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_noasm_unsafe.go @@ -0,0 +1,80 @@ +// Copyright 2023 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !amd64 +// +build !amd64 + +package checksum + +import ( + "math" + "math/bits" + "unsafe" +) + +// Note: odd indicates whether initial is a partial checksum over an odd number +// of bytes. +func calculateChecksum(buf []byte, odd bool, initial uint16) (uint16, bool) { + // Note: we can probably remove unrolledCalculateChecksum altogether, + // but I don't have any 32 bit machines to benchmark on. + if bits.UintSize != 64 { + return unrolledCalculateChecksum(buf, odd, initial) + } + + // Utilize byte order independence and parallel summation as + // described in RFC 1071 1.2. + + // It doesn't matter what endianness we use, only that it's + // consistent throughout the calculation. See RFC 1071 1.2.B. + acc := uint(((initial & 0xff00) >> 8) | ((initial & 0x00ff) << 8)) + + // Account for initial having been calculated over an odd number of + // bytes. + if odd { + acc += uint(buf[0]) << 8 + buf = buf[1:] + } + + // See whether we're checksumming an odd number of bytes. If + // so, the final byte is a big endian most significant byte. + odd = len(buf)%2 != 0 + if odd { + acc += uint(buf[len(buf)-1]) + buf = buf[:len(buf)-1] + } + + // Compute the checksum 8 bytes at a time. + var carry uint + for len(buf) >= 8 { + acc, carry = bits.Add(acc, *(*uint)(unsafe.Pointer(&buf[0])), carry) + buf = buf[8:] + } + + // Compute the remainder 2 bytes at a time. We are guaranteed that + // len(buf) is even due to the above handling of odd-length buffers. + for len(buf) > 0 { + acc, carry = bits.Add(acc, uint(*(*uint16)(unsafe.Pointer(&buf[0]))), carry) + buf = buf[2:] + } + acc += carry + + // Fold the checksum into 16 bits. + for acc > math.MaxUint16 { + acc = (acc & 0xffff) + acc>>16 + } + + // Swap the byte order before returning. + acc = ((acc & 0xff00) >> 8) | ((acc & 0x00ff) << 8) + return uint16(acc), odd +} diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_unsafe_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_unsafe_state_autogen.go new file mode 100644 index 0000000000..b5135383bf --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/checksum/checksum_unsafe_state_autogen.go @@ -0,0 +1,6 @@ +// automatically generated by stateify. + +//go:build !amd64 +// +build !amd64 + +package checksum diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/errors.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/errors.go index ff0a7be4f8..78cc9fdd48 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/errors.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/errors.go @@ -394,6 +394,32 @@ func (*ErrHostUnreachable) IgnoreStats() bool { } func (*ErrHostUnreachable) String() string { return "no route to host" } +// ErrHostDown indicates that a destination host is down. +// +// +stateify savable +type ErrHostDown struct{} + +func (*ErrHostDown) isError() {} + +// IgnoreStats implements Error. +func (*ErrHostDown) IgnoreStats() bool { + return false +} +func (*ErrHostDown) String() string { return "host is down" } + +// ErrNoNet indicates that the host is not on the network. +// +// +stateify savable +type ErrNoNet struct{} + +func (*ErrNoNet) isError() {} + +// IgnoreStats implements Error. +func (*ErrNoNet) IgnoreStats() bool { + return false +} +func (*ErrNoNet) String() string { return "machine is not on the network" } + // ErrNoSuchFile is used to indicate that ENOENT should be returned the to // calling application. 
// diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/checksum.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/checksum.go index 8fc4361566..1cdda6b909 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/checksum.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/checksum.go @@ -28,13 +28,13 @@ import ( // destination protocol and network address. Pseudo-headers are needed by // transport layers when calculating their own checksum. func PseudoHeaderChecksum(protocol tcpip.TransportProtocolNumber, srcAddr tcpip.Address, dstAddr tcpip.Address, totalLen uint16) uint16 { - xsum := checksum.Checksum([]byte(srcAddr), 0) - xsum = checksum.Checksum([]byte(dstAddr), xsum) + xsum := checksum.Checksum(srcAddr.AsSlice(), 0) + xsum = checksum.Checksum(dstAddr.AsSlice(), xsum) // Add the length portion of the checksum to the pseudo-checksum. - tmp := make([]byte, 2) - binary.BigEndian.PutUint16(tmp, totalLen) - xsum = checksum.Checksum(tmp, xsum) + var tmp [2]byte + binary.BigEndian.PutUint16(tmp[:], totalLen) + xsum = checksum.Checksum(tmp[:], xsum) return checksum.Checksum([]byte{0, uint8(protocol)}, xsum) } @@ -68,14 +68,17 @@ func checksumUpdate2ByteAlignedUint16(xsum, old, new uint16) uint16 { func checksumUpdate2ByteAlignedAddress(xsum uint16, old, new tcpip.Address) uint16 { const uint16Bytes = 2 - if len(old) != len(new) { - panic(fmt.Sprintf("buffer lengths are different; old = %d, new = %d", len(old), len(new))) + if old.BitLen() != new.BitLen() { + panic(fmt.Sprintf("buffer lengths are different; old = %d, new = %d", old.BitLen()/8, new.BitLen()/8)) } - if len(old)%uint16Bytes != 0 { - panic(fmt.Sprintf("buffer has an odd number of bytes; got = %d", len(old))) + if oldBytes := old.BitLen() % 16; oldBytes != 0 { + panic(fmt.Sprintf("buffer has an odd number of bytes; got = %d", oldBytes)) } + oldAddr := old.AsSlice() + newAddr := new.AsSlice() + // As per RFC 1071 page 4, // (4) Incremental Update // @@ -89,12 +92,12 @@ func checksumUpdate2ByteAlignedAddress(xsum uint16, old, new tcpip.Address) uint // checksum C, the new checksum C' is: // // C' = C + (-m) + m' = C + (m' - m) - for len(old) != 0 { + for len(oldAddr) != 0 { // Convert the 2 byte sequences to uint16 values then apply the increment // update. - xsum = checksumUpdate2ByteAlignedUint16(xsum, (uint16(old[0])<<8)+uint16(old[1]), (uint16(new[0])<<8)+uint16(new[1])) - old = old[uint16Bytes:] - new = new[uint16Bytes:] + xsum = checksumUpdate2ByteAlignedUint16(xsum, (uint16(oldAddr[0])<<8)+uint16(oldAddr[1]), (uint16(newAddr[0])<<8)+uint16(newAddr[1])) + oldAddr = oldAddr[uint16Bytes:] + newAddr = newAddr[uint16Bytes:] } return xsum diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/eth.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/eth.go index 1f18213e52..11230ff4e4 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/eth.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/eth.go @@ -159,10 +159,11 @@ func EthernetAddressFromMulticastIPv4Address(addr tcpip.Address) tcpip.LinkAddre // address by placing the low-order 23-bits of the IP address // into the low-order 23 bits of the Ethernet multicast address // 01-00-5E-00-00-00 (hex). 
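Editor's note: as a usage sketch for the PseudoHeaderChecksum change above (addresses now go through AsSlice() instead of a raw []byte cast), a caller of this vendored API would do something like the following. The addresses and the transport length of 20 are illustrative, and header.TCPProtocolNumber is assumed to be the protocol-6 constant from the same header package:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	// Hypothetical endpoints; any IPv4 pair works the same way.
	src := tcpip.AddrFrom4([4]byte{192, 0, 2, 1})
	dst := tcpip.AddrFrom4([4]byte{192, 0, 2, 2})

	// Seed a transport checksum with the pseudo-header (protocol, addresses,
	// and total transport length), as TCP/UDP require.
	// header.TCPProtocolNumber is assumed from the same package.
	xsum := header.PseudoHeaderChecksum(header.TCPProtocolNumber, src, dst, 20)
	fmt.Printf("pseudo-header checksum seed: %#04x\n", xsum)
}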
+ addrBytes := addr.As4() linkAddrBytes[0] = 0x1 linkAddrBytes[2] = 0x5e - linkAddrBytes[3] = addr[1] & 0x7F - copy(linkAddrBytes[4:], addr[IPv4AddressSize-2:]) + linkAddrBytes[3] = addrBytes[1] & 0x7F + copy(linkAddrBytes[4:], addrBytes[IPv4AddressSize-2:]) return tcpip.LinkAddress(linkAddrBytes[:]) } @@ -180,7 +181,8 @@ func EthernetAddressFromMulticastIPv6Address(addr tcpip.Address) tcpip.LinkAddre // transmitted to the Ethernet multicast address whose first // two octets are the value 3333 hexadecimal and whose last // four octets are the last four octets of DST. - linkAddrBytes := []byte(addr[IPv6AddressSize-EthernetAddressSize:]) + addrBytes := addr.As16() + linkAddrBytes := []byte(addrBytes[IPv6AddressSize-EthernetAddressSize:]) linkAddrBytes[0] = 0x33 linkAddrBytes[1] = 0x33 return tcpip.LinkAddress(linkAddrBytes[:]) diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/icmpv4.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/icmpv4.go index dd385b4551..abb64db3be 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/icmpv4.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/icmpv4.go @@ -106,14 +106,22 @@ const ( // ICMP codes for ICMPv4 Destination Unreachable messages as defined in RFC 792, // RFC 1122 section 3.2.2.1 and RFC 1812 section 5.2.7.1. const ( - ICMPv4NetUnreachable ICMPv4Code = 0 - ICMPv4HostUnreachable ICMPv4Code = 1 - ICMPv4ProtoUnreachable ICMPv4Code = 2 - ICMPv4PortUnreachable ICMPv4Code = 3 - ICMPv4FragmentationNeeded ICMPv4Code = 4 - ICMPv4NetProhibited ICMPv4Code = 9 - ICMPv4HostProhibited ICMPv4Code = 10 - ICMPv4AdminProhibited ICMPv4Code = 13 + ICMPv4NetUnreachable ICMPv4Code = 0 + ICMPv4HostUnreachable ICMPv4Code = 1 + ICMPv4ProtoUnreachable ICMPv4Code = 2 + ICMPv4PortUnreachable ICMPv4Code = 3 + ICMPv4FragmentationNeeded ICMPv4Code = 4 + ICMPv4SourceRouteFailed ICMPv4Code = 5 + ICMPv4DestinationNetworkUnknown ICMPv4Code = 6 + ICMPv4DestinationHostUnknown ICMPv4Code = 7 + ICMPv4SourceHostIsolated ICMPv4Code = 8 + ICMPv4NetProhibited ICMPv4Code = 9 + ICMPv4HostProhibited ICMPv4Code = 10 + ICMPv4NetUnreachableForTos ICMPv4Code = 11 + ICMPv4HostUnreachableForTos ICMPv4Code = 12 + ICMPv4AdminProhibited ICMPv4Code = 13 + ICMPv4HostPrecedenceViolation ICMPv4Code = 14 + ICMPv4PrecedenceCutInEffect ICMPv4Code = 15 ) // ICMPv4UnusedCode is a code to use in ICMP messages where no code is needed. diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/icmpv6.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/icmpv6.go index d4b24f81bb..4e75ac4005 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/icmpv6.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/icmpv6.go @@ -121,6 +121,10 @@ const ( ICMPv6MulticastListenerQuery ICMPv6Type = 130 ICMPv6MulticastListenerReport ICMPv6Type = 131 ICMPv6MulticastListenerDone ICMPv6Type = 132 + + // Multicast Listener Discovert Version 2 (MLDv2) messages, see RFC 3810. + + ICMPv6MulticastListenerV2Report ICMPv6Type = 143 ) // IsErrorType returns true if the receiver is an ICMP error type. diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/igmp.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/igmp.go index 94c057f24a..555ab52e54 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/igmp.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/igmp.go @@ -94,6 +94,8 @@ const ( // IGMPLeaveGroup indicates that the message type is a Leave Group // notification message. IGMPLeaveGroup IGMPType = 0x17 + // IGMPv3MembershipReport indicates that the message type is a IGMPv3 report. 
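Editor's note: to make the 23-bit and low-four-octet mappings above concrete, here is a small sketch (groups chosen only for illustration) driving both helpers with the new fixed-size address constructors:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	// 224.0.0.22 maps to 01:00:5e:00:00:16 (low 23 bits of the group).
	v4Group := tcpip.AddrFrom4([4]byte{224, 0, 0, 22})
	fmt.Printf("% x\n", []byte(header.EthernetAddressFromMulticastIPv4Address(v4Group)))

	// ff02::1:3 maps to 33:33:00:01:00:03 (last four octets of the group).
	v6Group := tcpip.AddrFrom16([16]byte{0: 0xff, 1: 0x02, 13: 0x01, 15: 0x03})
	fmt.Printf("% x\n", []byte(header.EthernetAddressFromMulticastIPv6Address(v6Group)))
}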
+ IGMPv3MembershipReport IGMPType = 0x22 ) // Type is the IGMP type field. @@ -112,7 +114,7 @@ func (b IGMP) MaxRespTime() time.Duration { // messages, and specifies the maximum allowed time before sending a // responding report in units of 1/10 second. In all other messages, it // is set to zero by the sender and ignored by receivers. - return DecisecondToDuration(b[igmpMaxRespTimeOffset]) + return DecisecondToDuration(uint16(b[igmpMaxRespTimeOffset])) } // SetMaxRespTime sets the MaxRespTimeField. @@ -130,12 +132,13 @@ func (b IGMP) SetChecksum(checksum uint16) { // GroupAddress gets the Group Address field. func (b IGMP) GroupAddress() tcpip.Address { - return tcpip.Address(b[igmpGroupAddressOffset:][:IPv4AddressSize]) + return tcpip.AddrFrom4([4]byte(b[igmpGroupAddressOffset:][:IPv4AddressSize])) } // SetGroupAddress sets the Group Address field. func (b IGMP) SetGroupAddress(address tcpip.Address) { - if n := copy(b[igmpGroupAddressOffset:], address); n != IPv4AddressSize { + addrBytes := address.As4() + if n := copy(b[igmpGroupAddressOffset:], addrBytes[:]); n != IPv4AddressSize { panic(fmt.Sprintf("copied %d bytes, expected %d", n, IPv4AddressSize)) } } @@ -177,6 +180,6 @@ func IGMPCalculateChecksum(h IGMP) uint16 { // DecisecondToDuration converts a value representing deci-seconds to a // time.Duration. -func DecisecondToDuration(ds uint8) time.Duration { +func DecisecondToDuration(ds uint16) time.Duration { return time.Duration(ds) * time.Second / 10 } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/igmpv3.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/igmpv3.go new file mode 100644 index 0000000000..523441e85d --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/igmpv3.go @@ -0,0 +1,502 @@ +// Copyright 2022 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package header + +import ( + "bytes" + "encoding/binary" + "fmt" + "time" + + "gvisor.dev/gvisor/pkg/tcpip" +) + +var ( + // IGMPv3RoutersAddress is the address to send IGMPv3 reports to. + // + // As per RFC 3376 section 4.2.14, + // + // Version 3 Reports are sent with an IP destination address of + // 224.0.0.22, to which all IGMPv3-capable multicast routers listen. + IGMPv3RoutersAddress = tcpip.AddrFrom4([4]byte{0xe0, 0x00, 0x00, 0x16}) +) + +const ( + // IGMPv3QueryMinimumSize is the mimum size of a valid IGMPv3 query, + // as per RFC 3376 section 4.1. + IGMPv3QueryMinimumSize = 12 + + igmpv3QueryMaxRespCodeOffset = 1 + igmpv3QueryGroupAddressOffset = 4 + igmpv3QueryResvSQRVOffset = 8 + igmpv3QueryQRVMask = 0b111 + igmpv3QueryQQICOffset = 9 + igmpv3QueryNumberOfSourcesOffset = 10 + igmpv3QuerySourcesOffset = 12 +) + +// IGMPv3Query is an IGMPv3 query message. 
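Editor's note: the DecisecondToDuration signature above widens from uint8 to uint16 because IGMPv3 Max Resp Times decoded from the floating-point code can exceed 255 deciseconds. A quick sketch of what that means for callers:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	// Classic IGMPv2-style value: 100 deciseconds = 10s.
	fmt.Println(header.DecisecondToDuration(100))

	// IGMPv3 codes >= 128 can decode to values well beyond a uint8, up to
	// (0xf|0x10)<<(7+3) = 31744 deciseconds, hence the wider argument type.
	fmt.Println(header.DecisecondToDuration(0x1f << 10))
}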
+// +// As per RFC 3376 section 4.1, +// +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 0x11 | Max Resp Code | Checksum | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Group Address | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Resv |S| QRV | QQIC | Number of Sources (N) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Source Address [1] | +// +- -+ +// | Source Address [2] | +// +- . -+ +// . . . +// . . . +// +- -+ +// | Source Address [N] | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +type IGMPv3Query IGMP + +// MaximumResponseCode returns the Maximum Response Code. +func (i IGMPv3Query) MaximumResponseCode() uint8 { + return i[igmpv3QueryMaxRespCodeOffset] +} + +// IGMPv3MaximumResponseDelay returns the Maximum Response Delay in an IGMPv3 +// Maximum Response Code. +// +// As per RFC 3376 section 4.1.1, +// +// The Max Resp Code field specifies the maximum time allowed before +// sending a responding report. The actual time allowed, called the Max +// Resp Time, is represented in units of 1/10 second and is derived from +// the Max Resp Code as follows: +// +// If Max Resp Code < 128, Max Resp Time = Max Resp Code +// +// If Max Resp Code >= 128, Max Resp Code represents a floating-point +// value as follows: +// +// 0 1 2 3 4 5 6 7 +// +-+-+-+-+-+-+-+-+ +// |1| exp | mant | +// +-+-+-+-+-+-+-+-+ +// +// Max Resp Time = (mant | 0x10) << (exp + 3) +// +// Small values of Max Resp Time allow IGMPv3 routers to tune the "leave +// latency" (the time between the moment the last host leaves a group +// and the moment the routing protocol is notified that there are no +// more members). Larger values, especially in the exponential range, +// allow tuning of the burstiness of IGMP traffic on a network. +func IGMPv3MaximumResponseDelay(codeRaw uint8) time.Duration { + code := uint16(codeRaw) + if code < 128 { + return DecisecondToDuration(code) + } + + const mantBits = 4 + const expMask = 0b111 + exp := (code >> mantBits) & expMask + mant := code & ((1 << mantBits) - 1) + return DecisecondToDuration((mant | 0x10) << (exp + 3)) +} + +// GroupAddress returns the group address. +func (i IGMPv3Query) GroupAddress() tcpip.Address { + return tcpip.AddrFrom4([4]byte(i[igmpv3QueryGroupAddressOffset:][:IPv4AddressSize])) +} + +// QuerierRobustnessVariable returns the querier's robustness variable. +func (i IGMPv3Query) QuerierRobustnessVariable() uint8 { + return i[igmpv3QueryResvSQRVOffset] & igmpv3QueryQRVMask +} + +// QuerierQueryInterval returns the querier's query interval. +func (i IGMPv3Query) QuerierQueryInterval() time.Duration { + return mldv2AndIGMPv3QuerierQueryCodeToInterval(i[igmpv3QueryQQICOffset]) +} + +// Sources returns an iterator over source addresses in the query. +// +// Returns false if the message cannot hold the expected number of sources. +func (i IGMPv3Query) Sources() (AddressIterator, bool) { + return makeAddressIterator( + i[igmpv3QuerySourcesOffset:], + binary.BigEndian.Uint16(i[igmpv3QueryNumberOfSourcesOffset:]), + IPv4AddressSize, + ) +} + +// IGMPv3ReportRecordType is the type of an IGMPv3 multicast address record +// found in an IGMPv3 report, as per RFC 3810 section 5.2.12. +type IGMPv3ReportRecordType int + +// IGMPv3 multicast address record types, as per RFC 3810 section 5.2.12. 
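Editor's note: a worked example of the exponent/mantissa decoding above: for a Max Resp Code of 200 (0b11001000), exp is 0b100 and mant is 0b1000, so the delay is (0x8|0x10)<<(4+3) = 3072 deciseconds, i.e. 307.2s:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	// Codes below 128 are taken literally, in deciseconds.
	fmt.Println(header.IGMPv3MaximumResponseDelay(100)) // 10s

	// Codes >= 128 use the 1|exp|mant floating-point form.
	fmt.Println(header.IGMPv3MaximumResponseDelay(200)) // 5m7.2s (3072 deciseconds)
}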
+const ( + IGMPv3ReportRecordModeIsInclude IGMPv3ReportRecordType = 1 + IGMPv3ReportRecordModeIsExclude IGMPv3ReportRecordType = 2 + IGMPv3ReportRecordChangeToIncludeMode IGMPv3ReportRecordType = 3 + IGMPv3ReportRecordChangeToExcludeMode IGMPv3ReportRecordType = 4 + IGMPv3ReportRecordAllowNewSources IGMPv3ReportRecordType = 5 + IGMPv3ReportRecordBlockOldSources IGMPv3ReportRecordType = 6 +) + +const ( + igmpv3ReportGroupAddressRecordMinimumSize = 8 + igmpv3ReportGroupAddressRecordTypeOffset = 0 + igmpv3ReportGroupAddressRecordAuxDataLenOffset = 1 + igmpv3ReportGroupAddressRecordAuxDataLenUnits = 4 + igmpv3ReportGroupAddressRecordNumberOfSourcesOffset = 2 + igmpv3ReportGroupAddressRecordGroupAddressOffset = 4 + igmpv3ReportGroupAddressRecordSourcesOffset = 8 +) + +// IGMPv3ReportGroupAddressRecordSerializer is an IGMPv3 Multicast Address +// Record serializer. +// +// As per RFC 3810 section 5.2, a Multicast Address Record has the following +// internal format: +// +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Record Type | Aux Data Len | Number of Sources (N) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// * * +// | | +// * Multicast Address * +// | | +// * * +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// * * +// | | +// * Source Address [1] * +// | | +// * * +// | | +// +- -+ +// | | +// * * +// | | +// * Source Address [2] * +// | | +// * * +// | | +// +- -+ +// . . . +// . . . +// . . . +// +- -+ +// | | +// * * +// | | +// * Source Address [N] * +// | | +// * * +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . . +// . Auxiliary Data . +// . . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +type IGMPv3ReportGroupAddressRecordSerializer struct { + RecordType IGMPv3ReportRecordType + GroupAddress tcpip.Address + Sources []tcpip.Address +} + +// Length returns the number of bytes this serializer would occupy. +func (s *IGMPv3ReportGroupAddressRecordSerializer) Length() int { + return igmpv3ReportGroupAddressRecordSourcesOffset + len(s.Sources)*IPv4AddressSize +} + +func copyIPv4Address(dst []byte, src tcpip.Address) { + srcBytes := src.As4() + if n := copy(dst, srcBytes[:]); n != IPv4AddressSize { + panic(fmt.Sprintf("got copy(...) = %d, want = %d", n, IPv4AddressSize)) + } +} + +// SerializeInto serializes the record into the buffer. +// +// Panics if the buffer does not have enough space to fit the record. +func (s *IGMPv3ReportGroupAddressRecordSerializer) SerializeInto(b []byte) { + b[igmpv3ReportGroupAddressRecordTypeOffset] = byte(s.RecordType) + b[igmpv3ReportGroupAddressRecordAuxDataLenOffset] = 0 + binary.BigEndian.PutUint16(b[igmpv3ReportGroupAddressRecordNumberOfSourcesOffset:], uint16(len(s.Sources))) + copyIPv4Address(b[igmpv3ReportGroupAddressRecordGroupAddressOffset:], s.GroupAddress) + b = b[igmpv3ReportGroupAddressRecordSourcesOffset:] + for _, source := range s.Sources { + copyIPv4Address(b, source) + b = b[IPv4AddressSize:] + } +} + +const ( + igmpv3ReportTypeOffset = 0 + igmpv3ReportReserved1Offset = 1 + igmpv3ReportReserved2Offset = 4 + igmpv3ReportNumberOfGroupAddressRecordsOffset = 6 + igmpv3ReportGroupAddressRecordsOffset = 8 +) + +// IGMPv3ReportSerializer is an MLD Version 2 Report serializer. 
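Editor's note: a sketch of how the record serializer above is meant to be driven (group and source are arbitrary illustrative values); Length() is the 8-byte fixed record header plus 4 bytes per IPv4 source:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	record := header.IGMPv3ReportGroupAddressRecordSerializer{
		RecordType:   header.IGMPv3ReportRecordChangeToExcludeMode,
		GroupAddress: tcpip.AddrFrom4([4]byte{224, 0, 0, 251}),
		Sources:      []tcpip.Address{tcpip.AddrFrom4([4]byte{192, 0, 2, 1})},
	}

	// 8 fixed bytes + 4 per source = 12; byte 0 carries the record type (4).
	buf := make([]byte, record.Length())
	record.SerializeInto(buf)
	fmt.Println(len(buf), buf[0]) // 12 4
}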
+// +// As per RFC 3810 section 5.2, +// +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 143 | Reserved | Checksum | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Reserved |Nr of Mcast Address Records (M)| +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . . +// . Multicast Address Record [1] . +// . . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . . +// . Multicast Address Record [2] . +// . . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | . | +// . . . +// | . | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . . +// . Multicast Address Record [M] . +// . . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +type IGMPv3ReportSerializer struct { + Records []IGMPv3ReportGroupAddressRecordSerializer +} + +// Length returns the number of bytes this serializer would occupy. +func (s *IGMPv3ReportSerializer) Length() int { + ret := igmpv3ReportGroupAddressRecordsOffset + for _, record := range s.Records { + ret += record.Length() + } + return ret +} + +// SerializeInto serializes the report into the buffer. +// +// Panics if the buffer does not have enough space to fit the report. +func (s *IGMPv3ReportSerializer) SerializeInto(b []byte) { + b[igmpv3ReportTypeOffset] = byte(IGMPv3MembershipReport) + b[igmpv3ReportReserved1Offset] = 0 + binary.BigEndian.PutUint16(b[igmpv3ReportReserved2Offset:], 0) + binary.BigEndian.PutUint16(b[igmpv3ReportNumberOfGroupAddressRecordsOffset:], uint16(len(s.Records))) + recordsBytes := b[igmpv3ReportGroupAddressRecordsOffset:] + for _, record := range s.Records { + len := record.Length() + record.SerializeInto(recordsBytes[:len]) + recordsBytes = recordsBytes[len:] + } + binary.BigEndian.PutUint16(b[igmpChecksumOffset:], IGMPCalculateChecksum(b)) +} + +// IGMPv3ReportGroupAddressRecord is an IGMPv3 record. +// +// As per RFC 3810 section 5.2, a Multicast Address Record has the following +// internal format: +// +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Record Type | Aux Data Len | Number of Sources (N) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// * * +// | | +// * Multicast Address * +// | | +// * * +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// * * +// | | +// * Source Address [1] * +// | | +// * * +// | | +// +- -+ +// | | +// * * +// | | +// * Source Address [2] * +// | | +// * * +// | | +// +- -+ +// . . . +// . . . +// . . . +// +- -+ +// | | +// * * +// | | +// * Source Address [N] * +// | | +// * * +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . . +// . Auxiliary Data . +// . . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +type IGMPv3ReportGroupAddressRecord []byte + +// RecordType returns the type of this record. +func (r IGMPv3ReportGroupAddressRecord) RecordType() IGMPv3ReportRecordType { + return IGMPv3ReportRecordType(r[igmpv3ReportGroupAddressRecordTypeOffset]) +} + +// AuxDataLen returns the length of the auxillary data in this record. 
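Editor's note: a companion sketch for the whole-report path shown above. IGMPv3ReportSerializer.SerializeInto fills in the message type (0x22) and the checksum itself, so the caller only sizes the buffer and hands it over (values below are illustrative):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	report := header.IGMPv3ReportSerializer{
		Records: []header.IGMPv3ReportGroupAddressRecordSerializer{{
			RecordType:   header.IGMPv3ReportRecordModeIsExclude,
			GroupAddress: tcpip.AddrFrom4([4]byte{239, 1, 2, 3}),
		}},
	}

	b := make([]byte, report.Length())
	report.SerializeInto(b)
	// Byte 0 is the IGMPv3 membership report type, bytes 2-3 the checksum.
	fmt.Printf("type=%#x len=%d\n", b[0], len(b)) // type=0x22 len=16
}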
+func (r IGMPv3ReportGroupAddressRecord) AuxDataLen() int { + return int(r[igmpv3ReportGroupAddressRecordAuxDataLenOffset]) * igmpv3ReportGroupAddressRecordAuxDataLenUnits +} + +// numberOfSources returns the number of sources in this record. +func (r IGMPv3ReportGroupAddressRecord) numberOfSources() uint16 { + return binary.BigEndian.Uint16(r[igmpv3ReportGroupAddressRecordNumberOfSourcesOffset:]) +} + +// GroupAddress returns the multicast address this record targets. +func (r IGMPv3ReportGroupAddressRecord) GroupAddress() tcpip.Address { + return tcpip.AddrFrom4([4]byte(r[igmpv3ReportGroupAddressRecordGroupAddressOffset:][:IPv4AddressSize])) +} + +// Sources returns an iterator over source addresses in the query. +// +// Returns false if the message cannot hold the expected number of sources. +func (r IGMPv3ReportGroupAddressRecord) Sources() (AddressIterator, bool) { + expectedLen := int(r.numberOfSources()) * IPv4AddressSize + b := r[igmpv3ReportGroupAddressRecordSourcesOffset:] + if len(b) < expectedLen { + return AddressIterator{}, false + } + return AddressIterator{addressSize: IPv4AddressSize, buf: bytes.NewBuffer(b[:expectedLen])}, true +} + +// IGMPv3Report is an IGMPv3 Report. +// +// As per RFC 3810 section 5.2, +// +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 143 | Reserved | Checksum | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Reserved |Nr of Mcast Address Records (M)| +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . . +// . Multicast Address Record [1] . +// . . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . . +// . Multicast Address Record [2] . +// . . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | . | +// . . . +// | . | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . . +// . Multicast Address Record [M] . +// . . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +type IGMPv3Report []byte + +// Checksum returns the checksum. +func (i IGMPv3Report) Checksum() uint16 { + return binary.BigEndian.Uint16(i[igmpChecksumOffset:]) +} + +// IGMPv3ReportGroupAddressRecordIterator is an iterator over IGMPv3 Multicast +// Address Records. +type IGMPv3ReportGroupAddressRecordIterator struct { + recordsLeft uint16 + buf *bytes.Buffer +} + +// IGMPv3ReportGroupAddressRecordIteratorNextDisposition is the possible +// return values from IGMPv3ReportGroupAddressRecordIterator.Next. +type IGMPv3ReportGroupAddressRecordIteratorNextDisposition int + +const ( + // IGMPv3ReportGroupAddressRecordIteratorNextOk indicates that a multicast + // address record was yielded. + IGMPv3ReportGroupAddressRecordIteratorNextOk IGMPv3ReportGroupAddressRecordIteratorNextDisposition = iota + + // IGMPv3ReportGroupAddressRecordIteratorNextDone indicates that the iterator + // has been exhausted. + IGMPv3ReportGroupAddressRecordIteratorNextDone + + // IGMPv3ReportGroupAddressRecordIteratorNextErrBufferTooShort indicates + // that the iterator expected another record, but the buffer ended + // prematurely. + IGMPv3ReportGroupAddressRecordIteratorNextErrBufferTooShort +) + +// Next returns the next IGMPv3 Multicast Address Record. 
+func (it *IGMPv3ReportGroupAddressRecordIterator) Next() (IGMPv3ReportGroupAddressRecord, IGMPv3ReportGroupAddressRecordIteratorNextDisposition) { + if it.recordsLeft == 0 { + return IGMPv3ReportGroupAddressRecord{}, IGMPv3ReportGroupAddressRecordIteratorNextDone + } + if it.buf.Len() < igmpv3ReportGroupAddressRecordMinimumSize { + return IGMPv3ReportGroupAddressRecord{}, IGMPv3ReportGroupAddressRecordIteratorNextErrBufferTooShort + } + + hdr := IGMPv3ReportGroupAddressRecord(it.buf.Bytes()) + expectedLen := igmpv3ReportGroupAddressRecordMinimumSize + + int(hdr.AuxDataLen()) + int(hdr.numberOfSources())*IPv4AddressSize + + bytes := it.buf.Next(expectedLen) + if len(bytes) < expectedLen { + return IGMPv3ReportGroupAddressRecord{}, IGMPv3ReportGroupAddressRecordIteratorNextErrBufferTooShort + } + it.recordsLeft-- + return IGMPv3ReportGroupAddressRecord(bytes), IGMPv3ReportGroupAddressRecordIteratorNextOk +} + +// GroupAddressRecords returns an iterator of IGMPv3 Multicast Address +// Records. +func (i IGMPv3Report) GroupAddressRecords() IGMPv3ReportGroupAddressRecordIterator { + return IGMPv3ReportGroupAddressRecordIterator{ + recordsLeft: binary.BigEndian.Uint16(i[igmpv3ReportNumberOfGroupAddressRecordsOffset:]), + buf: bytes.NewBuffer(i[igmpv3ReportGroupAddressRecordsOffset:]), + } +} diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv4.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv4.go index 16a63a46d7..d98c1c4212 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv4.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv4.go @@ -142,26 +142,15 @@ const ( // IPv4AddressSize is the size, in bytes, of an IPv4 address. IPv4AddressSize = 4 + // IPv4AddressSizeBits is the size, in bits, of an IPv4 address. + IPv4AddressSizeBits = 32 + // IPv4ProtocolNumber is IPv4's network protocol number. IPv4ProtocolNumber tcpip.NetworkProtocolNumber = 0x0800 // IPv4Version is the version of the IPv4 protocol. IPv4Version = 4 - // IPv4AllSystems is the all systems IPv4 multicast address as per - // IANA's IPv4 Multicast Address Space Registry. See - // https://www.iana.org/assignments/multicast-addresses/multicast-addresses.xhtml. - IPv4AllSystems tcpip.Address = "\xe0\x00\x00\x01" - - // IPv4Broadcast is the broadcast address of the IPv4 procotol. - IPv4Broadcast tcpip.Address = "\xff\xff\xff\xff" - - // IPv4Any is the non-routable IPv4 "any" meta address. - IPv4Any tcpip.Address = "\x00\x00\x00\x00" - - // IPv4AllRoutersGroup is a multicast address for all routers. - IPv4AllRoutersGroup tcpip.Address = "\xe0\x00\x00\x02" - // IPv4MinimumProcessableDatagramSize is the minimum size of an IP // packet that every IPv4 capable host must be able to // process/reassemble. @@ -175,6 +164,22 @@ const ( IPv4MinimumMTU = 68 ) +var ( + // IPv4AllSystems is the all systems IPv4 multicast address as per + // IANA's IPv4 Multicast Address Space Registry. See + // https://www.iana.org/assignments/multicast-addresses/multicast-addresses.xhtml. + IPv4AllSystems = tcpip.AddrFrom4([4]byte{0xe0, 0x00, 0x00, 0x01}) + + // IPv4Broadcast is the broadcast address of the IPv4 procotol. + IPv4Broadcast = tcpip.AddrFrom4([4]byte{0xff, 0xff, 0xff, 0xff}) + + // IPv4Any is the non-routable IPv4 "any" meta address. + IPv4Any = tcpip.AddrFrom4([4]byte{0x00, 0x00, 0x00, 0x00}) + + // IPv4AllRoutersGroup is a multicast address for all routers. + IPv4AllRoutersGroup = tcpip.AddrFrom4([4]byte{0xe0, 0x00, 0x00, 0x02}) +) + // Flags that may be set in an IPv4 packet. 
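Editor's note: on the parsing side, the iterator above is consumed with a disposition switch. A minimal sketch; the report bytes would normally come off the wire, here we simply reuse the serializer from the same package to have something valid to walk:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	s := header.IGMPv3ReportSerializer{
		Records: []header.IGMPv3ReportGroupAddressRecordSerializer{{
			RecordType:   header.IGMPv3ReportRecordModeIsInclude,
			GroupAddress: tcpip.AddrFrom4([4]byte{224, 0, 0, 251}),
		}},
	}
	b := make([]byte, s.Length())
	s.SerializeInto(b)

	it := header.IGMPv3Report(b).GroupAddressRecords()
	for {
		rec, disp := it.Next()
		switch disp {
		case header.IGMPv3ReportGroupAddressRecordIteratorNextOk:
			fmt.Println("group:", rec.GroupAddress())
		case header.IGMPv3ReportGroupAddressRecordIteratorNextDone:
			return
		default: // buffer ended before the advertised number of records
			panic("malformed report")
		}
	}
}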
const ( IPv4FlagMoreFragments = 1 << iota @@ -184,7 +189,7 @@ const ( // ipv4LinkLocalUnicastSubnet is the IPv4 link local unicast subnet as defined // by RFC 3927 section 1. var ipv4LinkLocalUnicastSubnet = func() tcpip.Subnet { - subnet, err := tcpip.NewSubnet("\xa9\xfe\x00\x00", "\xff\xff\x00\x00") + subnet, err := tcpip.NewSubnet(tcpip.AddrFrom4([4]byte{0xa9, 0xfe, 0x00, 0x00}), tcpip.MaskFrom("\xff\xff\x00\x00")) if err != nil { panic(err) } @@ -194,7 +199,7 @@ var ipv4LinkLocalUnicastSubnet = func() tcpip.Subnet { // ipv4LinkLocalMulticastSubnet is the IPv4 link local multicast subnet as // defined by RFC 5771 section 4. var ipv4LinkLocalMulticastSubnet = func() tcpip.Subnet { - subnet, err := tcpip.NewSubnet("\xe0\x00\x00\x00", "\xff\xff\xff\x00") + subnet, err := tcpip.NewSubnet(tcpip.AddrFrom4([4]byte{0xe0, 0x00, 0x00, 0x00}), tcpip.MaskFrom("\xff\xff\xff\x00")) if err != nil { panic(err) } @@ -203,7 +208,32 @@ var ipv4LinkLocalMulticastSubnet = func() tcpip.Subnet { // IPv4EmptySubnet is the empty IPv4 subnet. var IPv4EmptySubnet = func() tcpip.Subnet { - subnet, err := tcpip.NewSubnet(IPv4Any, tcpip.AddressMask(IPv4Any)) + subnet, err := tcpip.NewSubnet(IPv4Any, tcpip.MaskFrom("\x00\x00\x00\x00")) + if err != nil { + panic(err) + } + return subnet +}() + +// IPv4CurrentNetworkSubnet is the subnet of addresses for the current network, +// per RFC 6890 section 2.2.2, +// +// +----------------------+----------------------------+ +// | Attribute | Value | +// +----------------------+----------------------------+ +// | Address Block | 0.0.0.0/8 | +// | Name | "This host on this network"| +// | RFC | [RFC1122], Section 3.2.1.3 | +// | Allocation Date | September 1981 | +// | Termination Date | N/A | +// | Source | True | +// | Destination | False | +// | Forwardable | False | +// | Global | False | +// | Reserved-by-Protocol | True | +// +----------------------+----------------------------+ +var IPv4CurrentNetworkSubnet = func() tcpip.Subnet { + subnet, err := tcpip.NewSubnet(IPv4Any, tcpip.MaskFrom("\xff\x00\x00\x00")) if err != nil { panic(err) } @@ -212,7 +242,7 @@ var IPv4EmptySubnet = func() tcpip.Subnet { // IPv4LoopbackSubnet is the loopback subnet for IPv4. var IPv4LoopbackSubnet = func() tcpip.Subnet { - subnet, err := tcpip.NewSubnet(tcpip.Address("\x7f\x00\x00\x00"), tcpip.AddressMask("\xff\x00\x00\x00")) + subnet, err := tcpip.NewSubnet(tcpip.AddrFrom4([4]byte{0x7f, 0x00, 0x00, 0x00}), tcpip.MaskFrom("\xff\x00\x00\x00")) if err != nil { panic(err) } @@ -307,13 +337,13 @@ func (b IPv4) Checksum() uint16 { // SourceAddress returns the "source address" field of the IPv4 header. func (b IPv4) SourceAddress() tcpip.Address { - return tcpip.Address(b[srcAddr : srcAddr+IPv4AddressSize]) + return tcpip.AddrFrom4([4]byte(b[srcAddr : srcAddr+IPv4AddressSize])) } // DestinationAddress returns the "destination address" field of the IPv4 // header. func (b IPv4) DestinationAddress() tcpip.Address { - return tcpip.Address(b[dstAddr : dstAddr+IPv4AddressSize]) + return tcpip.AddrFrom4([4]byte(b[dstAddr : dstAddr+IPv4AddressSize])) } // SetSourceAddressWithChecksumUpdate implements ChecksummableNetwork. @@ -400,13 +430,13 @@ func (b IPv4) SetID(v uint16) { // SetSourceAddress sets the "source address" field of the IPv4 header. func (b IPv4) SetSourceAddress(addr tcpip.Address) { - copy(b[srcAddr:srcAddr+IPv4AddressSize], addr) + copy(b[srcAddr:srcAddr+IPv4AddressSize], addr.AsSlice()) } // SetDestinationAddress sets the "destination address" field of the IPv4 // header. 
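Editor's note: the subnet definitions above now go through tcpip.AddrFrom4 and tcpip.MaskFrom rather than raw strings. A short sketch of the same pattern outside the vendored code; the 10.0.0.0/8 values are arbitrary, and Subnet.Contains is assumed to exist in the same tcpip package:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip"
)

func main() {
	// Same construction style as IPv4LoopbackSubnet above.
	subnet, err := tcpip.NewSubnet(
		tcpip.AddrFrom4([4]byte{10, 0, 0, 0}),
		tcpip.MaskFrom("\xff\x00\x00\x00"),
	)
	if err != nil {
		panic(err)
	}
	// Contains is assumed from the tcpip package.
	fmt.Println(subnet.Contains(tcpip.AddrFrom4([4]byte{10, 1, 2, 3})))  // true
	fmt.Println(subnet.Contains(tcpip.AddrFrom4([4]byte{192, 0, 2, 1}))) // false
}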
func (b IPv4) SetDestinationAddress(addr tcpip.Address) { - copy(b[dstAddr:dstAddr+IPv4AddressSize], addr) + copy(b[dstAddr:dstAddr+IPv4AddressSize], addr.AsSlice()) } // CalculateChecksum calculates the checksum of the IPv4 header. @@ -435,8 +465,8 @@ func (b IPv4) Encode(i *IPv4Fields) { b[ttl] = i.TTL b[protocol] = i.Protocol b.SetChecksum(i.Checksum) - copy(b[srcAddr:srcAddr+IPv4AddressSize], i.SrcAddr) - copy(b[dstAddr:dstAddr+IPv4AddressSize], i.DstAddr) + copy(b[srcAddr:srcAddr+IPv4AddressSize], i.SrcAddr.AsSlice()) + copy(b[dstAddr:dstAddr+IPv4AddressSize], i.DstAddr.AsSlice()) } // EncodePartial updates the total length and checksum fields of IPv4 header, @@ -510,19 +540,21 @@ func (b IPv4) IsChecksumValid() bool { // address (range 224.0.0.0 to 239.255.255.255). The four most significant bits // will be 1110 = 0xe0. func IsV4MulticastAddress(addr tcpip.Address) bool { - if len(addr) != IPv4AddressSize { + if addr.BitLen() != IPv4AddressSizeBits { return false } - return (addr[0] & 0xf0) == 0xe0 + addrBytes := addr.As4() + return (addrBytes[0] & 0xf0) == 0xe0 } // IsV4LoopbackAddress determines if the provided address is an IPv4 loopback // address (belongs to 127.0.0.0/8 subnet). See RFC 1122 section 3.2.1.3. func IsV4LoopbackAddress(addr tcpip.Address) bool { - if len(addr) != IPv4AddressSize { + if addr.BitLen() != IPv4AddressSizeBits { return false } - return addr[0] == 0x7f + addrBytes := addr.As4() + return addrBytes[0] == 0x7f } // ========================= Options ========================== @@ -904,13 +936,13 @@ func (ts *IPv4OptionTimestamp) UpdateTimestamp(addr tcpip.Address, clock tcpip.C binary.BigEndian.PutUint32(slot, ipv4TimestampTime(clock)) (*ts)[IPv4OptTSPointerOffset] += IPv4OptionTimestampSize case IPv4OptionTimestampWithIPFlag: - if n := copy(slot, addr); n != IPv4AddressSize { + if n := copy(slot, addr.AsSlice()); n != IPv4AddressSize { panic(fmt.Sprintf("copied %d bytes, expected %d bytes", n, IPv4AddressSize)) } binary.BigEndian.PutUint32(slot[IPv4AddressSize:], ipv4TimestampTime(clock)) (*ts)[IPv4OptTSPointerOffset] += IPv4OptionTimestampWithAddrSize case IPv4OptionTimestampWithPredefinedIPFlag: - if tcpip.Address(slot[:IPv4AddressSize]) == addr { + if tcpip.AddrFrom4([4]byte(slot[:IPv4AddressSize])) == addr { binary.BigEndian.PutUint32(slot[IPv4AddressSize:], ipv4TimestampTime(clock)) (*ts)[IPv4OptTSPointerOffset] += IPv4OptionTimestampWithAddrSize } @@ -962,7 +994,7 @@ func (rr *IPv4OptionRecordRoute) Pointer() uint8 { func (rr *IPv4OptionRecordRoute) StoreAddress(addr tcpip.Address) { start := rr.Pointer() - 1 // A one based number. // start and room checked by caller. - if n := copy((*rr)[start:], addr); n != IPv4AddressSize { + if n := copy((*rr)[start:], addr.AsSlice()); n != IPv4AddressSize { panic(fmt.Sprintf("copied %d bytes, expected %d bytes", n, IPv4AddressSize)) } (*rr)[IPv4OptRRPointerOffset] += IPv4AddressSize diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv6.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv6.go index d7ae19184e..ed30f77b36 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv6.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv6.go @@ -81,6 +81,9 @@ const ( // IPv6AddressSize is the size, in bytes, of an IPv6 address. IPv6AddressSize = 16 + // IPv6AddressSizeBits is the size, in bits, of an IPv6 address. + IPv6AddressSizeBits = 128 + // IPv6MaximumPayloadSize is the maximum size of a valid IPv6 payload per // RFC 8200 Section 4.5. 
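Editor's note: the predicates above now key off addr.BitLen() instead of the byte length of a string-backed address; a tiny illustrative check:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	fmt.Println(header.IsV4MulticastAddress(tcpip.AddrFrom4([4]byte{224, 0, 0, 1}))) // true
	fmt.Println(header.IsV4LoopbackAddress(tcpip.AddrFrom4([4]byte{127, 0, 0, 1})))  // true

	// A 16-byte address now fails the 32-bit BitLen check rather than a len() check.
	fmt.Println(header.IsV4LoopbackAddress(tcpip.AddrFrom16([16]byte{15: 1}))) // false (::1)
}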
IPv6MaximumPayloadSize = 65535 @@ -91,12 +94,45 @@ const ( // IPv6Version is the version of the ipv6 protocol. IPv6Version = 6 + // IIDSize is the size of an interface identifier (IID), in bytes, as + // defined by RFC 4291 section 2.5.1. + IIDSize = 8 + + // IPv6MinimumMTU is the minimum MTU required by IPv6, per RFC 8200, + // section 5: + // IPv6 requires that every link in the Internet have an MTU of 1280 octets + // or greater. This is known as the IPv6 minimum link MTU. + IPv6MinimumMTU = 1280 + + // IIDOffsetInIPv6Address is the offset, in bytes, from the start + // of an IPv6 address to the beginning of the interface identifier + // (IID) for auto-generated addresses. That is, all bytes before + // the IIDOffsetInIPv6Address-th byte are the prefix bytes, and all + // bytes including and after the IIDOffsetInIPv6Address-th byte are + // for the IID. + IIDOffsetInIPv6Address = 8 + + // OpaqueIIDSecretKeyMinBytes is the recommended minimum number of bytes + // for the secret key used to generate an opaque interface identifier as + // outlined by RFC 7217. + OpaqueIIDSecretKeyMinBytes = 16 + + // ipv6MulticastAddressScopeByteIdx is the byte where the scope (scop) field + // is located within a multicast IPv6 address, as per RFC 4291 section 2.7. + ipv6MulticastAddressScopeByteIdx = 1 + + // ipv6MulticastAddressScopeMask is the mask for the scope (scop) field, + // within the byte holding the field, as per RFC 4291 section 2.7. + ipv6MulticastAddressScopeMask = 0xF +) + +var ( // IPv6AllNodesMulticastAddress is a link-local multicast group that // all IPv6 nodes MUST join, as per RFC 4291, section 2.8. Packets // destined to this address will reach all nodes on a link. // // The address is ff02::1. - IPv6AllNodesMulticastAddress tcpip.Address = "\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01" + IPv6AllNodesMulticastAddress = tcpip.AddrFrom16([16]byte{0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}) // IPv6AllRoutersInterfaceLocalMulticastAddress is an interface-local // multicast group that all IPv6 routers MUST join, as per RFC 4291, section @@ -104,59 +140,28 @@ const ( // interface. // // The address is ff01::2. - IPv6AllRoutersInterfaceLocalMulticastAddress tcpip.Address = "\xff\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02" + IPv6AllRoutersInterfaceLocalMulticastAddress = tcpip.AddrFrom16([16]byte{0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}) // IPv6AllRoutersLinkLocalMulticastAddress is a link-local multicast group // that all IPv6 routers MUST join, as per RFC 4291, section 2.8. Packets // destined to this address will reach all routers on a link. // // The address is ff02::2. - IPv6AllRoutersLinkLocalMulticastAddress tcpip.Address = "\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02" + IPv6AllRoutersLinkLocalMulticastAddress = tcpip.AddrFrom16([16]byte{0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}) // IPv6AllRoutersSiteLocalMulticastAddress is a site-local multicast group // that all IPv6 routers MUST join, as per RFC 4291, section 2.8. Packets // destined to this address will reach all routers in a site. // // The address is ff05::2. 
- IPv6AllRoutersSiteLocalMulticastAddress tcpip.Address = "\xff\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02" - - // IPv6MinimumMTU is the minimum MTU required by IPv6, per RFC 8200, - // section 5: - // IPv6 requires that every link in the Internet have an MTU of 1280 octets - // or greater. This is known as the IPv6 minimum link MTU. - IPv6MinimumMTU = 1280 + IPv6AllRoutersSiteLocalMulticastAddress = tcpip.AddrFrom16([16]byte{0xff, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}) // IPv6Loopback is the IPv6 Loopback address. - IPv6Loopback tcpip.Address = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01" + IPv6Loopback = tcpip.AddrFrom16([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}) // IPv6Any is the non-routable IPv6 "any" meta address. It is also // known as the unspecified address. - IPv6Any tcpip.Address = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - - // IIDSize is the size of an interface identifier (IID), in bytes, as - // defined by RFC 4291 section 2.5.1. - IIDSize = 8 - - // IIDOffsetInIPv6Address is the offset, in bytes, from the start - // of an IPv6 address to the beginning of the interface identifier - // (IID) for auto-generated addresses. That is, all bytes before - // the IIDOffsetInIPv6Address-th byte are the prefix bytes, and all - // bytes including and after the IIDOffsetInIPv6Address-th byte are - // for the IID. - IIDOffsetInIPv6Address = 8 - - // OpaqueIIDSecretKeyMinBytes is the recommended minimum number of bytes - // for the secret key used to generate an opaque interface identifier as - // outlined by RFC 7217. - OpaqueIIDSecretKeyMinBytes = 16 - - // ipv6MulticastAddressScopeByteIdx is the byte where the scope (scop) field - // is located within a multicast IPv6 address, as per RFC 4291 section 2.7. - ipv6MulticastAddressScopeByteIdx = 1 - - // ipv6MulticastAddressScopeMask is the mask for the scope (scop) field, - // within the byte holding the field, as per RFC 4291 section 2.7. - ipv6MulticastAddressScopeMask = 0xF + IPv6Any = tcpip.AddrFrom16([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}) ) // IPv6EmptySubnet is the empty IPv6 subnet. It may also be known as the @@ -170,7 +175,7 @@ var IPv6EmptySubnet = tcpip.AddressWithPrefix{ // IPv4MappedIPv6Subnet is the prefix for an IPv4 mapped IPv6 address as defined // by RFC 4291 section 2.5.5. var IPv4MappedIPv6Subnet = tcpip.AddressWithPrefix{ - Address: "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x00\x00\x00\x00", + Address: tcpip.AddrFrom16([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00}), PrefixLen: 96, }.Subnet() @@ -179,7 +184,7 @@ var IPv4MappedIPv6Subnet = tcpip.AddressWithPrefix{ // // The prefix is fe80::/64 var IPv6LinkLocalPrefix = tcpip.AddressWithPrefix{ - Address: "\xfe\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", + Address: tcpip.AddrFrom16([16]byte{0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}), PrefixLen: 64, } @@ -211,13 +216,13 @@ func (b IPv6) Payload() []byte { // SourceAddress returns the "source address" field of the ipv6 header. 
func (b IPv6) SourceAddress() tcpip.Address { - return tcpip.Address(b[v6SrcAddr:][:IPv6AddressSize]) + return tcpip.AddrFrom16([16]byte(b[v6SrcAddr:][:IPv6AddressSize])) } // DestinationAddress returns the "destination address" field of the ipv6 // header. func (b IPv6) DestinationAddress() tcpip.Address { - return tcpip.Address(b[v6DstAddr:][:IPv6AddressSize]) + return tcpip.AddrFrom16([16]byte(b[v6DstAddr:][:IPv6AddressSize])) } // Checksum implements Network.Checksum. Given that IPv6 doesn't have a @@ -245,13 +250,13 @@ func (b IPv6) SetPayloadLength(payloadLength uint16) { // SetSourceAddress sets the "source address" field of the ipv6 header. func (b IPv6) SetSourceAddress(addr tcpip.Address) { - copy(b[v6SrcAddr:][:IPv6AddressSize], addr) + copy(b[v6SrcAddr:][:IPv6AddressSize], addr.AsSlice()) } // SetDestinationAddress sets the "destination address" field of the ipv6 // header. func (b IPv6) SetDestinationAddress(addr tcpip.Address) { - copy(b[v6DstAddr:][:IPv6AddressSize], addr) + copy(b[v6DstAddr:][:IPv6AddressSize], addr.AsSlice()) } // SetHopLimit sets the value of the "Hop Limit" field. @@ -302,7 +307,7 @@ func (b IPv6) IsValid(pktSize int) bool { // IsV4MappedAddress determines if the provided address is an IPv4 mapped // address by checking if its prefix is 0:0:0:0:0:ffff::/96. func IsV4MappedAddress(addr tcpip.Address) bool { - if len(addr) != IPv6AddressSize { + if addr.BitLen() != IPv6AddressSizeBits { return false } @@ -312,10 +317,10 @@ func IsV4MappedAddress(addr tcpip.Address) bool { // IsV6MulticastAddress determines if the provided address is an IPv6 // multicast address (anything starting with FF). func IsV6MulticastAddress(addr tcpip.Address) bool { - if len(addr) != IPv6AddressSize { + if addr.BitLen() != IPv6AddressSizeBits { return false } - return addr[0] == 0xff + return addr.As16()[0] == 0xff } // IsV6UnicastAddress determines if the provided address is a valid IPv6 @@ -323,7 +328,7 @@ func IsV6MulticastAddress(addr tcpip.Address) bool { // true if addr contains IPv6AddressSize bytes, is not the unspecified // address and is not a multicast address. func IsV6UnicastAddress(addr tcpip.Address) bool { - if len(addr) != IPv6AddressSize { + if addr.BitLen() != IPv6AddressSizeBits { return false } @@ -333,22 +338,24 @@ func IsV6UnicastAddress(addr tcpip.Address) bool { } // Return if not a multicast. - return addr[0] != 0xff + return addr.As16()[0] != 0xff } -const solicitedNodeMulticastPrefix = "\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff" +var solicitedNodeMulticastPrefix = [13]byte{0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xff} // SolicitedNodeAddr computes the solicited-node multicast address. This is // used for NDP. Described in RFC 4291. The argument must be a full-length IPv6 // address. func SolicitedNodeAddr(addr tcpip.Address) tcpip.Address { - return solicitedNodeMulticastPrefix + addr[len(addr)-3:] + addrBytes := addr.As16() + return tcpip.AddrFrom16([16]byte(append(solicitedNodeMulticastPrefix[:], addrBytes[len(addrBytes)-3:]...))) } // IsSolicitedNodeAddr determines whether the address is a solicited-node // multicast address. 
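Editor's note: a worked example of the solicited-node mapping above: only the last three bytes of the unicast address survive, appended to the fixed ff02::1:ff00:0/104 prefix (the input address is illustrative):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	addr := tcpip.AddrFrom16([16]byte{
		0xfe, 0x80, 0, 0, 0, 0, 0, 0,
		0x02, 0x1a, 0x2b, 0xff, 0xfe, 0x3c, 0x4d, 0x5e,
	})
	// Expect ff02::1:ff3c:4d5e, built from the last three bytes 3c:4d:5e.
	fmt.Println(header.SolicitedNodeAddr(addr))
}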
func IsSolicitedNodeAddr(addr tcpip.Address) bool { - return solicitedNodeMulticastPrefix == addr[:len(addr)-3] + addrBytes := addr.As16() + return solicitedNodeMulticastPrefix == [13]byte(addrBytes[:len(addrBytes)-3]) } // EthernetAdddressToModifiedEUI64IntoBuf populates buf with a modified EUI-64 @@ -388,16 +395,17 @@ func LinkLocalAddr(linkAddr tcpip.LinkAddress) tcpip.Address { 1: 0x80, } EthernetAdddressToModifiedEUI64IntoBuf(linkAddr, lladdrb[IIDOffsetInIPv6Address:]) - return tcpip.Address(lladdrb[:]) + return tcpip.AddrFrom16(lladdrb) } // IsV6LinkLocalUnicastAddress returns true iff the provided address is an IPv6 // link-local unicast address, as defined by RFC 4291 section 2.5.6. func IsV6LinkLocalUnicastAddress(addr tcpip.Address) bool { - if len(addr) != IPv6AddressSize { + if addr.BitLen() != IPv6AddressSizeBits { return false } - return addr[0] == 0xfe && (addr[1]&0xc0) == 0x80 + addrBytes := addr.As16() + return addrBytes[0] == 0xfe && (addrBytes[1]&0xc0) == 0x80 } // IsV6LoopbackAddress returns true iff the provided address is an IPv6 loopback @@ -429,7 +437,8 @@ func AppendOpaqueInterfaceIdentifier(buf []byte, prefix tcpip.Subnet, nicName st // Note, we omit the optional Network_ID field. h := sha256.New() // h.Write never returns an error. - h.Write([]byte(prefix.ID()[:IIDOffsetInIPv6Address])) + prefixID := prefix.ID() + h.Write([]byte(prefixID.AsSlice()[:IIDOffsetInIPv6Address])) h.Write([]byte(nicName)) h.Write([]byte{dadCounter}) h.Write(secretKey) @@ -448,7 +457,7 @@ func LinkLocalAddrWithOpaqueIID(nicName string, dadCounter uint8, secretKey []by 1: 0x80, } - return tcpip.Address(AppendOpaqueInterfaceIdentifier(lladdrb[:IIDOffsetInIPv6Address], IPv6LinkLocalPrefix.Subnet(), nicName, dadCounter, secretKey)) + return tcpip.AddrFrom16([16]byte(AppendOpaqueInterfaceIdentifier(lladdrb[:IIDOffsetInIPv6Address], IPv6LinkLocalPrefix.Subnet(), nicName, dadCounter, secretKey))) } // IPv6AddressScope is the scope of an IPv6 address. @@ -464,7 +473,7 @@ const ( // ScopeForIPv6Address returns the scope for an IPv6 address. func ScopeForIPv6Address(addr tcpip.Address) (IPv6AddressScope, tcpip.Error) { - if len(addr) != IPv6AddressSize { + if addr.BitLen() != IPv6AddressSizeBits { return GlobalScope, &tcpip.ErrBadAddress{} } @@ -508,7 +517,7 @@ func InitialTempIID(initialTempIIDHistory []byte, seed []byte, nicID tcpip.NICID // // Panics if tempIIDHistory is not at least IIDSize bytes. func GenerateTempIPv6SLAACAddr(tempIIDHistory []byte, stableAddr tcpip.Address) tcpip.AddressWithPrefix { - addrBytes := []byte(stableAddr) + addrBytes := stableAddr.As16() h := sha256.New() h.Write(tempIIDHistory) h.Write(addrBytes[IIDOffsetInIPv6Address:]) @@ -526,7 +535,7 @@ func GenerateTempIPv6SLAACAddr(tempIIDHistory []byte, stableAddr tcpip.Address) } return tcpip.AddressWithPrefix{ - Address: tcpip.Address(addrBytes), + Address: tcpip.AddrFrom16(addrBytes), PrefixLen: IIDOffsetInIPv6Address * 8, } } @@ -571,5 +580,6 @@ const ( // V6MulticastScope returns the scope of a multicast address. 
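Editor's note: a quick sanity sketch for the link-local helpers above (the MAC is made up): whatever address LinkLocalAddr derives should always satisfy IsV6LinkLocalUnicastAddress:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	mac := tcpip.LinkAddress("\x02\x03\x04\x05\x06\x07") // illustrative MAC
	lladdr := header.LinkLocalAddr(mac)
	// Prints the derived fe80::/64 address and true.
	fmt.Println(lladdr, header.IsV6LinkLocalUnicastAddress(lladdr))
}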
func V6MulticastScope(addr tcpip.Address) IPv6MulticastScope { - return IPv6MulticastScope(addr[ipv6MulticastAddressScopeByteIdx] & ipv6MulticastAddressScopeMask) + addrBytes := addr.As16() + return IPv6MulticastScope(addrBytes[ipv6MulticastAddressScopeByteIdx] & ipv6MulticastAddressScopeMask) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv6_extension_headers.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv6_extension_headers.go index ecdb015148..2577c900de 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv6_extension_headers.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ipv6_extension_headers.go @@ -21,7 +21,7 @@ import ( "io" "math" - "gvisor.dev/gvisor/pkg/bufferv2" + "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/tcpip" ) @@ -163,7 +163,7 @@ type IPv6PayloadHeader interface { // it's no longer needed. type IPv6RawPayloadHeader struct { Identifier IPv6ExtensionHeaderIdentifier - Buf bufferv2.Buffer + Buf buffer.Buffer } // isIPv6PayloadHeader implements IPv6PayloadHeader.isIPv6PayloadHeader. @@ -176,7 +176,7 @@ func (i IPv6RawPayloadHeader) Release() { // ipv6OptionsExtHdr is an IPv6 extension header that holds options. type ipv6OptionsExtHdr struct { - buf *bufferv2.View + buf *buffer.View } // Release implements IPv6PayloadHeader.Release. @@ -203,7 +203,7 @@ func (i ipv6OptionsExtHdr) Iter() IPv6OptionsExtHdrOptionsIterator { // modify the backing payload so long as the IPv6OptionsExtHdrOptionsIterator // obtained before modification is no longer used. type IPv6OptionsExtHdrOptionsIterator struct { - reader *bufferv2.View + reader *buffer.View // optionOffset is the number of bytes from the first byte of the // options field to the beginning of the current option. @@ -299,7 +299,7 @@ var ErrMalformedIPv6ExtHdrOption = errors.New("malformed IPv6 extension header o // header option that is unknown by the parsing utilities. type IPv6UnknownExtHdrOption struct { Identifier IPv6ExtHdrOptionIdentifier - Data *bufferv2.View + Data *buffer.View } // UnknownAction implements IPv6OptionUnknownAction.UnknownAction. @@ -382,7 +382,7 @@ func (i *IPv6OptionsExtHdrOptionsIterator) Next() (IPv6ExtHdrOption, bool, error } return &IPv6RouterAlertOption{Value: IPv6RouterAlertValue(binary.BigEndian.Uint16(routerAlertValue[:]))}, false, nil default: - bytes := bufferv2.NewView(int(length)) + bytes := buffer.NewView(int(length)) if n, err := io.CopyN(bytes, i.reader, int64(length)); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF @@ -416,7 +416,7 @@ func (IPv6DestinationOptionsExtHdr) isIPv6PayloadHeader() {} // IPv6RoutingExtHdr is a buffer holding the Routing extension header specific // data as outlined in RFC 8200 section 4.4. type IPv6RoutingExtHdr struct { - Buf *bufferv2.View + Buf *buffer.View } // isIPv6PayloadHeader implements IPv6PayloadHeader.isIPv6PayloadHeader. @@ -488,7 +488,7 @@ type IPv6PayloadIterator struct { // The identifier of the next header to parse. nextHdrIdentifier IPv6ExtensionHeaderIdentifier - payload bufferv2.Buffer + payload buffer.Buffer // Indicates to the iterator that it should return the remaining payload as a // raw payload on the next call to Next. @@ -521,7 +521,7 @@ func (i IPv6PayloadIterator) ParseOffset() uint32 { // MakeIPv6PayloadIterator returns an iterator over the IPv6 payload containing // extension headers, or a raw payload if the payload cannot be parsed. The // iterator takes ownership of the payload. 
-func MakeIPv6PayloadIterator(nextHdrIdentifier IPv6ExtensionHeaderIdentifier, payload bufferv2.Buffer) IPv6PayloadIterator { +func MakeIPv6PayloadIterator(nextHdrIdentifier IPv6ExtensionHeaderIdentifier, payload buffer.Buffer) IPv6PayloadIterator { return IPv6PayloadIterator{ nextHdrIdentifier: nextHdrIdentifier, payload: payload, @@ -543,7 +543,7 @@ func (i *IPv6PayloadIterator) Release() { func (i *IPv6PayloadIterator) AsRawHeader(consume bool) IPv6RawPayloadHeader { identifier := i.nextHdrIdentifier - var buf bufferv2.Buffer + var buf buffer.Buffer if consume { // Since we consume the iterator, we return the payload as is. buf = i.payload @@ -638,6 +638,12 @@ func (i *IPv6PayloadIterator) Next() (IPv6PayloadHeader, bool, error) { } } +// NextHeaderIdentifier returns the identifier of the header next returned by +// it.Next(). +func (i *IPv6PayloadIterator) NextHeaderIdentifier() IPv6ExtensionHeaderIdentifier { + return i.nextHdrIdentifier +} + // nextHeaderData returns the extension header's Next Header field and raw data. // // fragmentHdr indicates that the extension header being parsed is the Fragment @@ -647,7 +653,7 @@ func (i *IPv6PayloadIterator) Next() (IPv6PayloadHeader, bool, error) { // If bytes is not nil, extension header specific data will be read into bytes // if it has enough capacity. If bytes is provided but does not have enough // capacity for the data, nextHeaderData will panic. -func (i *IPv6PayloadIterator) nextHeaderData(fragmentHdr bool, bytes []byte) (IPv6ExtensionHeaderIdentifier, *bufferv2.View, error) { +func (i *IPv6PayloadIterator) nextHeaderData(fragmentHdr bool, bytes []byte) (IPv6ExtensionHeaderIdentifier, *buffer.View, error) { // We ignore the number of bytes read because we know we will only ever read // at max 1 bytes since rune has a length of 1. If we read 0 bytes, the Read // would return io.EOF to indicate that io.Reader has reached the end of the @@ -694,7 +700,7 @@ func (i *IPv6PayloadIterator) nextHeaderData(fragmentHdr bool, bytes []byte) (IP } return IPv6ExtensionHeaderIdentifier(nextHdrIdentifier), nil, nil } - v := bufferv2.NewView(bytesLen) + v := buffer.NewView(bytesLen) if n, err := io.CopyN(v, &rdr, int64(bytesLen)); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/mld.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/mld.go index 131fca24b5..861f56cff6 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/mld.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/mld.go @@ -92,12 +92,12 @@ func (m MLD) MulticastAddress() tcpip.Address { // In a Report or Done message, the Multicast Address field holds a // specific IPv6 multicast address to which the message sender is // listening or is ceasing to listen, respectively. - return tcpip.Address(m[mldMulticastAddressOffset:][:IPv6AddressSize]) + return tcpip.AddrFrom16([16]byte(m[mldMulticastAddressOffset:][:IPv6AddressSize])) } // SetMulticastAddress sets the Multicast Address field. 
func (m MLD) SetMulticastAddress(multicastAddress tcpip.Address) { - if n := copy(m[mldMulticastAddressOffset:], multicastAddress); n != IPv6AddressSize { + if n := copy(m[mldMulticastAddressOffset:], multicastAddress.AsSlice()); n != IPv6AddressSize { panic(fmt.Sprintf("copied %d bytes, expected to copy %d bytes", n, IPv6AddressSize)) } } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/mldv2.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/mldv2.go new file mode 100644 index 0000000000..0c33f5793f --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/mldv2.go @@ -0,0 +1,541 @@ +// Copyright 2022 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package header + +import ( + "bytes" + "encoding/binary" + "fmt" + "time" + + "gvisor.dev/gvisor/pkg/tcpip" +) + +const ( + // MLDv2QueryMinimumSize is the minimum size for an MLDv2 message. + MLDv2QueryMinimumSize = 24 + + mldv2QueryMaximumResponseCodeOffset = 0 + mldv2QueryResvSQRVOffset = 20 + mldv2QueryQRVMask = 0b111 + mldv2QueryQQICOffset = 21 + // mldv2QueryNumberOfSourcesOffset is the offset to the Number of Sources + // field within MLDv2Query. + mldv2QueryNumberOfSourcesOffset = 22 + + // MLDv2ReportMinimumSize is the minimum size of an MLDv2 report. + MLDv2ReportMinimumSize = 24 + + // mldv2QuerySourcesOffset is the offset to the Sources field within + // MLDv2Query. + mldv2QuerySourcesOffset = 24 +) + +var ( + // MLDv2RoutersAddress is the address to send MLDv2 reports to. + // + // As per RFC 3810 section 5.2.14, + // + // Version 2 Multicast Listener Reports are sent with an IP destination + // address of FF02:0:0:0:0:0:0:16, to which all MLDv2-capable multicast + // routers listen (see section 11 for IANA considerations related to + // this special destination address). + MLDv2RoutersAddress = tcpip.AddrFrom16([16]byte{0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16}) +) + +// MLDv2Query is a Multicast Listener Discovery Version 2 Query message in an +// ICMPv6 packet. +// +// MLDv2Query will only contain the body of an ICMPv6 packet. 
+// +// As per RFC 3810 section 5.1, MLDv2 Query messages have the following format +// (MLDv2Query only holds the bytes after the first four bytes in the diagram +// below): +// +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 130 | Code | Checksum | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Maximum Response Code | Reserved | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// * * +// | | +// * Multicast Address * +// | | +// * * +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Resv |S| QRV | QQIC | Number of Sources (N) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// * * +// | | +// * Source Address [1] * +// | | +// * * +// | | +// +- -+ +// | | +// * * +// | | +// * Source Address [2] * +// | | +// * * +// | | +// +- . -+ +// . . . +// . . . +// +- -+ +// | | +// * * +// | | +// * Source Address [N] * +// | | +// * * +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +type MLDv2Query MLD + +// MaximumResponseCode returns the Maximum Response Code +func (m MLDv2Query) MaximumResponseCode() uint16 { + return binary.BigEndian.Uint16(m[mldv2QueryMaximumResponseCodeOffset:]) +} + +// MLDv2MaximumResponseDelay returns the Maximum Response Delay in an MLDv2 +// Maximum Response Code. +// +// As per RFC 3810 section 5.1.3, +// +// The Maximum Response Code field specifies the maximum time allowed +// before sending a responding Report. The actual time allowed, called +// the Maximum Response Delay, is represented in units of milliseconds, +// and is derived from the Maximum Response Code as follows: +// +// If Maximum Response Code < 32768, +// Maximum Response Delay = Maximum Response Code +// +// If Maximum Response Code >=32768, Maximum Response Code represents a +// floating-point value as follows: +// +// 0 1 2 3 4 5 6 7 8 9 A B C D E F +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// |1| exp | mant | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// +// Maximum Response Delay = (mant | 0x1000) << (exp+3) +// +// Small values of Maximum Response Delay allow MLDv2 routers to tune +// the "leave latency" (the time between the moment the last node on a +// link ceases to listen to a specific multicast address and the moment +// the routing protocol is notified that there are no more listeners for +// that address). Larger values, especially in the exponential range, +// allow the tuning of the burstiness of MLD traffic on a link. +func MLDv2MaximumResponseDelay(codeRaw uint16) time.Duration { + code := time.Duration(codeRaw) + if code < 32768 { + return code * time.Millisecond + } + + const mantBits = 12 + const expMask = 0b111 + exp := (code >> mantBits) & expMask + mant := code & ((1 << mantBits) - 1) + return (mant | 0x1000) << (exp + 3) * time.Millisecond +} + +// MulticastAddress returns the Multicast Address. +func (m MLDv2Query) MulticastAddress() tcpip.Address { + // As per RFC 2710 section 3.5: + // + // In a Query message, the Multicast Address field is set to zero when + // sending a General Query, and set to a specific IPv6 multicast address + // when sending a Multicast-Address-Specific Query. + // + // In a Report or Done message, the Multicast Address field holds a + // specific IPv6 multicast address to which the message sender is + // listening or is ceasing to listen, respectively. 
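Editor's note: the MLDv2 decoding above parallels the IGMPv3 one but works in milliseconds with a 12-bit mantissa. For example, code 32768 (exp=0, mant=0) decodes to (0x1000)<<3 = 32768ms:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	fmt.Println(header.MLDv2MaximumResponseDelay(1000))  // 1s, taken literally
	fmt.Println(header.MLDv2MaximumResponseDelay(32768)) // 32.768s via the float form
}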
+ return tcpip.AddrFrom16([16]byte(m[mldMulticastAddressOffset:][:IPv6AddressSize])) +} + +// QuerierRobustnessVariable returns the querier's robustness variable. +func (m MLDv2Query) QuerierRobustnessVariable() uint8 { + return m[mldv2QueryResvSQRVOffset] & mldv2QueryQRVMask +} + +// QuerierQueryInterval returns the querier's query interval. +func (m MLDv2Query) QuerierQueryInterval() time.Duration { + return mldv2AndIGMPv3QuerierQueryCodeToInterval(m[mldv2QueryQQICOffset]) +} + +// Sources returns an iterator over source addresses in the query. +// +// Returns false if the message cannot hold the expected number of sources. +func (m MLDv2Query) Sources() (AddressIterator, bool) { + return makeAddressIterator( + m[mldv2QuerySourcesOffset:], + binary.BigEndian.Uint16(m[mldv2QueryNumberOfSourcesOffset:]), + IPv6AddressSize, + ) +} + +// MLDv2ReportRecordType is the type of an MLDv2 multicast address record +// found in an MLDv2 report, as per RFC 3810 section 5.2.12. +type MLDv2ReportRecordType int + +// MLDv2 multicast address record types, as per RFC 3810 section 5.2.12. +const ( + MLDv2ReportRecordModeIsInclude MLDv2ReportRecordType = 1 + MLDv2ReportRecordModeIsExclude MLDv2ReportRecordType = 2 + MLDv2ReportRecordChangeToIncludeMode MLDv2ReportRecordType = 3 + MLDv2ReportRecordChangeToExcludeMode MLDv2ReportRecordType = 4 + MLDv2ReportRecordAllowNewSources MLDv2ReportRecordType = 5 + MLDv2ReportRecordBlockOldSources MLDv2ReportRecordType = 6 +) + +const ( + mldv2ReportMulticastAddressRecordMinimumSize = 20 + mldv2ReportMulticastAddressRecordTypeOffset = 0 + mldv2ReportMulticastAddressRecordAuxDataLenOffset = 1 + mldv2ReportMulticastAddressRecordAuxDataLenUnits = 4 + mldv2ReportMulticastAddressRecordNumberOfSourcesOffset = 2 + mldv2ReportMulticastAddressRecordMulticastAddressOffset = 4 + mldv2ReportMulticastAddressRecordSourcesOffset = 20 +) + +// MLDv2ReportMulticastAddressRecordSerializer is an MLDv2 Multicast Address +// Record serializer. +// +// As per RFC 3810 section 5.2, a Multicast Address Record has the following +// internal format: +// +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Record Type | Aux Data Len | Number of Sources (N) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// * * +// | | +// * Multicast Address * +// | | +// * * +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// * * +// | | +// * Source Address [1] * +// | | +// * * +// | | +// +- -+ +// | | +// * * +// | | +// * Source Address [2] * +// | | +// * * +// | | +// +- -+ +// . . . +// . . . +// . . . +// +- -+ +// | | +// * * +// | | +// * Source Address [N] * +// | | +// * * +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . . +// . Auxiliary Data . +// . . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +type MLDv2ReportMulticastAddressRecordSerializer struct { + RecordType MLDv2ReportRecordType + MulticastAddress tcpip.Address + Sources []tcpip.Address +} + +// Length returns the number of bytes this serializer would occupy. +func (s *MLDv2ReportMulticastAddressRecordSerializer) Length() int { + return mldv2ReportMulticastAddressRecordSourcesOffset + len(s.Sources)*IPv6AddressSize +} + +func copyIPv6Address(dst []byte, src tcpip.Address) { + if n := copy(dst, src.AsSlice()); n != IPv6AddressSize { + panic(fmt.Sprintf("got copy(...) = %d, want = %d", n, IPv6AddressSize)) + } +} + +// SerializeInto serializes the record into the buffer. 
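A brief sketch of driving the query accessors defined above against a hand-built General Query body (field offsets follow the constants and diagram in this file; the ICMPv6 Type, Code and Checksum are not part of the body):

package main

import (
	"encoding/binary"
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	// General Query body (Multicast Address left as zero) carrying one
	// queried source, 2001:db8::1.
	b := make([]byte, header.MLDv2QueryMinimumSize+header.IPv6AddressSize)
	binary.BigEndian.PutUint16(b[0:], 1000) // Maximum Response Code: 1000 => 1s.
	b[20] = 2                               // Resv|S|QRV: robustness variable 2.
	b[21] = 125                             // QQIC: 125, the default 125s interval.
	binary.BigEndian.PutUint16(b[22:], 1)   // Number of Sources.
	src := tcpip.AddrFrom16([16]byte{0x20, 0x01, 0x0d, 0xb8, 15: 0x01})
	copy(b[24:], src.AsSlice())

	q := header.MLDv2Query(b)
	fmt.Println(header.MLDv2MaximumResponseDelay(q.MaximumResponseCode())) // 1s
	fmt.Println(q.QuerierRobustnessVariable(), q.QuerierQueryInterval())   // 2 2m5s
	if srcs, ok := q.Sources(); ok {
		for !srcs.Done() {
			addr, _ := srcs.Next()
			fmt.Println("queried source:", addr)
		}
	}
}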
+// +// Panics if the buffer does not have enough space to fit the record. +func (s *MLDv2ReportMulticastAddressRecordSerializer) SerializeInto(b []byte) { + b[mldv2ReportMulticastAddressRecordTypeOffset] = byte(s.RecordType) + b[mldv2ReportMulticastAddressRecordAuxDataLenOffset] = 0 + binary.BigEndian.PutUint16(b[mldv2ReportMulticastAddressRecordNumberOfSourcesOffset:], uint16(len(s.Sources))) + copyIPv6Address(b[mldv2ReportMulticastAddressRecordMulticastAddressOffset:], s.MulticastAddress) + b = b[mldv2ReportMulticastAddressRecordSourcesOffset:] + for _, source := range s.Sources { + copyIPv6Address(b, source) + b = b[IPv6AddressSize:] + } +} + +const ( + mldv2ReportReservedOffset = 0 + mldv2ReportNumberOfMulticastAddressRecordsOffset = 2 + mldv2ReportMulticastAddressRecordsOffset = 4 +) + +// MLDv2ReportSerializer is an MLD Version 2 Report serializer. +// +// As per RFC 3810 section 5.2, +// +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 143 | Reserved | Checksum | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Reserved |Nr of Mcast Address Records (M)| +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . . +// . Multicast Address Record [1] . +// . . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . . +// . Multicast Address Record [2] . +// . . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | . | +// . . . +// | . | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . . +// . Multicast Address Record [M] . +// . . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +type MLDv2ReportSerializer struct { + Records []MLDv2ReportMulticastAddressRecordSerializer +} + +// Length returns the number of bytes this serializer would occupy. +func (s *MLDv2ReportSerializer) Length() int { + ret := mldv2ReportMulticastAddressRecordsOffset + for _, record := range s.Records { + ret += record.Length() + } + return ret +} + +// SerializeInto serializes the report into the buffer. +// +// Panics if the buffer does not have enough space to fit the report. +func (s *MLDv2ReportSerializer) SerializeInto(b []byte) { + binary.BigEndian.PutUint16(b[mldv2ReportReservedOffset:], 0) + binary.BigEndian.PutUint16(b[mldv2ReportNumberOfMulticastAddressRecordsOffset:], uint16(len(s.Records))) + b = b[mldv2ReportMulticastAddressRecordsOffset:] + for _, record := range s.Records { + len := record.Length() + record.SerializeInto(b[:len]) + b = b[len:] + } +} + +// MLDv2ReportMulticastAddressRecord is an MLDv2 record. +// +// As per RFC 3810 section 5.2, a Multicast Address Record has the following +// internal format: +// +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Record Type | Aux Data Len | Number of Sources (N) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// * * +// | | +// * Multicast Address * +// | | +// * * +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// * * +// | | +// * Source Address [1] * +// | | +// * * +// | | +// +- -+ +// | | +// * * +// | | +// * Source Address [2] * +// | | +// * * +// | | +// +- -+ +// . . . +// . . . +// . . . 
+// +- -+ +// | | +// * * +// | | +// * Source Address [N] * +// | | +// * * +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . . +// . Auxiliary Data . +// . . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +type MLDv2ReportMulticastAddressRecord []byte + +// RecordType returns the type of this record. +func (r MLDv2ReportMulticastAddressRecord) RecordType() MLDv2ReportRecordType { + return MLDv2ReportRecordType(r[mldv2ReportMulticastAddressRecordTypeOffset]) +} + +// AuxDataLen returns the length of the auxillary data in this record. +func (r MLDv2ReportMulticastAddressRecord) AuxDataLen() int { + return int(r[mldv2ReportMulticastAddressRecordAuxDataLenOffset]) * mldv2ReportMulticastAddressRecordAuxDataLenUnits +} + +// numberOfSources returns the number of sources in this record. +func (r MLDv2ReportMulticastAddressRecord) numberOfSources() uint16 { + return binary.BigEndian.Uint16(r[mldv2ReportMulticastAddressRecordNumberOfSourcesOffset:]) +} + +// MulticastAddress returns the multicast address this record targets. +func (r MLDv2ReportMulticastAddressRecord) MulticastAddress() tcpip.Address { + return tcpip.AddrFrom16([16]byte(r[mldv2ReportMulticastAddressRecordMulticastAddressOffset:][:IPv6AddressSize])) +} + +// Sources returns an iterator over source addresses in the query. +// +// Returns false if the message cannot hold the expected number of sources. +func (r MLDv2ReportMulticastAddressRecord) Sources() (AddressIterator, bool) { + expectedLen := int(r.numberOfSources()) * IPv6AddressSize + b := r[mldv2ReportMulticastAddressRecordSourcesOffset:] + if len(b) < expectedLen { + return AddressIterator{}, false + } + return AddressIterator{addressSize: IPv6AddressSize, buf: bytes.NewBuffer(b[:expectedLen])}, true +} + +// MLDv2Report is an MLDv2 Report. +// +// As per RFC 3810 section 5.2, +// +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 143 | Reserved | Checksum | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Reserved |Nr of Mcast Address Records (M)| +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . . +// . Multicast Address Record [1] . +// . . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . . +// . Multicast Address Record [2] . +// . . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | . | +// . . . +// | . | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . . +// . Multicast Address Record [M] . +// . . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +type MLDv2Report []byte + +// MLDv2ReportMulticastAddressRecordIterator is an iterator over MLDv2 Multicast +// Address Records. +type MLDv2ReportMulticastAddressRecordIterator struct { + recordsLeft uint16 + buf *bytes.Buffer +} + +// MLDv2ReportMulticastAddressRecordIteratorNextDisposition is the possible +// return values from MLDv2ReportMulticastAddressRecordIterator.Next. +type MLDv2ReportMulticastAddressRecordIteratorNextDisposition int + +const ( + // MLDv2ReportMulticastAddressRecordIteratorNextOk indicates that a multicast + // address record was yielded. 
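The serializer and parser halves of this file compose naturally; a minimal round-trip sketch using the report serializer above and the record iterator completed just below (ff05::1234 is an arbitrary example group):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	group := tcpip.AddrFrom16([16]byte{0: 0xff, 1: 0x05, 14: 0x12, 15: 0x34})

	// One CHANGE_TO_EXCLUDE_MODE record with no sources: "deliver this group
	// from every source".
	s := header.MLDv2ReportSerializer{
		Records: []header.MLDv2ReportMulticastAddressRecordSerializer{{
			RecordType:       header.MLDv2ReportRecordChangeToExcludeMode,
			MulticastAddress: group,
		}},
	}
	b := make([]byte, s.Length())
	s.SerializeInto(b)

	// Walk the records back out of the raw report body.
	it := header.MLDv2Report(b).MulticastAddressRecords()
	for {
		rec, res := it.Next()
		if res != header.MLDv2ReportMulticastAddressRecordIteratorNextOk {
			break
		}
		fmt.Println(rec.RecordType(), rec.MulticastAddress()) // 4 ff05::1234
	}
}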
+ MLDv2ReportMulticastAddressRecordIteratorNextOk MLDv2ReportMulticastAddressRecordIteratorNextDisposition = iota + + // MLDv2ReportMulticastAddressRecordIteratorNextDone indicates that the iterator + // has been exhausted. + MLDv2ReportMulticastAddressRecordIteratorNextDone + + // MLDv2ReportMulticastAddressRecordIteratorNextErrBufferTooShort indicates + // that the iterator expected another record, but the buffer ended + // prematurely. + MLDv2ReportMulticastAddressRecordIteratorNextErrBufferTooShort +) + +// Next returns the next MLDv2 Multicast Address Record. +func (it *MLDv2ReportMulticastAddressRecordIterator) Next() (MLDv2ReportMulticastAddressRecord, MLDv2ReportMulticastAddressRecordIteratorNextDisposition) { + if it.recordsLeft == 0 { + return MLDv2ReportMulticastAddressRecord{}, MLDv2ReportMulticastAddressRecordIteratorNextDone + } + if it.buf.Len() < mldv2ReportMulticastAddressRecordMinimumSize { + return MLDv2ReportMulticastAddressRecord{}, MLDv2ReportMulticastAddressRecordIteratorNextErrBufferTooShort + } + + hdr := MLDv2ReportMulticastAddressRecord(it.buf.Bytes()) + expectedLen := mldv2ReportMulticastAddressRecordMinimumSize + + int(hdr.AuxDataLen()) + int(hdr.numberOfSources())*IPv6AddressSize + + bytes := it.buf.Next(expectedLen) + if len(bytes) < expectedLen { + return MLDv2ReportMulticastAddressRecord{}, MLDv2ReportMulticastAddressRecordIteratorNextErrBufferTooShort + } + it.recordsLeft-- + return MLDv2ReportMulticastAddressRecord(bytes), MLDv2ReportMulticastAddressRecordIteratorNextOk +} + +// MulticastAddressRecords returns an iterator of MLDv2 Multicast Address +// Records. +func (m MLDv2Report) MulticastAddressRecords() MLDv2ReportMulticastAddressRecordIterator { + return MLDv2ReportMulticastAddressRecordIterator{ + recordsLeft: binary.BigEndian.Uint16(m[mldv2ReportNumberOfMulticastAddressRecordsOffset:]), + buf: bytes.NewBuffer(m[mldv2ReportMulticastAddressRecordsOffset:]), + } +} diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/mldv2_igmpv3_common.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/mldv2_igmpv3_common.go new file mode 100644 index 0000000000..94ebc9a2f1 --- /dev/null +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/mldv2_igmpv3_common.go @@ -0,0 +1,124 @@ +// Copyright 2022 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package header + +import ( + "bytes" + "fmt" + "time" + + "gvisor.dev/gvisor/pkg/tcpip" +) + +func mldv2AndIGMPv3QuerierQueryCodeToInterval(code uint8) time.Duration { + // MLDv2: As per RFC 3810 section 5.1.19, + // + // The Querier's Query Interval Code field specifies the [Query + // Interval] used by the Querier. 
The actual interval, called the + // Querier's Query Interval (QQI), is represented in units of seconds, + // and is derived from the Querier's Query Interval Code as follows: + // + // If QQIC < 128, QQI = QQIC + // + // If QQIC >= 128, QQIC represents a floating-point value as follows: + // + // 0 1 2 3 4 5 6 7 + // +-+-+-+-+-+-+-+-+ + // |1| exp | mant | + // +-+-+-+-+-+-+-+-+ + // + // QQI = (mant | 0x10) << (exp + 3) + // + // Multicast routers that are not the current Querier adopt the QQI + // value from the most recently received Query as their own [Query + // Interval] value, unless that most recently received QQI was zero, in + // which case the receiving routers use the default [Query Interval] + // value specified in section 9.2. + // + // IGMPv3: As per RFC 3376 section 4.1.7, + // + // The Querier's Query Interval Code field specifies the [Query + // Interval] used by the querier. The actual interval, called the + // Querier's Query Interval (QQI), is represented in units of seconds + // and is derived from the Querier's Query Interval Code as follows: + // + // If QQIC < 128, QQI = QQIC + // + // If QQIC >= 128, QQIC represents a floating-point value as follows: + // + // 0 1 2 3 4 5 6 7 + // +-+-+-+-+-+-+-+-+ + // |1| exp | mant | + // +-+-+-+-+-+-+-+-+ + // + // QQI = (mant | 0x10) << (exp + 3) + // + // Multicast routers that are not the current querier adopt the QQI + // value from the most recently received Query as their own [Query + // Interval] value, unless that most recently received QQI was zero, in + // which case the receiving routers use the default [Query Interval] + // value specified in section 8.2. + interval := time.Duration(code) + if interval < 128 { + return interval * time.Second + } + + const expMask = 0b111 + const mantBits = 4 + mant := interval & ((1 << mantBits) - 1) + exp := (interval >> mantBits) & expMask + return (mant | 0x10) << (exp + 3) * time.Second +} + +// MakeAddressIterator returns an AddressIterator. +func MakeAddressIterator(addressSize int, buf *bytes.Buffer) AddressIterator { + return AddressIterator{addressSize: addressSize, buf: buf} +} + +// AddressIterator is an iterator over IPv6 addresses. +type AddressIterator struct { + addressSize int + buf *bytes.Buffer +} + +// Done indicates that the iterator has been exhausted/has no more elements. +func (it *AddressIterator) Done() bool { + return it.buf.Len() == 0 +} + +// Next returns the next address in the iterator. +// +// Returns false if the iterator has been exhausted. 
+func (it *AddressIterator) Next() (tcpip.Address, bool) { + if it.Done() { + var emptyAddress tcpip.Address + return emptyAddress, false + } + + b := it.buf.Next(it.addressSize) + if len(b) != it.addressSize { + panic(fmt.Sprintf("got len(buf.Next(%d)) = %d, want = %d", it.addressSize, len(b), it.addressSize)) + } + + return tcpip.AddrFromSlice(b), true +} + +func makeAddressIterator(b []byte, expectedAddresses uint16, addressSize int) (AddressIterator, bool) { + expectedLen := int(expectedAddresses) * addressSize + if len(b) < expectedLen { + return AddressIterator{}, false + } + return MakeAddressIterator(addressSize, bytes.NewBuffer(b[:expectedLen])), true +} diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_neighbor_advert.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_neighbor_advert.go index 505c926685..beb7fff414 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_neighbor_advert.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_neighbor_advert.go @@ -54,12 +54,12 @@ const ( // TargetAddress returns the value within the Target Address field. func (b NDPNeighborAdvert) TargetAddress() tcpip.Address { - return tcpip.Address(b[ndpNATargetAddressOffset:][:IPv6AddressSize]) + return tcpip.AddrFrom16Slice(b[ndpNATargetAddressOffset:][:IPv6AddressSize]) } // SetTargetAddress sets the value within the Target Address field. func (b NDPNeighborAdvert) SetTargetAddress(addr tcpip.Address) { - copy(b[ndpNATargetAddressOffset:][:IPv6AddressSize], addr) + copy(b[ndpNATargetAddressOffset:][:IPv6AddressSize], addr.AsSlice()) } // RouterFlag returns the value of the Router Flag field. diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_neighbor_solicit.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_neighbor_solicit.go index 3a1b8e1390..f39e062068 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_neighbor_solicit.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_neighbor_solicit.go @@ -38,12 +38,12 @@ const ( // TargetAddress returns the value within the Target Address field. func (b NDPNeighborSolicit) TargetAddress() tcpip.Address { - return tcpip.Address(b[ndpNSTargetAddessOffset:][:IPv6AddressSize]) + return tcpip.AddrFrom16Slice(b[ndpNSTargetAddessOffset:][:IPv6AddressSize]) } // SetTargetAddress sets the value within the Target Address field. func (b NDPNeighborSolicit) SetTargetAddress(addr tcpip.Address) { - copy(b[ndpNSTargetAddessOffset:][:IPv6AddressSize], addr) + copy(b[ndpNSTargetAddessOffset:][:IPv6AddressSize], addr.AsSlice()) } // Options returns an NDPOptions of the the options body. diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_options.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_options.go index 06954e0e3d..1dc8111d79 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_options.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/ndp_options.go @@ -647,7 +647,7 @@ func (o NDPPrefixInformation) PreferredLifetime() time.Duration { // Hosts SHOULD ignore an NDP Prefix Information option where the Prefix field // holds the link-local prefix (fe80::). 
func (o NDPPrefixInformation) Prefix() tcpip.Address { - return tcpip.Address(o[ndpPrefixInformationPrefixOffset:][:IPv6AddressSize]) + return tcpip.AddrFrom16Slice(o[ndpPrefixInformationPrefixOffset:][:IPv6AddressSize]) } // Subnet returns the Prefix field and Prefix Length field represented in a @@ -748,7 +748,7 @@ func (o NDPRecursiveDNSServer) iterAddresses(fn func(tcpip.Address)) error { } for i := 0; len(o) != 0; i++ { - addr := tcpip.Address(o[:IPv6AddressSize]) + addr := tcpip.AddrFrom16Slice(o[:IPv6AddressSize]) if !IsV6UnicastAddress(addr) { return fmt.Errorf("%d-th address (%s) in NDP Recursive DNS Server option is not a valid unicast IPv6 address: %w", i, addr, ErrNDPOptMalformedBody) } @@ -1037,7 +1037,7 @@ func (o NDPRouteInformation) Prefix() (tcpip.Subnet, error) { } return tcpip.AddressWithPrefix{ - Address: tcpip.Address(addrBytes[:]), + Address: tcpip.AddrFrom16(addrBytes), PrefixLen: prefixLength, }.Subnet(), nil } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/parse/parse.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/parse/parse.go index 0e29377cc0..33a85fdbc4 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/header/parse/parse.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/header/parse/parse.go @@ -215,17 +215,14 @@ func ICMPv6(pkt stack.PacketBufferPtr) bool { header.ICMPv6RouterAdvert, header.ICMPv6NeighborSolicit, header.ICMPv6NeighborAdvert, - header.ICMPv6RedirectMsg: - size := pkt.Data().Size() - if _, ok := pkt.TransportHeader().Consume(size); !ok { - panic(fmt.Sprintf("expected to consume the full data of size = %d bytes into transport header", size)) - } - case header.ICMPv6MulticastListenerQuery, + header.ICMPv6RedirectMsg, + header.ICMPv6MulticastListenerQuery, header.ICMPv6MulticastListenerReport, + header.ICMPv6MulticastListenerV2Report, header.ICMPv6MulticastListenerDone: - size := header.ICMPv6HeaderSize + header.MLDMinimumSize + size := pkt.Data().Size() if _, ok := pkt.TransportHeader().Consume(size); !ok { - return false + panic(fmt.Sprintf("expected to consume the full data of size = %d bytes into transport header", size)) } case header.ICMPv6DstUnreachable, header.ICMPv6PacketTooBig, diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/nested/nested.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/nested/nested.go index cbf1ffc53a..b20ce9004f 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/nested/nested.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/nested/nested.go @@ -61,12 +61,12 @@ func (e *Endpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pk } // DeliverLinkPacket implements stack.NetworkDispatcher. -func (e *Endpoint) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr, incoming bool) { +func (e *Endpoint) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) { e.mu.RLock() d := e.dispatcher e.mu.RUnlock() if d != nil { - d.DeliverLinkPacket(protocol, pkt, incoming) + d.DeliverLinkPacket(protocol, pkt) } } @@ -147,3 +147,8 @@ func (e *Endpoint) ARPHardwareType() header.ARPHardwareType { func (e *Endpoint) AddHeader(pkt stack.PacketBufferPtr) { e.child.AddHeader(pkt) } + +// ParseHeader implements stack.LinkEndpoint.ParseHeader. 
+func (e *Endpoint) ParseHeader(pkt stack.PacketBufferPtr) bool { + return e.child.ParseHeader(pkt) +} diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/sniffer/sniffer.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/sniffer/sniffer.go index 470a57bc2a..c3a1867410 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/link/sniffer/sniffer.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/link/sniffer/sniffer.go @@ -173,8 +173,8 @@ func (e *endpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) func LogPacket(prefix string, dir Direction, protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) { // Figure out the network layer info. var transProto uint8 - src := tcpip.Address("unknown") - dst := tcpip.Address("unknown") + var src tcpip.Address + var dst tcpip.Address var size uint16 var id uint32 var fragmentOffset uint16 @@ -232,8 +232,8 @@ func LogPacket(prefix string, dir Direction, protocol tcpip.NetworkProtocolNumbe "%s%s arp %s (%s) -> %s (%s) valid:%t", prefix, directionPrefix, - tcpip.Address(arp.ProtocolAddressSender()), tcpip.LinkAddress(arp.HardwareAddressSender()), - tcpip.Address(arp.ProtocolAddressTarget()), tcpip.LinkAddress(arp.HardwareAddressTarget()), + tcpip.AddrFromSlice(arp.ProtocolAddressSender()), tcpip.LinkAddress(arp.HardwareAddressSender()), + tcpip.AddrFromSlice(arp.ProtocolAddressTarget()), tcpip.LinkAddress(arp.HardwareAddressTarget()), arp.IsValid(), ) return diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/arp/arp.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/arp/arp.go index 1f14899932..68786e108e 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/arp/arp.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/arp/arp.go @@ -169,14 +169,14 @@ func (e *endpoint) HandlePacket(pkt stack.PacketBufferPtr) { switch h.Op() { case header.ARPRequest: stats.requestsReceived.Increment() - localAddr := tcpip.Address(h.ProtocolAddressTarget()) + localAddr := tcpip.AddrFrom4Slice(h.ProtocolAddressTarget()) if !e.nic.CheckLocalAddress(header.IPv4ProtocolNumber, localAddr) { stats.requestsReceivedUnknownTargetAddress.Increment() return // we have no useful answer, ignore the request } - remoteAddr := tcpip.Address(h.ProtocolAddressSender()) + remoteAddr := tcpip.AddrFrom4Slice(h.ProtocolAddressSender()) remoteLinkAddr := tcpip.LinkAddress(h.HardwareAddressSender()) switch err := e.nic.HandleNeighborProbe(header.IPv4ProtocolNumber, remoteAddr, remoteLinkAddr); err.(type) { @@ -223,19 +223,18 @@ func (e *endpoint) HandlePacket(pkt stack.PacketBufferPtr) { case header.ARPReply: stats.repliesReceived.Increment() - addr := tcpip.Address(h.ProtocolAddressSender()) + addr := tcpip.AddrFrom4Slice(h.ProtocolAddressSender()) linkAddr := tcpip.LinkAddress(h.HardwareAddressSender()) e.mu.Lock() e.dad.StopLocked(addr, &stack.DADDupAddrDetected{HolderLinkAddress: linkAddr}) e.mu.Unlock() - // The solicited, override, and isRouter flags are not available for ARP; - // they are only available for IPv6 Neighbor Advertisements. switch err := e.nic.HandleNeighborConfirmation(header.IPv4ProtocolNumber, addr, linkAddr, stack.ReachabilityConfirmationFlags{ - // Solicited and unsolicited (also referred to as gratuitous) ARP Replies - // are handled equivalently to a solicited Neighbor Advertisement. - Solicited: true, + // Only unicast ARP replies are considered solicited. Broadcast replies + // are gratuitous ARP replies and should not move neighbor entries to the + // reachable state. 
+ Solicited: pkt.PktType == tcpip.PacketHost, // If a different link address is received than the one cached, the entry // should always go to Stale. Override: false, @@ -267,7 +266,7 @@ func (p *protocol) Number() tcpip.NetworkProtocolNumber { return ProtocolNumber func (p *protocol) MinimumPacketSize() int { return header.ARPSize } func (*protocol) ParseAddresses([]byte) (src, dst tcpip.Address) { - return "", "" + return tcpip.Address{}, tcpip.Address{} } func (p *protocol) NewEndpoint(nic stack.NetworkInterface, _ stack.TransportDispatcher) stack.NetworkEndpoint { @@ -308,13 +307,13 @@ func (e *endpoint) LinkAddressRequest(targetAddr, localAddr tcpip.Address, remot remoteLinkAddr = header.EthernetBroadcastAddress } - if len(localAddr) == 0 { + if localAddr.BitLen() == 0 { addr, err := e.nic.PrimaryAddress(header.IPv4ProtocolNumber) if err != nil { return err } - if len(addr.Address) == 0 { + if addr.Address.BitLen() == 0 { stats.outgoingRequestInterfaceHasNoLocalAddressErrors.Increment() return &tcpip.ErrNetworkUnreachable{} } @@ -340,10 +339,10 @@ func (e *endpoint) sendARPRequest(localAddr, targetAddr tcpip.Address, remoteLin // TODO(gvisor.dev/issue/4582): check copied length once TAP devices have a // link address. _ = copy(h.HardwareAddressSender(), e.nic.LinkAddress()) - if n := copy(h.ProtocolAddressSender(), localAddr); n != header.IPv4AddressSize { + if n := copy(h.ProtocolAddressSender(), localAddr.AsSlice()); n != header.IPv4AddressSize { panic(fmt.Sprintf("copied %d bytes, expected %d bytes", n, header.IPv4AddressSize)) } - if n := copy(h.ProtocolAddressTarget(), targetAddr); n != header.IPv4AddressSize { + if n := copy(h.ProtocolAddressTarget(), targetAddr.AsSlice()); n != header.IPv4AddressSize { panic(fmt.Sprintf("copied %d bytes, expected %d bytes", n, header.IPv4AddressSize)) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/hash/hash.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/hash/hash.go index 8f65713c59..ff80187a6a 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/hash/hash.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/hash/hash.go @@ -68,9 +68,9 @@ func Hash3Words(a, b, c, initval uint32) uint32 { // IPv4FragmentHash computes the hash of the IPv4 fragment as suggested in RFC 791. func IPv4FragmentHash(h header.IPv4) uint32 { x := uint32(h.ID())<<16 | uint32(h.Protocol()) - t := h.SourceAddress() + t := h.SourceAddress().As4() y := uint32(t[0]) | uint32(t[1])<<8 | uint32(t[2])<<16 | uint32(t[3])<<24 - t = h.DestinationAddress() + t = h.DestinationAddress().As4() z := uint32(t[0]) | uint32(t[1])<<8 | uint32(t[2])<<16 | uint32(t[3])<<24 return Hash3Words(x, y, z, hashIV) } @@ -81,9 +81,9 @@ func IPv4FragmentHash(h header.IPv4) uint32 { // As a reference, also Linux ignores the protocol to compute // the hash (inet6_hash_frag). 
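Most of the churn in the hunks above and below is mechanical: tcpip.Address is no longer a string-backed type, so byte-slice conversions become explicit constructors and accessors. A small illustrative sketch of the API this patch migrates to (the 0/32/128 BitLen values are how the zero-address checks above behave, noted here as an aside rather than taken from this diff):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip"
)

func main() {
	// Fixed-width constructors replace string conversions.
	v4 := tcpip.AddrFrom4Slice([]byte{192, 0, 2, 1})
	v6 := tcpip.AddrFrom16([16]byte{0xfe, 0x80, 15: 0x01})

	// AsSlice/As4/As16 are the read-side counterparts used by the copy and
	// hashing code in this patch.
	a16 := v6.As16()
	fmt.Println(v4.AsSlice(), a16[0])

	// BitLen replaces len(addr) checks: 0 for the zero Address, otherwise the
	// address width in bits.
	var zero tcpip.Address
	fmt.Println(zero.BitLen(), v4.BitLen(), v6.BitLen()) // 0 32 128

	// AddrFromSlice infers the width from the slice length.
	fmt.Println(tcpip.AddrFromSlice([]byte{198, 51, 100, 7}))
}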
func IPv6FragmentHash(h header.IPv6, id uint32) uint32 { - t := h.SourceAddress() + t := h.SourceAddress().As16() y := uint32(t[0]) | uint32(t[1])<<8 | uint32(t[2])<<16 | uint32(t[3])<<24 - t = h.DestinationAddress() + t = h.DestinationAddress().As16() z := uint32(t[0]) | uint32(t[1])<<8 | uint32(t[2])<<16 | uint32(t[3])<<24 return Hash3Words(id, y, z, hashIV) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/fragmentation.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/fragmentation.go index 86695d73b6..39dc5ad02c 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/fragmentation.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/fragmentation.go @@ -21,7 +21,7 @@ import ( "fmt" "time" - "gvisor.dev/gvisor/pkg/bufferv2" + "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/log" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/tcpip" @@ -158,25 +158,25 @@ func (f *Fragmentation) Process( id FragmentID, first, last uint16, more bool, proto uint8, pkt stack.PacketBufferPtr) ( stack.PacketBufferPtr, uint8, bool, error) { if first > last { - return stack.PacketBufferPtr{}, 0, false, fmt.Errorf("first=%d is greater than last=%d: %w", first, last, ErrInvalidArgs) + return nil, 0, false, fmt.Errorf("first=%d is greater than last=%d: %w", first, last, ErrInvalidArgs) } if first%f.blockSize != 0 { - return stack.PacketBufferPtr{}, 0, false, fmt.Errorf("first=%d is not a multiple of block size=%d: %w", first, f.blockSize, ErrInvalidArgs) + return nil, 0, false, fmt.Errorf("first=%d is not a multiple of block size=%d: %w", first, f.blockSize, ErrInvalidArgs) } fragmentSize := last - first + 1 if more && fragmentSize%f.blockSize != 0 { - return stack.PacketBufferPtr{}, 0, false, fmt.Errorf("fragment size=%d bytes is not a multiple of block size=%d on non-final fragment: %w", fragmentSize, f.blockSize, ErrInvalidArgs) + return nil, 0, false, fmt.Errorf("fragment size=%d bytes is not a multiple of block size=%d on non-final fragment: %w", fragmentSize, f.blockSize, ErrInvalidArgs) } if l := pkt.Data().Size(); l != int(fragmentSize) { - return stack.PacketBufferPtr{}, 0, false, fmt.Errorf("got fragment size=%d bytes not equal to the expected fragment size=%d bytes (first=%d last=%d): %w", l, fragmentSize, first, last, ErrInvalidArgs) + return nil, 0, false, fmt.Errorf("got fragment size=%d bytes not equal to the expected fragment size=%d bytes (first=%d last=%d): %w", l, fragmentSize, first, last, ErrInvalidArgs) } f.mu.Lock() if f.reassemblers == nil { - return stack.PacketBufferPtr{}, 0, false, fmt.Errorf("Release() called before fragmentation processing could finish") + return nil, 0, false, fmt.Errorf("Release() called before fragmentation processing could finish") } r, ok := f.reassemblers[id] @@ -201,7 +201,7 @@ func (f *Fragmentation) Process( f.mu.Lock() f.release(r, false /* timedOut */) f.mu.Unlock() - return stack.PacketBufferPtr{}, 0, false, fmt.Errorf("fragmentation processing error: %w", err) + return nil, 0, false, fmt.Errorf("fragmentation processing error: %w", err) } f.mu.Lock() f.memSize += memConsumed @@ -253,12 +253,12 @@ func (f *Fragmentation) release(r *reassembler, timedOut bool) { } if !r.pkt.IsNil() { r.pkt.DecRef() - r.pkt = stack.PacketBufferPtr{} + r.pkt = nil } for _, h := range r.holes { if !h.pkt.IsNil() { h.pkt.DecRef() - h.pkt = stack.PacketBufferPtr{} + h.pkt = nil } } r.holes = nil @@ -291,7 +291,7 @@ func (f *Fragmentation) releaseReassemblersLocked() { // 
PacketFragmenter is the book-keeping struct for packet fragmentation. type PacketFragmenter struct { transportHeader []byte - data bufferv2.Buffer + data buffer.Buffer reserve int fragmentPayloadLen int fragmentCount int @@ -316,7 +316,7 @@ func MakePacketFragmenter(pkt stack.PacketBufferPtr, fragmentPayloadLen uint32, // TODO(gvisor.dev/issue/3912): Once Authentication or ESP Headers are // supported for outbound packets, the fragmentable data should not include // these headers. - var fragmentableData bufferv2.Buffer + var fragmentableData buffer.Buffer fragmentableData.Append(pkt.TransportHeader().View()) pktBuf := pkt.Data().ToBuffer() fragmentableData.Merge(&pktBuf) diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/reassembler.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/reassembler.go index a9ba0587b2..873e034f68 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/reassembler.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation/reassembler.go @@ -67,7 +67,7 @@ func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt st // A concurrent goroutine might have already reassembled // the packet and emptied the heap while this goroutine // was waiting on the mutex. We don't have to do anything in this case. - return stack.PacketBufferPtr{}, 0, false, 0, nil + return nil, 0, false, 0, nil } var holeFound bool @@ -91,12 +91,12 @@ func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt st // https://github.com/torvalds/linux/blob/38525c6/net/ipv4/inet_fragment.c#L349 if first < currentHole.first || currentHole.last < last { // Incoming fragment only partially fits in the free hole. - return stack.PacketBufferPtr{}, 0, false, 0, ErrFragmentOverlap + return nil, 0, false, 0, ErrFragmentOverlap } if !more { if !currentHole.final || currentHole.filled && currentHole.last != last { // We have another final fragment, which does not perfectly overlap. - return stack.PacketBufferPtr{}, 0, false, 0, ErrFragmentConflict + return nil, 0, false, 0, ErrFragmentConflict } } @@ -155,12 +155,12 @@ func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt st } if !holeFound { // Incoming fragment is beyond end. - return stack.PacketBufferPtr{}, 0, false, 0, ErrFragmentConflict + return nil, 0, false, 0, ErrFragmentConflict } // Check if all the holes have been filled and we are ready to reassemble. if r.filled < len(r.holes) { - return stack.PacketBufferPtr{}, 0, false, memConsumed, nil + return nil, 0, false, memConsumed, nil } sort.Slice(r.holes, func(i, j int) bool { diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/errors.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/errors.go index 5ff59fd598..b381c4c033 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/errors.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/errors.go @@ -34,6 +34,14 @@ func (*ErrTTLExceeded) isForwardingError() {} func (*ErrTTLExceeded) String() string { return "ttl exceeded" } +// ErrOutgoingDeviceNoBufferSpace indicates that the outgoing device does not +// have enough space to hold a buffer. +type ErrOutgoingDeviceNoBufferSpace struct{} + +func (*ErrOutgoingDeviceNoBufferSpace) isForwardingError() {} + +func (*ErrOutgoingDeviceNoBufferSpace) String() string { return "no device buffer space" } + // ErrParameterProblem indicates the received packet had a problem with an IP // parameter. 
type ErrParameterProblem struct{} @@ -42,13 +50,22 @@ func (*ErrParameterProblem) isForwardingError() {} func (*ErrParameterProblem) String() string { return "parameter problem" } +// ErrInitializingSourceAddress indicates the received packet had a source +// address that may only be used on the local network as part of initialization +// work. +type ErrInitializingSourceAddress struct{} + +func (*ErrInitializingSourceAddress) isForwardingError() {} + +func (*ErrInitializingSourceAddress) String() string { return "initializing source address" } + // ErrLinkLocalSourceAddress indicates the received packet had a link-local // source address. type ErrLinkLocalSourceAddress struct{} func (*ErrLinkLocalSourceAddress) isForwardingError() {} -func (*ErrLinkLocalSourceAddress) String() string { return "link local destination address" } +func (*ErrLinkLocalSourceAddress) String() string { return "link local source address" } // ErrLinkLocalDestinationAddress indicates the received packet had a link-local // destination address. diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/generic_multicast_protocol.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/generic_multicast_protocol.go index 428907e949..884ea0562c 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/generic_multicast_protocol.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/generic_multicast_protocol.go @@ -21,6 +21,7 @@ import ( "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/tcpip" + "gvisor.dev/gvisor/pkg/tcpip/header" ) const ( @@ -68,6 +69,38 @@ const ( // address and any multicast addresses of scope 0 (reserved) or 1 // (node-local). minQueryResponseTransmissionCount = 1 + + // DefaultRobustnessVariable is the default robustness variable + // + // As per RFC 3810 section 9.1 (for MLDv2), + // + // The Robustness Variable allows tuning for the expected packet loss on + // a link. If a link is expected to be lossy, the value of the + // Robustness Variable may be increased. MLD is robust to [Robustness + // Variable] - 1 packet losses. The value of the Robustness Variable + // MUST NOT be zero, and SHOULD NOT be one. Default value: 2. + // + // As per RFC 3376 section 8.1 (for IGMPv3), + // + // The Robustness Variable allows tuning for the expected packet loss on + // a network. If a network is expected to be lossy, the Robustness + // Variable may be increased. IGMP is robust to (Robustness Variable - + // 1) packet losses. The Robustness Variable MUST NOT be zero, and + // SHOULD NOT be one. Default: 2 + DefaultRobustnessVariable = 2 + + // DefaultQueryInterval is the default query interval. + // + // As per RFC 3810 section 9.2 (for MLDv2), + // + // The Query Interval variable denotes the interval between General + // Queries sent by the Querier. Default value: 125 seconds. + // + // As per RFC 3376 section 8.2 (for IGMPv3), + // + // The Query Interval is the interval between General Queries sent by + // the Querier. Default: 125 seconds. + DefaultQueryInterval = 125 * time.Second ) // multicastGroupState holds the Generic Multicast Protocol state for a @@ -77,7 +110,7 @@ type multicastGroupState struct { joins uint64 // transmissionLeft is the number of transmissions left to send. - transmissionLeft uint + transmissionLeft uint8 // lastToSendReport is true if we sent the last report for the group. 
It is // used to track whether there are other hosts on the subnet that are also @@ -98,6 +131,14 @@ type multicastGroupState struct { // // A zero value indicates that the job is not scheduled. delayedReportJobFiresAt time.Time + + // queriedIncludeSources holds sources that were queried for. + // + // Indicates that there is a pending source-specific query response for the + // multicast address. + queriedIncludeSources map[tcpip.Address]struct{} + + deleteScheduled bool } func (m *multicastGroupState) cancelDelayedReportJob() { @@ -106,6 +147,12 @@ func (m *multicastGroupState) cancelDelayedReportJob() { m.transmissionLeft = 0 } +func (m *multicastGroupState) clearQueriedIncludeSources() { + for source := range m.queriedIncludeSources { + delete(m.queriedIncludeSources, source) + } +} + // GenericMulticastProtocolOptions holds options for the generic multicast // protocol. type GenericMulticastProtocolOptions struct { @@ -126,6 +173,34 @@ type GenericMulticastProtocolOptions struct { MaxUnsolicitedReportDelay time.Duration } +// MulticastGroupProtocolV2ReportRecordType is the type of a +// MulticastGroupProtocolv2 multicast address record. +type MulticastGroupProtocolV2ReportRecordType int + +// MulticastGroupProtocolv2 multicast address record types. +const ( + _ MulticastGroupProtocolV2ReportRecordType = iota + MulticastGroupProtocolV2ReportRecordModeIsInclude + MulticastGroupProtocolV2ReportRecordModeIsExclude + MulticastGroupProtocolV2ReportRecordChangeToIncludeMode + MulticastGroupProtocolV2ReportRecordChangeToExcludeMode + MulticastGroupProtocolV2ReportRecordAllowNewSources + MulticastGroupProtocolV2ReportRecordBlockOldSources +) + +// MulticastGroupProtocolV2ReportBuilder is a builder for a V2 report. +type MulticastGroupProtocolV2ReportBuilder interface { + // AddRecord adds a record to the report. + AddRecord(recordType MulticastGroupProtocolV2ReportRecordType, groupAddress tcpip.Address) + + // Send sends the report. + // + // Does nothing if no records were added. + // + // It is invalid to use this builder after this method is called. + Send() (sent bool, err tcpip.Error) +} + // MulticastGroupProtocol is a multicast group protocol whose core state machine // can be represented by GenericMulticastProtocolState. type MulticastGroupProtocol interface { @@ -152,8 +227,27 @@ type MulticastGroupProtocol interface { // ShouldPerformProtocol returns true iff the protocol should be performed for // the specified group. ShouldPerformProtocol(tcpip.Address) bool + + // NewReportV2Builder creates a new V2 builder. + NewReportV2Builder() MulticastGroupProtocolV2ReportBuilder + + // V2QueryMaxRespCodeToV2Delay takes a V2 query's maximum response code and + // returns the V2 delay. + V2QueryMaxRespCodeToV2Delay(code uint16) time.Duration + + // V2QueryMaxRespCodeToV1Delay takes a V2 query's maximum response code and + // returns the V1 delay. + V2QueryMaxRespCodeToV1Delay(code uint16) time.Duration } +type protocolMode int + +const ( + protocolModeV2 protocolMode = iota + protocolModeV1 + protocolModeV1Compatibility +) + // GenericMulticastProtocolState is the per interface generic multicast protocol // state. // @@ -184,6 +278,71 @@ type GenericMulticastProtocolState struct { // protocolMU is the mutex used to protect the protocol. protocolMU *sync.RWMutex + + // V2 state. 
+ robustnessVariable uint8 + queryInterval time.Duration + mode protocolMode + modeTimer tcpip.Timer + + generalQueryV2Timer tcpip.Timer + generalQueryV2TimerFiresAt time.Time + + stateChangedReportV2Timer tcpip.Timer + stateChangedReportV2TimerSet bool +} + +// GetV1ModeLocked returns the V1 configuration. +// +// Precondition: g.protocolMU must be read locked. +func (g *GenericMulticastProtocolState) GetV1ModeLocked() bool { + switch g.mode { + case protocolModeV2, protocolModeV1Compatibility: + return false + case protocolModeV1: + return true + default: + panic(fmt.Sprintf("unrecognized mode = %d", g.mode)) + } +} + +func (g *GenericMulticastProtocolState) stopModeTimer() { + if g.modeTimer != nil { + g.modeTimer.Stop() + } +} + +// SetV1ModeLocked sets the V1 configuration. +// +// Returns the previous configuration. +// +// Precondition: g.protocolMU must be locked. +func (g *GenericMulticastProtocolState) SetV1ModeLocked(v bool) bool { + if g.GetV1ModeLocked() == v { + return v + } + + if v { + g.stopModeTimer() + g.cancelV2ReportTimers() + g.mode = protocolModeV1 + return false + } + + g.mode = protocolModeV2 + return true +} + +func (g *GenericMulticastProtocolState) cancelV2ReportTimers() { + if g.generalQueryV2Timer != nil { + g.generalQueryV2Timer.Stop() + g.generalQueryV2TimerFiresAt = time.Time{} + } + + if g.stateChangedReportV2Timer != nil { + g.stateChangedReportV2Timer.Stop() + g.stateChangedReportV2TimerSet = false + } } // Init initializes the Generic Multicast Protocol state. @@ -202,9 +361,12 @@ func (g *GenericMulticastProtocolState) Init(protocolMU *sync.RWMutex, opts Gene } *g = GenericMulticastProtocolState{ - opts: opts, - memberships: make(map[tcpip.Address]multicastGroupState), - protocolMU: protocolMU, + opts: opts, + memberships: make(map[tcpip.Address]multicastGroupState), + protocolMU: protocolMU, + robustnessVariable: DefaultRobustnessVariable, + queryInterval: DefaultQueryInterval, + mode: protocolModeV2, } } @@ -220,9 +382,50 @@ func (g *GenericMulticastProtocolState) MakeAllNonMemberLocked() { return } + g.stopModeTimer() + g.cancelV2ReportTimers() + + var v2ReportBuilder MulticastGroupProtocolV2ReportBuilder + var handler func(tcpip.Address, *multicastGroupState) + switch g.mode { + case protocolModeV2: + v2ReportBuilder = g.opts.Protocol.NewReportV2Builder() + handler = func(groupAddress tcpip.Address, _ *multicastGroupState) { + // Send a report immediately to announce us leaving the group. + v2ReportBuilder.AddRecord( + MulticastGroupProtocolV2ReportRecordChangeToIncludeMode, + groupAddress, + ) + } + case protocolModeV1Compatibility: + g.mode = protocolModeV2 + fallthrough + case protocolModeV1: + handler = g.transitionToNonMemberLocked + default: + panic(fmt.Sprintf("unrecognized mode = %d", g.mode)) + } + for groupAddress, info := range g.memberships { - g.transitionToNonMemberLocked(groupAddress, &info) - g.memberships[groupAddress] = info + if !g.shouldPerformForGroup(groupAddress) { + continue + } + + handler(groupAddress, &info) + + if info.deleteScheduled { + delete(g.memberships, groupAddress) + } else { + info.transmissionLeft = 0 + g.memberships[groupAddress] = info + } + } + + if v2ReportBuilder != nil { + // Nothing meaningful we can do with the error here - this method may be + // called when an interface is being disabled when we expect sends to + // fail. 
+ _, _ = v2ReportBuilder.Send() } } @@ -238,10 +441,41 @@ func (g *GenericMulticastProtocolState) InitializeGroupsLocked() { return } + var v2ReportBuilder MulticastGroupProtocolV2ReportBuilder + switch g.mode { + case protocolModeV2: + v2ReportBuilder = g.opts.Protocol.NewReportV2Builder() + case protocolModeV1Compatibility, protocolModeV1: + default: + panic(fmt.Sprintf("unrecognized mode = %d", g.mode)) + } + for groupAddress, info := range g.memberships { - g.initializeNewMemberLocked(groupAddress, &info) + g.initializeNewMemberLocked(groupAddress, &info, v2ReportBuilder) g.memberships[groupAddress] = info } + + if v2ReportBuilder == nil { + return + } + + if sent, err := v2ReportBuilder.Send(); sent && err == nil { + g.scheduleStateChangedTimer() + } else { + // Nothing meaningful we could do with the error here - the interface may + // not yet have an address. This is okay because we would either schedule a + // report to be sent later or we will be notified when an address is added, + // at which point we will try to send messages again. + for groupAddress, info := range g.memberships { + if !g.shouldPerformForGroup(groupAddress) { + continue + } + + // Revert the transmissions count since we did not successfully send. + info.transmissionLeft++ + g.memberships[groupAddress] = info + } + } } // SendQueuedReportsLocked attempts to send reports for groups that failed to @@ -249,9 +483,21 @@ func (g *GenericMulticastProtocolState) InitializeGroupsLocked() { // // Precondition: g.protocolMU must be locked. func (g *GenericMulticastProtocolState) SendQueuedReportsLocked() { + if g.stateChangedReportV2TimerSet { + return + } + for groupAddress, info := range g.memberships { if info.delayedReportJobFiresAt.IsZero() { - g.maybeSendReportLocked(groupAddress, &info) + switch g.mode { + case protocolModeV2: + g.sendV2ReportAndMaybeScheduleChangedTimer(groupAddress, &info, MulticastGroupProtocolV2ReportRecordChangeToExcludeMode) + case protocolModeV1Compatibility, protocolModeV1: + g.maybeSendReportLocked(groupAddress, &info) + default: + panic(fmt.Sprintf("unrecognized mode = %d", g.mode)) + } + g.memberships[groupAddress] = info } } @@ -261,37 +507,56 @@ func (g *GenericMulticastProtocolState) SendQueuedReportsLocked() { // // Precondition: g.protocolMU must be locked. func (g *GenericMulticastProtocolState) JoinGroupLocked(groupAddress tcpip.Address) { - if info, ok := g.memberships[groupAddress]; ok { - // The group has already been joined. + info, ok := g.memberships[groupAddress] + if ok { info.joins++ - g.memberships[groupAddress] = info - return - } + if info.joins > 1 { + // The group has already been joined. + g.memberships[groupAddress] = info + return + } + } else { + info = multicastGroupState{ + // Since we just joined the group, its count is 1. + joins: 1, + lastToSendReport: false, + delayedReportJob: tcpip.NewJob(g.opts.Clock, g.protocolMU, func() { + if !g.opts.Protocol.Enabled() { + panic(fmt.Sprintf("delayed report job fired for group %s while the multicast group protocol is disabled", groupAddress)) + } - info := multicastGroupState{ - // Since we just joined the group, its count is 1. 
- joins: 1, - lastToSendReport: false, - delayedReportJob: tcpip.NewJob(g.opts.Clock, g.protocolMU, func() { - if !g.opts.Protocol.Enabled() { - panic(fmt.Sprintf("delayed report job fired for group %s while the multicast group protocol is disabled", groupAddress)) - } + info, ok := g.memberships[groupAddress] + if !ok { + panic(fmt.Sprintf("expected to find group state for group = %s", groupAddress)) + } - info, ok := g.memberships[groupAddress] - if !ok { - panic(fmt.Sprintf("expected to find group state for group = %s", groupAddress)) - } + info.delayedReportJobFiresAt = time.Time{} - info.delayedReportJobFiresAt = time.Time{} - g.maybeSendReportLocked(groupAddress, &info) - g.memberships[groupAddress] = info - }), - } + switch g.mode { + case protocolModeV2: + reportBuilder := g.opts.Protocol.NewReportV2Builder() + reportBuilder.AddRecord(MulticastGroupProtocolV2ReportRecordModeIsExclude, groupAddress) + // Nothing meaningful we can do with the error here - we only try to + // send a delayed report once. + _, _ = reportBuilder.Send() + case protocolModeV1Compatibility, protocolModeV1: + g.maybeSendReportLocked(groupAddress, &info) + default: + panic(fmt.Sprintf("unrecognized mode = %d", g.mode)) + } - if g.opts.Protocol.Enabled() { - g.initializeNewMemberLocked(groupAddress, &info) + info.clearQueriedIncludeSources() + g.memberships[groupAddress] = info + }), + queriedIncludeSources: make(map[tcpip.Address]struct{}), + } } + info.deleteScheduled = false + info.clearQueriedIncludeSources() + info.delayedReportJobFiresAt = time.Time{} + info.lastToSendReport = false + g.initializeNewMemberLocked(groupAddress, &info, nil /* callersV2ReportBuilder */) g.memberships[groupAddress] = info } @@ -299,8 +564,91 @@ func (g *GenericMulticastProtocolState) JoinGroupLocked(groupAddress tcpip.Addre // // Precondition: g.protocolMU must be read locked. func (g *GenericMulticastProtocolState) IsLocallyJoinedRLocked(groupAddress tcpip.Address) bool { - _, ok := g.memberships[groupAddress] - return ok + info, ok := g.memberships[groupAddress] + return ok && !info.deleteScheduled +} + +func (g *GenericMulticastProtocolState) sendV2ReportAndMaybeScheduleChangedTimer( + groupAddress tcpip.Address, + info *multicastGroupState, + recordType MulticastGroupProtocolV2ReportRecordType, +) bool { + if info.transmissionLeft == 0 { + return false + } + + successfullySentAndHasMore := false + + // Send a report immediately to announce us leaving the group. + reportBuilder := g.opts.Protocol.NewReportV2Builder() + reportBuilder.AddRecord(recordType, groupAddress) + if sent, err := reportBuilder.Send(); sent && err == nil { + info.transmissionLeft-- + + successfullySentAndHasMore = info.transmissionLeft != 0 + + // Use the interface-wide state changed report for further transmissions. + if successfullySentAndHasMore { + g.scheduleStateChangedTimer() + } + } + + return successfullySentAndHasMore +} + +func (g *GenericMulticastProtocolState) scheduleStateChangedTimer() { + if g.stateChangedReportV2TimerSet { + return + } + + delay := g.calculateDelayTimerDuration(g.opts.MaxUnsolicitedReportDelay) + if g.stateChangedReportV2Timer == nil { + // TODO(https://issuetracker.google.com/264799098): Create timer on + // initialization instead of lazily creating the timer since the timer + // does not change after being created. 
+ g.stateChangedReportV2Timer = g.opts.Clock.AfterFunc(delay, func() { + g.protocolMU.Lock() + defer g.protocolMU.Unlock() + + reportBuilder := g.opts.Protocol.NewReportV2Builder() + nonEmptyReport := false + for groupAddress, info := range g.memberships { + if info.transmissionLeft == 0 || !g.shouldPerformForGroup(groupAddress) { + continue + } + + info.transmissionLeft-- + nonEmptyReport = true + + mode := MulticastGroupProtocolV2ReportRecordChangeToExcludeMode + if info.deleteScheduled { + mode = MulticastGroupProtocolV2ReportRecordChangeToIncludeMode + } + reportBuilder.AddRecord(mode, groupAddress) + + if info.deleteScheduled && info.transmissionLeft == 0 { + // No more transmissions left so we can actually delete the + // membership. + delete(g.memberships, groupAddress) + } else { + g.memberships[groupAddress] = info + } + } + + // Nothing meaningful we can do with the error here. We will retry + // sending a state changed report again anyways. + _, _ = reportBuilder.Send() + + if nonEmptyReport { + g.stateChangedReportV2Timer.Reset(g.calculateDelayTimerDuration(g.opts.MaxUnsolicitedReportDelay)) + } else { + g.stateChangedReportV2TimerSet = false + } + }) + } else { + g.stateChangedReportV2Timer.Reset(delay) + } + g.stateChangedReportV2TimerSet = true } // LeaveGroupLocked handles leaving the group. @@ -310,13 +658,10 @@ func (g *GenericMulticastProtocolState) IsLocallyJoinedRLocked(groupAddress tcpi // Precondition: g.protocolMU must be locked. func (g *GenericMulticastProtocolState) LeaveGroupLocked(groupAddress tcpip.Address) bool { info, ok := g.memberships[groupAddress] - if !ok { + if !ok || info.joins == 0 { return false } - if info.joins == 0 { - panic(fmt.Sprintf("tried to leave group %s with a join count of 0", groupAddress)) - } info.joins-- if info.joins != 0 { // If we still have outstanding joins, then do nothing further. @@ -324,11 +669,210 @@ func (g *GenericMulticastProtocolState) LeaveGroupLocked(groupAddress tcpip.Addr return true } - g.transitionToNonMemberLocked(groupAddress, &info) - delete(g.memberships, groupAddress) + info.deleteScheduled = true + info.cancelDelayedReportJob() + + if !g.shouldPerformForGroup(groupAddress) { + delete(g.memberships, groupAddress) + return true + } + + switch g.mode { + case protocolModeV2: + info.transmissionLeft = g.robustnessVariable + if g.sendV2ReportAndMaybeScheduleChangedTimer(groupAddress, &info, MulticastGroupProtocolV2ReportRecordChangeToIncludeMode) { + g.memberships[groupAddress] = info + } else { + delete(g.memberships, groupAddress) + } + case protocolModeV1Compatibility, protocolModeV1: + g.transitionToNonMemberLocked(groupAddress, &info) + delete(g.memberships, groupAddress) + default: + panic(fmt.Sprintf("unrecognized mode = %d", g.mode)) + } + return true } +// HandleQueryV2Locked handles a V2 query. +// +// Precondition: g.protocolMU must be locked. 
+func (g *GenericMulticastProtocolState) HandleQueryV2Locked(groupAddress tcpip.Address, maxResponseCode uint16, sources header.AddressIterator, robustnessVariable uint8, queryInterval time.Duration) { + if !g.opts.Protocol.Enabled() { + return + } + + switch g.mode { + case protocolModeV1Compatibility, protocolModeV1: + g.handleQueryInnerLocked(groupAddress, g.opts.Protocol.V2QueryMaxRespCodeToV1Delay(maxResponseCode)) + return + case protocolModeV2: + default: + panic(fmt.Sprintf("unrecognized mode = %d", g.mode)) + } + + if robustnessVariable != 0 { + g.robustnessVariable = robustnessVariable + } + + if queryInterval != 0 { + g.queryInterval = queryInterval + } + + maxResponseTime := g.calculateDelayTimerDuration(g.opts.Protocol.V2QueryMaxRespCodeToV2Delay(maxResponseCode)) + + // As per RFC 3376 section 5.2, + // + // 1. If there is a pending response to a previous General Query + // scheduled sooner than the selected delay, no additional response + // needs to be scheduled. + // + // 2. If the received Query is a General Query, the interface timer is + // used to schedule a response to the General Query after the + // selected delay. Any previously pending response to a General + // Query is canceled. + // + // 3. If the received Query is a Group-Specific Query or a Group-and- + // Source-Specific Query and there is no pending response to a + // previous Query for this group, then the group timer is used to + // schedule a report. If the received Query is a Group-and-Source- + // Specific Query, the list of queried sources is recorded to be used + // when generating a response. + // + // 4. If there already is a pending response to a previous Query + // scheduled for this group, and either the new Query is a Group- + // Specific Query or the recorded source-list associated with the + // group is empty, then the group source-list is cleared and a single + // response is scheduled using the group timer. The new response is + // scheduled to be sent at the earliest of the remaining time for the + // pending report and the selected delay. + // + // 5. If the received Query is a Group-and-Source-Specific Query and + // there is a pending response for this group with a non-empty + // source-list, then the group source list is augmented to contain + // the list of sources in the new Query and a single response is + // scheduled using the group timer. The new response is scheduled to + // be sent at the earliest of the remaining time for the pending + // report and the selected delay. + // + // As per RFC 3810 section 6.2, + // + // 1. If there is a pending response to a previous General Query + // scheduled sooner than the selected delay, no additional response + // needs to be scheduled. + // + // 2. If the received Query is a General Query, the Interface Timer is + // used to schedule a response to the General Query after the + // selected delay. Any previously pending response to a General + // Query is canceled. + // + // 3. If the received Query is a Multicast Address Specific Query or a + // Multicast Address and Source Specific Query and there is no + // pending response to a previous Query for this multicast address, + // then the Multicast Address Timer is used to schedule a report. If + // the received Query is a Multicast Address and Source Specific + // Query, the list of queried sources is recorded to be used when + // generating a response. + // + // 4. 
If there is already a pending response to a previous Query + // scheduled for this multicast address, and either the new Query is + // a Multicast Address Specific Query or the recorded source list + // associated with the multicast address is empty, then the multicast + // address source list is cleared and a single response is scheduled, + // using the Multicast Address Timer. The new response is scheduled + // to be sent at the earliest of the remaining time for the pending + // report and the selected delay. + // + // 5. If the received Query is a Multicast Address and Source Specific + // Query and there is a pending response for this multicast address + // with a non-empty source list, then the multicast address source + // list is augmented to contain the list of sources in the new Query, + // and a single response is scheduled using the Multicast Address + // Timer. The new response is scheduled to be sent at the earliest + // of the remaining time for the pending report and the selected + // delay. + now := g.opts.Clock.Now() + if !g.generalQueryV2TimerFiresAt.IsZero() && g.generalQueryV2TimerFiresAt.Sub(now) <= maxResponseTime { + return + } + + if groupAddress.Unspecified() { + if g.generalQueryV2Timer == nil { + // TODO(https://issuetracker.google.com/264799098): Create timer on + // initialization instead of lazily creating the timer since the timer + // does not change after being created. + g.generalQueryV2Timer = g.opts.Clock.AfterFunc(maxResponseTime, func() { + g.protocolMU.Lock() + defer g.protocolMU.Unlock() + + g.generalQueryV2TimerFiresAt = time.Time{} + + // As per RFC 3810 section 6.3, + // + // If the expired timer is the Interface Timer (i.e., there is a + // pending response to a General Query), then one Current State + // Record is sent for each multicast address for which the specified + // interface has listening state, as described in section 4.2. The + // Current State Record carries the multicast address and its + // associated filter mode (MODE_IS_INCLUDE or MODE_IS_EXCLUDE) and + // Source list. Multiple Current State Records are packed into + // individual Report messages, to the extent possible. + // + // As per RFC 3376 section 5.2, + // + // If the expired timer is the interface timer (i.e., it is a pending + // response to a General Query), then one Current-State Record is + // sent for each multicast address for which the specified interface + // has reception state, as described in section 3.2. The Current- + // State Record carries the multicast address and its associated + // filter mode (MODE_IS_INCLUDE or MODE_IS_EXCLUDE) and source list. + // Multiple Current-State Records are packed into individual Report + // messages, to the extent possible. + reportBuilder := g.opts.Protocol.NewReportV2Builder() + for groupAddress, info := range g.memberships { + if info.deleteScheduled || !g.shouldPerformForGroup(groupAddress) { + continue + } + + // A MODE_IS_EXCLUDE record without any sources indicates that we are + // interested in traffic from all sources for the group. + // + // We currently only hold groups if we have an active interest in the + // group. 
+ reportBuilder.AddRecord( + MulticastGroupProtocolV2ReportRecordModeIsExclude, + groupAddress, + ) + } + + _, _ = reportBuilder.Send() + }) + } else { + g.generalQueryV2Timer.Reset(maxResponseTime) + } + g.generalQueryV2TimerFiresAt = now.Add(maxResponseTime) + return + } + + if info, ok := g.memberships[groupAddress]; ok && !info.deleteScheduled && g.shouldPerformForGroup(groupAddress) { + if info.delayedReportJobFiresAt.IsZero() || (!sources.Done() && len(info.queriedIncludeSources) != 0) { + for { + source, ok := sources.Next() + if !ok { + break + } + + info.queriedIncludeSources[source] = struct{}{} + } + } else { + info.clearQueriedIncludeSources() + } + g.setDelayTimerForAddressLocked(groupAddress, &info, maxResponseTime) + g.memberships[groupAddress] = info + } +} + // HandleQueryLocked handles a query message with the specified maximum response // time. // @@ -344,6 +888,53 @@ func (g *GenericMulticastProtocolState) HandleQueryLocked(groupAddress tcpip.Add return } + switch g.mode { + case protocolModeV2, protocolModeV1Compatibility: + // As per 3376 section 8.12 (for IGMPv3), + // + // The Older Version Querier Interval is the time-out for transitioning + // a host back to IGMPv3 mode once an older version query is heard. + // When an older version query is received, hosts set their Older + // Version Querier Present Timer to Older Version Querier Interval. + // + // This value MUST be ((the Robustness Variable) times (the Query + // Interval in the last Query received)) plus (one Query Response + // Interval). + // + // As per RFC 3810 section 9.12 (for MLDv2), + // + // The Older Version Querier Present Timeout is the time-out for + // transitioning a host back to MLDv2 Host Compatibility Mode. When an + // MLDv1 query is received, MLDv2 hosts set their Older Version Querier + // Present Timer to [Older Version Querier Present Timeout]. + // + // This value MUST be ([Robustness Variable] times (the [Query Interval] + // in the last Query received)) plus ([Query Response Interval]). + modeRevertDelay := time.Duration(g.robustnessVariable) * g.queryInterval + if g.modeTimer == nil { + // TODO(https://issuetracker.google.com/264799098): Create timer on + // initialization instead of lazily creating the timer since the timer + // does not change after being created. 
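// Illustration: with the RFC 3376 defaults of a robustness variable of 2 and
// a 125-second query interval, modeRevertDelay works out to 250 seconds; the
// RFC formula additionally adds one Query Response Interval (10 seconds by
// default), i.e. a nominal Older Version Querier Present Timeout of 260 seconds.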
+ g.modeTimer = g.opts.Clock.AfterFunc(modeRevertDelay, func() { + g.protocolMU.Lock() + defer g.protocolMU.Unlock() + g.mode = protocolModeV2 + }) + } else { + g.modeTimer.Reset(modeRevertDelay) + } + g.mode = protocolModeV1Compatibility + g.cancelV2ReportTimers() + case protocolModeV1: + default: + panic(fmt.Sprintf("unrecognized mode = %d", g.mode)) + } + g.handleQueryInnerLocked(groupAddress, maxResponseTime) +} + +func (g *GenericMulticastProtocolState) handleQueryInnerLocked(groupAddress tcpip.Address, maxResponseTime time.Duration) { + maxResponseTime = g.calculateDelayTimerDuration(maxResponseTime) + // As per RFC 2236 section 2.4 (for IGMPv2), // // In a Membership Query message, the group address field is set to zero @@ -361,7 +952,7 @@ func (g *GenericMulticastProtocolState) HandleQueryLocked(groupAddress tcpip.Add g.setDelayTimerForAddressLocked(groupAddress, &info, maxResponseTime) g.memberships[groupAddress] = info } - } else if info, ok := g.memberships[groupAddress]; ok { + } else if info, ok := g.memberships[groupAddress]; ok && !info.deleteScheduled { g.setDelayTimerForAddressLocked(groupAddress, &info, maxResponseTime) g.memberships[groupAddress] = info } @@ -400,11 +991,27 @@ func (g *GenericMulticastProtocolState) HandleReportLocked(groupAddress tcpip.Ad // initializeNewMemberLocked initializes a new group membership. // // Precondition: g.protocolMU must be locked. -func (g *GenericMulticastProtocolState) initializeNewMemberLocked(groupAddress tcpip.Address, info *multicastGroupState) { +func (g *GenericMulticastProtocolState) initializeNewMemberLocked(groupAddress tcpip.Address, info *multicastGroupState, callersV2ReportBuilder MulticastGroupProtocolV2ReportBuilder) { + if !g.shouldPerformForGroup(groupAddress) { + return + } + info.lastToSendReport = false - if g.shouldPerformForGroup(groupAddress) { + + switch g.mode { + case protocolModeV2: + info.transmissionLeft = g.robustnessVariable + if callersV2ReportBuilder == nil { + g.sendV2ReportAndMaybeScheduleChangedTimer(groupAddress, info, MulticastGroupProtocolV2ReportRecordChangeToExcludeMode) + } else { + callersV2ReportBuilder.AddRecord(MulticastGroupProtocolV2ReportRecordChangeToExcludeMode, groupAddress) + info.transmissionLeft-- + } + case protocolModeV1Compatibility, protocolModeV1: info.transmissionLeft = unsolicitedTransmissionCount g.maybeSendReportLocked(groupAddress, info) + default: + panic(fmt.Sprintf("unrecognized mode = %d", g.mode)) } } @@ -443,7 +1050,11 @@ func (g *GenericMulticastProtocolState) maybeSendReportLocked(groupAddress tcpip info.transmissionLeft-- if info.transmissionLeft > 0 { - g.setDelayTimerForAddressLocked(groupAddress, info, g.opts.MaxUnsolicitedReportDelay) + g.setDelayTimerForAddressLocked( + groupAddress, + info, + g.calculateDelayTimerDuration(g.opts.MaxUnsolicitedReportDelay), + ) } } } @@ -509,10 +1120,6 @@ func (g *GenericMulticastProtocolState) maybeSendLeave(groupAddress tcpip.Addres // // Precondition: g.protocolMU must be locked. 
func (g *GenericMulticastProtocolState) transitionToNonMemberLocked(groupAddress tcpip.Address, info *multicastGroupState) { - if !g.shouldPerformForGroup(groupAddress) { - return - } - info.cancelDelayedReportJob() g.maybeSendLeave(groupAddress, info.lastToSendReport) info.lastToSendReport = false @@ -548,7 +1155,6 @@ func (g *GenericMulticastProtocolState) setDelayTimerForAddressLocked(groupAddre return } - maxResponseTime = g.calculateDelayTimerDuration(maxResponseTime) info.delayedReportJob.Cancel() info.delayedReportJob.Schedule(maxResponseTime) info.delayedReportJobFiresAt = now.Add(maxResponseTime) diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/stats.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/stats.go index 83b0b63fbc..d1e112a0cb 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/stats.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/internal/ip/stats.go @@ -30,6 +30,11 @@ type MultiCounterIPForwardingStats struct { // because their TTL was exhausted. ExhaustedTTL tcpip.MultiCounterStat + // InitializingSource is the number of IP packets which were dropped + // because they contained a source address that may only be used on the local + // network as part of initialization work. + InitializingSource tcpip.MultiCounterStat + // LinkLocalSource is the number of IP packets which were dropped // because they contained a link-local source address. LinkLocalSource tcpip.MultiCounterStat @@ -64,6 +69,10 @@ type MultiCounterIPForwardingStats struct { // were dropped due to insufficent buffer space in the pending packet queue. NoMulticastPendingQueueBufferSpace tcpip.MultiCounterStat + // OutgoingDeviceNoBufferSpace is the number of packets that were dropped due + // to insufficient space in the outgoing device. + OutgoingDeviceNoBufferSpace tcpip.MultiCounterStat + // Errors is the number of IP packets received which could not be // successfully forwarded. 
Errors tcpip.MultiCounterStat @@ -73,6 +82,7 @@ type MultiCounterIPForwardingStats struct { func (m *MultiCounterIPForwardingStats) Init(a, b *tcpip.IPForwardingStats) { m.Unrouteable.Init(a.Unrouteable, b.Unrouteable) m.Errors.Init(a.Errors, b.Errors) + m.InitializingSource.Init(a.InitializingSource, b.InitializingSource) m.LinkLocalSource.Init(a.LinkLocalSource, b.LinkLocalSource) m.LinkLocalDestination.Init(a.LinkLocalDestination, b.LinkLocalDestination) m.ExtensionHeaderProblem.Init(a.ExtensionHeaderProblem, b.ExtensionHeaderProblem) @@ -82,6 +92,7 @@ func (m *MultiCounterIPForwardingStats) Init(a, b *tcpip.IPForwardingStats) { m.UnexpectedMulticastInputInterface.Init(a.UnexpectedMulticastInputInterface, b.UnexpectedMulticastInputInterface) m.UnknownOutputEndpoint.Init(a.UnknownOutputEndpoint, b.UnknownOutputEndpoint) m.NoMulticastPendingQueueBufferSpace.Init(a.NoMulticastPendingQueueBufferSpace, b.NoMulticastPendingQueueBufferSpace) + m.OutgoingDeviceNoBufferSpace.Init(a.OutgoingDeviceNoBufferSpace, b.OutgoingDeviceNoBufferSpace) } // LINT.ThenChange(:MultiCounterIPForwardingStats, ../../../tcpip.go:IPForwardingStats) diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/icmp.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/icmp.go index 7823f991a1..875eca4732 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/icmp.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/icmp.go @@ -17,7 +17,7 @@ package ipv4 import ( "fmt" - "gvisor.dev/gvisor/pkg/bufferv2" + "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/checksum" "gvisor.dev/gvisor/pkg/tcpip/header" @@ -68,6 +68,28 @@ func (*icmpv4DestinationHostUnreachableSockError) Kind() stack.TransportErrorKin return stack.DestinationHostUnreachableTransportError } +var _ stack.TransportError = (*icmpv4DestinationNetUnreachableSockError)(nil) + +// icmpv4DestinationNetUnreachableSockError is an ICMPv4 Destination Net +// Unreachable error. +// +// It indicates that a packet was not able to reach the destination network. +// +// +stateify savable +type icmpv4DestinationNetUnreachableSockError struct { + icmpv4DestinationUnreachableSockError +} + +// Code implements tcpip.SockErrorCause. +func (*icmpv4DestinationNetUnreachableSockError) Code() uint8 { + return uint8(header.ICMPv4NetUnreachable) +} + +// Kind implements stack.TransportError. +func (*icmpv4DestinationNetUnreachableSockError) Kind() stack.TransportErrorKind { + return stack.DestinationNetworkUnreachableTransportError +} + var _ stack.TransportError = (*icmpv4DestinationPortUnreachableSockError)(nil) // icmpv4DestinationPortUnreachableSockError is an ICMPv4 Destination Port @@ -91,6 +113,89 @@ func (*icmpv4DestinationPortUnreachableSockError) Kind() stack.TransportErrorKin return stack.DestinationPortUnreachableTransportError } +var _ stack.TransportError = (*icmpv4DestinationProtoUnreachableSockError)(nil) + +// icmpv4DestinationProtoUnreachableSockError is an ICMPv4 Destination Protocol +// Unreachable error. +// +// It indicates that a packet reached the destination host, but the transport +// protocol was not reachable +// +// +stateify savable +type icmpv4DestinationProtoUnreachableSockError struct { + icmpv4DestinationUnreachableSockError +} + +// Code implements tcpip.SockErrorCause. +func (*icmpv4DestinationProtoUnreachableSockError) Code() uint8 { + return uint8(header.ICMPv4ProtoUnreachable) +} + +// Kind implements stack.TransportError. 
+func (*icmpv4DestinationProtoUnreachableSockError) Kind() stack.TransportErrorKind { + return stack.DestinationProtoUnreachableTransportError +} + +var _ stack.TransportError = (*icmpv4SourceRouteFailedSockError)(nil) + +// icmpv4SourceRouteFailedSockError is an ICMPv4 Destination Unreachable error +// due to source route failed. +// +// +stateify savable +type icmpv4SourceRouteFailedSockError struct { + icmpv4DestinationUnreachableSockError +} + +// Code implements tcpip.SockErrorCause. +func (*icmpv4SourceRouteFailedSockError) Code() uint8 { + return uint8(header.ICMPv4SourceRouteFailed) +} + +// Kind implements stack.TransportError. +func (*icmpv4SourceRouteFailedSockError) Kind() stack.TransportErrorKind { + return stack.SourceRouteFailedTransportError +} + +var _ stack.TransportError = (*icmpv4SourceHostIsolatedSockError)(nil) + +// icmpv4SourceHostIsolatedSockError is an ICMPv4 Destination Unreachable error +// due to source host isolated (not on the network). +// +// +stateify savable +type icmpv4SourceHostIsolatedSockError struct { + icmpv4DestinationUnreachableSockError +} + +// Code implements tcpip.SockErrorCause. +func (*icmpv4SourceHostIsolatedSockError) Code() uint8 { + return uint8(header.ICMPv4SourceHostIsolated) +} + +// Kind implements stack.TransportError. +func (*icmpv4SourceHostIsolatedSockError) Kind() stack.TransportErrorKind { + return stack.SourceHostIsolatedTransportError +} + +var _ stack.TransportError = (*icmpv4DestinationHostUnknownSockError)(nil) + +// icmpv4DestinationHostUnknownSockError is an ICMPv4 Destination Unreachable +// error due to destination host unknown/down. +// +// +stateify savable +type icmpv4DestinationHostUnknownSockError struct { + icmpv4DestinationUnreachableSockError +} + +// Code implements tcpip.SockErrorCause. +func (*icmpv4DestinationHostUnknownSockError) Code() uint8 { + return uint8(header.ICMPv4DestinationHostUnknown) +} + +// Kind implements stack.TransportError. +func (*icmpv4DestinationHostUnknownSockError) Kind() stack.TransportErrorKind { + return stack.DestinationHostDownTransportError +} + var _ stack.TransportError = (*icmpv4FragmentationNeededSockError)(nil) // icmpv4FragmentationNeededSockError is an ICMPv4 Destination Unreachable error @@ -256,8 +361,7 @@ func (e *endpoint) handleICMP(pkt stack.PacketBufferPtr) { // It's possible that a raw socket expects to receive this. e.dispatcher.DeliverTransportPacket(header.ICMPv4ProtocolNumber, pkt) - pkt = stack.PacketBufferPtr{} - _ = pkt // Suppress unused variable warning. + pkt = nil sent := e.stats.icmp.packetsSent if !e.protocol.allowICMPReply(header.ICMPv4EchoReply, header.ICMPv4UnusedCode) { @@ -270,7 +374,7 @@ func (e *endpoint) handleICMP(pkt stack.PacketBufferPtr) { // or multicast address). localAddr := ipHdr.DestinationAddress() if localAddressBroadcast || header.IsV4MulticastAddress(localAddr) { - localAddr = "" + localAddr = tcpip.Address{} } r, err := e.protocol.stack.FindRoute(e.nic.ID(), localAddr, ipHdr.SourceAddress(), ProtocolNumber, false /* multicastLoop */) @@ -310,7 +414,7 @@ func (e *endpoint) handleICMP(pkt stack.PacketBufferPtr) { // // Take the base of the incoming request IP header but replace the options. 
replyHeaderLength := uint8(header.IPv4MinimumSize + len(newOptions)) - replyIPHdrView := bufferv2.NewView(int(replyHeaderLength)) + replyIPHdrView := buffer.NewView(int(replyHeaderLength)) replyIPHdrView.Write(iph[:header.IPv4MinimumSize]) replyIPHdrView.Write(newOptions) replyIPHdr := header.IPv4(replyIPHdrView.AsSlice()) @@ -327,7 +431,7 @@ func (e *endpoint) handleICMP(pkt stack.PacketBufferPtr) { replyICMPHdr.SetChecksum(0) replyICMPHdr.SetChecksum(^checksum.Checksum(replyData.AsSlice(), 0)) - replyBuf := bufferv2.MakeWithView(replyIPHdrView) + replyBuf := buffer.MakeWithView(replyIPHdrView) replyBuf.Append(replyData.Clone()) replyPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{ ReserveHeaderBytes: int(r.MaxHeaderLength()), @@ -362,7 +466,17 @@ func (e *endpoint) handleICMP(pkt stack.PacketBufferPtr) { mtu := h.MTU() code := h.Code() switch code { - case header.ICMPv4HostUnreachable: + case header.ICMPv4NetUnreachable, + header.ICMPv4DestinationNetworkUnknown, + header.ICMPv4NetUnreachableForTos, + header.ICMPv4NetProhibited: + e.handleControl(&icmpv4DestinationNetUnreachableSockError{}, pkt) + case header.ICMPv4HostUnreachable, + header.ICMPv4HostProhibited, + header.ICMPv4AdminProhibited, + header.ICMPv4HostUnreachableForTos, + header.ICMPv4HostPrecedenceViolation, + header.ICMPv4PrecedenceCutInEffect: e.handleControl(&icmpv4DestinationHostUnreachableSockError{}, pkt) case header.ICMPv4PortUnreachable: e.handleControl(&icmpv4DestinationPortUnreachableSockError{}, pkt) @@ -372,6 +486,14 @@ func (e *endpoint) handleICMP(pkt stack.PacketBufferPtr) { networkMTU = 0 } e.handleControl(&icmpv4FragmentationNeededSockError{mtu: networkMTU}, pkt) + case header.ICMPv4ProtoUnreachable: + e.handleControl(&icmpv4DestinationProtoUnreachableSockError{}, pkt) + case header.ICMPv4SourceRouteFailed: + e.handleControl(&icmpv4SourceRouteFailedSockError{}, pkt) + case header.ICMPv4SourceHostIsolated: + e.handleControl(&icmpv4SourceHostIsolatedSockError{}, pkt) + case header.ICMPv4DestinationHostUnknown: + e.handleControl(&icmpv4DestinationHostUnknownSockError{}, pkt) } case header.ICMPv4SrcQuench: received.srcQuench.Increment() @@ -522,7 +644,7 @@ func (p *protocol) returnError(reason icmpReason, pkt stack.PacketBufferPtr, del // destination address of a packet we are forwarding. localAddr := origIPHdrDst if !deliveredLocally { - localAddr = "" + localAddr = tcpip.Address{} } // Even if we were able to receive a packet from some remote, we may not have @@ -645,7 +767,7 @@ func (p *protocol) returnError(reason icmpReason, pkt stack.PacketBufferPtr, del // required. This is now the payload of the new ICMP packet and no longer // considered a packet in its own right. 
- payload := bufferv2.MakeWithView(pkt.NetworkHeader().View()) + payload := buffer.MakeWithView(pkt.NetworkHeader().View()) payload.Append(pkt.TransportHeader().View()) if dataCap := payloadLen - int(payload.Size()); dataCap > 0 { buf := pkt.Data().ToBuffer() diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/igmp.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/igmp.go index bf338ea5e6..6db1cf173c 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/igmp.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/igmp.go @@ -16,10 +16,10 @@ package ipv4 import ( "fmt" + "math" "time" - "gvisor.dev/gvisor/pkg/atomicbitops" - "gvisor.dev/gvisor/pkg/bufferv2" + "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/network/internal/ip" @@ -27,11 +27,6 @@ import ( ) const ( - // igmpV1PresentDefault is the initial state for igmpV1Present in the - // igmpState. As per RFC 2236 Page 9 says "No IGMPv1 Router Present ... is - // the initial state." - igmpV1PresentDefault = 0 - // v1RouterPresentTimeout from RFC 2236 Section 8.11, Page 18 // See note on igmpState.igmpV1Present for more detail. v1RouterPresentTimeout = 400 * time.Second @@ -50,6 +45,50 @@ const ( UnsolicitedReportIntervalMax = 10 * time.Second ) +type protocolMode int + +const ( + protocolModeV2OrV3 protocolMode = iota + protocolModeV1 + // protocolModeV1Compatibility is for maintaining compatibility with IGMPv1 + // Routers. + // + // Per RFC 2236 Section 4 Page 6: "The IGMPv1 router expects Version 1 + // Membership Reports in response to its Queries, and will not pay + // attention to Version 2 Membership Reports. Therefore, a state variable + // MUST be kept for each interface, describing whether the multicast + // Querier on that interface is running IGMPv1 or IGMPv2. This variable + // MUST be based upon whether or not an IGMPv1 query was heard in the last + // [Version 1 Router Present Timeout] seconds". + protocolModeV1Compatibility +) + +// IGMPVersion is the forced version of IGMP. +type IGMPVersion int + +const ( + _ IGMPVersion = iota + // IGMPVersion1 indicates IGMPv1. + IGMPVersion1 + // IGMPVersion2 indicates IGMPv2. Note that IGMP may still fallback to V1 + // compatibility mode as required by IGMPv2. + IGMPVersion2 + // IGMPVersion3 indicates IGMPv3. Note that IGMP may still fallback to V2 + // compatibility mode as required by IGMPv3. + IGMPVersion3 +) + +// IGMPEndpoint is a network endpoint that supports IGMP. +type IGMPEndpoint interface { + // SetIGMPVersion sets the IGMP version. + // + // Returns the previous IGMP version. + SetIGMPVersion(IGMPVersion) IGMPVersion + + // GetIGMPVersion returns the IGMP version. + GetIGMPVersion() IGMPVersion +} + // IGMPOptions holds options for IGMP. type IGMPOptions struct { // Enabled indicates whether IGMP will be performed. @@ -74,17 +113,8 @@ type igmpState struct { genericMulticastProtocol ip.GenericMulticastProtocolState - // igmpV1Present is for maintaining compatibility with IGMPv1 Routers, from - // RFC 2236 Section 4 Page 6: "The IGMPv1 router expects Version 1 - // Membership Reports in response to its Queries, and will not pay - // attention to Version 2 Membership Reports. Therefore, a state variable - // MUST be kept for each interface, describing whether the multicast - // Querier on that interface is running IGMPv1 or IGMPv2. This variable - // MUST be based upon whether or not an IGMPv1 query was heard in the last - // [Version 1 Router Present Timeout] seconds". 
- // - // Holds a value of 1 when true, 0 when false. - igmpV1Present atomicbitops.Uint32 + // mode is used to configure the version of IGMP to perform. + mode protocolMode // igmpV1Job is scheduled when this interface receives an IGMPv1 style // message, upon expiration the igmpV1Present flag is cleared. @@ -104,8 +134,12 @@ func (igmp *igmpState) Enabled() bool { // +checklocksread:igmp.ep.mu func (igmp *igmpState) SendReport(groupAddress tcpip.Address) (bool, tcpip.Error) { igmpType := header.IGMPv2MembershipReport - if igmp.v1Present() { + switch igmp.mode { + case protocolModeV2OrV3: + case protocolModeV1, protocolModeV1Compatibility: igmpType = header.IGMPv1MembershipReport + default: + panic(fmt.Sprintf("unrecognized mode = %d", igmp.mode)) } return igmp.writePacket(groupAddress, groupAddress, igmpType) } @@ -118,11 +152,15 @@ func (igmp *igmpState) SendLeave(groupAddress tcpip.Address) tcpip.Error { // Querier is running IGMPv1, this action SHOULD be skipped. If the flag // saying we were the last host to report is cleared, this action MAY be // skipped." - if igmp.v1Present() { + switch igmp.mode { + case protocolModeV2OrV3: + _, err := igmp.writePacket(header.IPv4AllRoutersGroup, groupAddress, header.IGMPLeaveGroup) + return err + case protocolModeV1, protocolModeV1Compatibility: return nil + default: + panic(fmt.Sprintf("unrecognized mode = %d", igmp.mode)) } - _, err := igmp.writePacket(header.IPv4AllRoutersGroup, groupAddress, header.IGMPLeaveGroup) - return err } // ShouldPerformProtocol implements ip.MulticastGroupProtocol. @@ -136,6 +174,108 @@ func (igmp *igmpState) ShouldPerformProtocol(groupAddress tcpip.Address) bool { return groupAddress != header.IPv4AllSystems } +type igmpv3ReportBuilder struct { + igmp *igmpState + + records []header.IGMPv3ReportGroupAddressRecordSerializer +} + +// AddRecord implements ip.MulticastGroupProtocolV2ReportBuilder. +func (b *igmpv3ReportBuilder) AddRecord(genericRecordType ip.MulticastGroupProtocolV2ReportRecordType, groupAddress tcpip.Address) { + var recordType header.IGMPv3ReportRecordType + switch genericRecordType { + case ip.MulticastGroupProtocolV2ReportRecordModeIsInclude: + recordType = header.IGMPv3ReportRecordModeIsInclude + case ip.MulticastGroupProtocolV2ReportRecordModeIsExclude: + recordType = header.IGMPv3ReportRecordModeIsExclude + case ip.MulticastGroupProtocolV2ReportRecordChangeToIncludeMode: + recordType = header.IGMPv3ReportRecordChangeToIncludeMode + case ip.MulticastGroupProtocolV2ReportRecordChangeToExcludeMode: + recordType = header.IGMPv3ReportRecordChangeToExcludeMode + case ip.MulticastGroupProtocolV2ReportRecordAllowNewSources: + recordType = header.IGMPv3ReportRecordAllowNewSources + case ip.MulticastGroupProtocolV2ReportRecordBlockOldSources: + recordType = header.IGMPv3ReportRecordBlockOldSources + default: + panic(fmt.Sprintf("unrecognied genericRecordType = %d", genericRecordType)) + } + + b.records = append(b.records, header.IGMPv3ReportGroupAddressRecordSerializer{ + RecordType: recordType, + GroupAddress: groupAddress, + Sources: nil, + }) +} + +// Send implements ip.MulticastGroupProtocolV2ReportBuilder. 
+// +// +checklocksread:b.igmp.ep.mu +func (b *igmpv3ReportBuilder) Send() (sent bool, err tcpip.Error) { + if len(b.records) == 0 { + return false, err + } + + options := header.IPv4OptionsSerializer{ + &header.IPv4SerializableRouterAlertOption{}, + } + mtu := int(b.igmp.ep.MTU()) - int(options.Length()) + + allSentWithSpecifiedAddress := true + var firstErr tcpip.Error + for records := b.records; len(records) != 0; { + spaceLeft := mtu + maxRecords := 0 + + for ; maxRecords < len(records); maxRecords++ { + tmp := spaceLeft - records[maxRecords].Length() + if tmp > 0 { + spaceLeft = tmp + } else { + break + } + } + + serializer := header.IGMPv3ReportSerializer{Records: records[:maxRecords]} + records = records[maxRecords:] + + icmpView := buffer.NewViewSize(serializer.Length()) + serializer.SerializeInto(icmpView.AsSlice()) + if sentWithSpecifiedAddress, err := b.igmp.writePacketInner( + icmpView, + b.igmp.ep.stats.igmp.packetsSent.v3MembershipReport, + options, + header.IGMPv3RoutersAddress, + ); err != nil { + if firstErr != nil { + firstErr = nil + } + allSentWithSpecifiedAddress = false + } else if !sentWithSpecifiedAddress { + allSentWithSpecifiedAddress = false + } + } + + return allSentWithSpecifiedAddress, firstErr +} + +// NewReportV2Builder implements ip.MulticastGroupProtocol. +func (igmp *igmpState) NewReportV2Builder() ip.MulticastGroupProtocolV2ReportBuilder { + return &igmpv3ReportBuilder{igmp: igmp} +} + +// V2QueryMaxRespCodeToV2Delay implements ip.MulticastGroupProtocol. +func (*igmpState) V2QueryMaxRespCodeToV2Delay(code uint16) time.Duration { + if code > math.MaxUint8 { + panic(fmt.Sprintf("got IGMPv3 MaxRespCode = %d, want <= %d", code, math.MaxUint8)) + } + return header.IGMPv3MaximumResponseDelay(uint8(code)) +} + +// V2QueryMaxRespCodeToV1Delay implements ip.MulticastGroupProtocol. +func (*igmpState) V2QueryMaxRespCodeToV1Delay(code uint16) time.Duration { + return time.Duration(code) * time.Millisecond +} + // init sets up an igmpState struct, and is required to be called before using // a new igmpState. // @@ -148,9 +288,11 @@ func (igmp *igmpState) init(ep *endpoint) { Protocol: igmp, MaxUnsolicitedReportDelay: UnsolicitedReportIntervalMax, }) - igmp.igmpV1Present = atomicbitops.FromUint32(igmpV1PresentDefault) + // As per RFC 2236 Page 9 says "No IGMPv1 Router Present ... is + // the initial state. 
+ igmp.mode = protocolModeV2OrV3 igmp.igmpV1Job = tcpip.NewJob(ep.protocol.stack.Clock(), &ep.mu, func() { - igmp.setV1Present(false) + igmp.mode = protocolModeV2OrV3 }) } @@ -206,12 +348,16 @@ func (igmp *igmpState) isPacketValidLocked(pkt stack.PacketBufferPtr, messageTyp // +checklocks:igmp.ep.mu func (igmp *igmpState) handleIGMP(pkt stack.PacketBufferPtr, hasRouterAlertOption bool) { received := igmp.ep.stats.igmp.packetsReceived - hdr, ok := pkt.Data().PullUp(header.IGMPMinimumSize) + hdr, ok := pkt.Data().PullUp(pkt.Data().Size()) if !ok { received.invalid.Increment() return } h := header.IGMP(hdr) + if len(h) < header.IGMPMinimumSize { + received.invalid.Increment() + return + } // As per RFC 1071 section 1.3, // @@ -231,7 +377,14 @@ func (igmp *igmpState) handleIGMP(pkt stack.PacketBufferPtr, hasRouterAlertOptio switch h.Type() { case header.IGMPMembershipQuery: received.membershipQuery.Increment() - if !isValid(header.IGMPQueryMinimumSize) { + if len(h) >= header.IGMPv3QueryMinimumSize { + if isValid(header.IGMPv3QueryMinimumSize) { + igmp.handleMembershipQueryV3(header.IGMPv3Query(h)) + } else { + received.invalid.Increment() + } + return + } else if !isValid(header.IGMPQueryMinimumSize) { received.invalid.Increment() return } @@ -267,21 +420,15 @@ func (igmp *igmpState) handleIGMP(pkt stack.PacketBufferPtr, hasRouterAlertOptio } } -func (igmp *igmpState) v1Present() bool { - return igmp.igmpV1Present.Load() == 1 -} - -func (igmp *igmpState) setV1Present(v bool) { - if v { - igmp.igmpV1Present.Store(1) - } else { - igmp.igmpV1Present.Store(0) - } -} - func (igmp *igmpState) resetV1Present() { igmp.igmpV1Job.Cancel() - igmp.setV1Present(false) + switch igmp.mode { + case protocolModeV2OrV3, protocolModeV1: + case protocolModeV1Compatibility: + igmp.mode = protocolModeV2OrV3 + default: + panic(fmt.Sprintf("unrecognized mode = %d", igmp.mode)) + } } // handleMembershipQuery handles a membership query. @@ -292,15 +439,40 @@ func (igmp *igmpState) handleMembershipQuery(groupAddress tcpip.Address, maxResp // then change the state to note that an IGMPv1 router is present and // schedule the query received Job. if maxRespTime == 0 && igmp.Enabled() { - igmp.igmpV1Job.Cancel() - igmp.igmpV1Job.Schedule(v1RouterPresentTimeout) - igmp.setV1Present(true) + switch igmp.mode { + case protocolModeV2OrV3, protocolModeV1Compatibility: + igmp.igmpV1Job.Cancel() + igmp.igmpV1Job.Schedule(v1RouterPresentTimeout) + igmp.mode = protocolModeV1Compatibility + case protocolModeV1: + default: + panic(fmt.Sprintf("unrecognized mode = %d", igmp.mode)) + } + maxRespTime = v1MaxRespTime } igmp.genericMulticastProtocol.HandleQueryLocked(groupAddress, maxRespTime) } +// handleMembershipQueryV3 handles a membership query. +// +// +checklocks:igmp.ep.mu +func (igmp *igmpState) handleMembershipQueryV3(igmpHdr header.IGMPv3Query) { + sources, ok := igmpHdr.Sources() + if !ok { + return + } + + igmp.genericMulticastProtocol.HandleQueryV2Locked( + igmpHdr.GroupAddress(), + uint16(igmpHdr.MaximumResponseCode()), + sources, + igmpHdr.QuerierRobustnessVariable(), + igmpHdr.QuerierQueryInterval(), + ) +} + // handleMembershipReport handles a membership report. 
// // +checklocks:igmp.ep.mu @@ -312,15 +484,40 @@ func (igmp *igmpState) handleMembershipReport(groupAddress tcpip.Address) { // // +checklocksread:igmp.ep.mu func (igmp *igmpState) writePacket(destAddress tcpip.Address, groupAddress tcpip.Address, igmpType header.IGMPType) (bool, tcpip.Error) { - igmpView := bufferv2.NewViewSize(header.IGMPReportMinimumSize) + igmpView := buffer.NewViewSize(header.IGMPReportMinimumSize) igmpData := header.IGMP(igmpView.AsSlice()) igmpData.SetType(igmpType) igmpData.SetGroupAddress(groupAddress) igmpData.SetChecksum(header.IGMPCalculateChecksum(igmpData)) + var reportType tcpip.MultiCounterStat + sentStats := igmp.ep.stats.igmp.packetsSent + switch igmpType { + case header.IGMPv1MembershipReport: + reportType = sentStats.v1MembershipReport + case header.IGMPv2MembershipReport: + reportType = sentStats.v2MembershipReport + case header.IGMPLeaveGroup: + reportType = sentStats.leaveGroup + default: + panic(fmt.Sprintf("unrecognized igmp type = %d", igmpType)) + } + + return igmp.writePacketInner( + igmpView, + reportType, + header.IPv4OptionsSerializer{ + &header.IPv4SerializableRouterAlertOption{}, + }, + destAddress, + ) +} + +// +checklocksread:igmp.ep.mu +func (igmp *igmpState) writePacketInner(buf *buffer.View, reportStat tcpip.MultiCounterStat, options header.IPv4OptionsSerializer, destAddress tcpip.Address) (bool, tcpip.Error) { pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{ ReserveHeaderBytes: int(igmp.ep.MaxHeaderLength()), - Payload: bufferv2.MakeWithView(igmpView), + Payload: buffer.MakeWithView(buf), }) defer pkt.DecRef() @@ -335,9 +532,7 @@ func (igmp *igmpState) writePacket(destAddress tcpip.Address, groupAddress tcpip Protocol: header.IGMPProtocolNumber, TTL: header.IGMPTTL, TOS: stack.DefaultTOS, - }, header.IPv4OptionsSerializer{ - &header.IPv4SerializableRouterAlertOption{}, - }); err != nil { + }, options); err != nil { panic(fmt.Sprintf("failed to add IP header: %s", err)) } @@ -346,16 +541,7 @@ func (igmp *igmpState) writePacket(destAddress tcpip.Address, groupAddress tcpip sentStats.dropped.Increment() return false, err } - switch igmpType { - case header.IGMPv1MembershipReport: - sentStats.v1MembershipReport.Increment() - case header.IGMPv2MembershipReport: - sentStats.v2MembershipReport.Increment() - case header.IGMPLeaveGroup: - sentStats.leaveGroup.Increment() - default: - panic(fmt.Sprintf("unrecognized igmp type = %d", igmpType)) - } + reportStat.Increment() return true, nil } @@ -410,7 +596,55 @@ func (igmp *igmpState) initializeAll() { // sendQueuedReports attempts to send any reports that are queued for sending. // -// +checklocksread:igmp.ep.mu +// +checklocks:igmp.ep.mu func (igmp *igmpState) sendQueuedReports() { igmp.genericMulticastProtocol.SendQueuedReportsLocked() } + +// setVersion sets the IGMP version. +// +// +checklocks:igmp.ep.mu +func (igmp *igmpState) setVersion(v IGMPVersion) IGMPVersion { + prev := igmp.mode + igmp.igmpV1Job.Cancel() + + var prevGenericModeV1 bool + switch v { + case IGMPVersion3: + prevGenericModeV1 = igmp.genericMulticastProtocol.SetV1ModeLocked(false) + igmp.mode = protocolModeV2OrV3 + case IGMPVersion2: + // IGMPv1 and IGMPv2 map to V1 of the generic multicast protocol. + prevGenericModeV1 = igmp.genericMulticastProtocol.SetV1ModeLocked(true) + igmp.mode = protocolModeV2OrV3 + case IGMPVersion1: + // IGMPv1 and IGMPv2 map to V1 of the generic multicast protocol. 
+ prevGenericModeV1 = igmp.genericMulticastProtocol.SetV1ModeLocked(true) + igmp.mode = protocolModeV1 + default: + panic(fmt.Sprintf("unrecognized version = %d", v)) + } + + return toIGMPVersion(prev, prevGenericModeV1) +} + +func toIGMPVersion(mode protocolMode, genericV1 bool) IGMPVersion { + switch mode { + case protocolModeV2OrV3, protocolModeV1Compatibility: + if genericV1 { + return IGMPVersion2 + } + return IGMPVersion3 + case protocolModeV1: + return IGMPVersion1 + default: + panic(fmt.Sprintf("unrecognized mode = %d", mode)) + } +} + +// getVersion returns the IGMP version. +// +// +checklocksread:igmp.ep.mu +func (igmp *igmpState) getVersion() IGMPVersion { + return toIGMPVersion(igmp.mode, igmp.genericMulticastProtocol.GetV1ModeLocked()) +} diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4.go index c669b888d2..2e5ab02649 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4.go @@ -22,7 +22,7 @@ import ( "time" "gvisor.dev/gvisor/pkg/atomicbitops" - "gvisor.dev/gvisor/pkg/bufferv2" + "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" @@ -77,6 +77,7 @@ var _ stack.MulticastForwardingNetworkEndpoint = (*endpoint)(nil) var _ stack.GroupAddressableEndpoint = (*endpoint)(nil) var _ stack.AddressableEndpoint = (*endpoint)(nil) var _ stack.NetworkEndpoint = (*endpoint)(nil) +var _ IGMPEndpoint = (*endpoint)(nil) type endpoint struct { nic stack.NetworkInterface @@ -109,6 +110,32 @@ type endpoint struct { igmp igmpState } +// SetIGMPVersion implements IGMPEndpoint. +func (e *endpoint) SetIGMPVersion(v IGMPVersion) IGMPVersion { + e.mu.Lock() + defer e.mu.Unlock() + return e.setIGMPVersionLocked(v) +} + +// GetIGMPVersion implements IGMPEndpoint. +func (e *endpoint) GetIGMPVersion() IGMPVersion { + e.mu.RLock() + defer e.mu.RUnlock() + return e.getIGMPVersionLocked() +} + +// +checklocks:e.mu +// +checklocksalias:e.igmp.ep.mu=e.mu +func (e *endpoint) setIGMPVersionLocked(v IGMPVersion) IGMPVersion { + return e.igmp.setVersion(v) +} + +// +checklocksread:e.mu +// +checklocksalias:e.igmp.ep.mu=e.mu +func (e *endpoint) getIGMPVersionLocked() IGMPVersion { + return e.igmp.getVersion() +} + // HandleLinkResolutionFailure implements stack.LinkResolvableNetworkEndpoint. func (e *endpoint) HandleLinkResolutionFailure(pkt stack.PacketBufferPtr) { // If we are operating as a router, return an ICMP error to the original @@ -691,6 +718,8 @@ func (e *endpoint) forwardPacketWithRoute(route *stack.Route, pkt stack.PacketBu // necessary and the bit is also set. 
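The setVersion/getVersion plumbing above, together with the endpoint methods it feeds, is what exposes the new ipv4.IGMPEndpoint interface. A minimal sketch of how a caller might drive it; the package name, the stack s, nicID, and the error handling are assumptions for illustration only, and it assumes stack.Stack's GetNetworkEndpoint accessor rather than anything introduced by this change:

package igmpexample

import (
	"log"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
)

// forceIGMPv2 switches the IPv4 endpoint on nicID to IGMPv2 and reports the
// previously configured version.
func forceIGMPv2(s *stack.Stack, nicID tcpip.NICID) {
	ep, err := s.GetNetworkEndpoint(nicID, ipv4.ProtocolNumber)
	if err != nil {
		log.Fatalf("GetNetworkEndpoint(%d, ipv4): %s", nicID, err)
	}
	igmpEP, ok := ep.(ipv4.IGMPEndpoint)
	if !ok {
		log.Fatalf("%T does not implement ipv4.IGMPEndpoint", ep)
	}
	// SetIGMPVersion returns the version that was in effect before the call.
	prev := igmpEP.SetIGMPVersion(ipv4.IGMPVersion2)
	log.Printf("IGMP version: %d -> %d", prev, igmpEP.GetIGMPVersion())
}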
_ = e.protocol.returnError(&icmpReasonFragmentationNeeded{}, pkt, false /* deliveredLocally */) return &ip.ErrMessageTooLong{} + case *tcpip.ErrNoBufferSpace: + return &ip.ErrOutgoingDeviceNoBufferSpace{} default: return &ip.ErrOther{Err: err} } @@ -744,7 +773,7 @@ func (e *endpoint) forwardUnicastPacket(pkt stack.PacketBufferPtr) ip.Forwarding return nil } - r, err := stk.FindRoute(0, "", dstAddr, ProtocolNumber, false /* multicastLoop */) + r, err := stk.FindRoute(0, tcpip.Address{}, dstAddr, ProtocolNumber, false /* multicastLoop */) switch err.(type) { case nil: // TODO(https://gvisor.dev/issues/8105): We should not observe ErrHostUnreachable from route @@ -854,6 +883,35 @@ func (e *endpoint) handleLocalPacket(pkt stack.PacketBufferPtr, canSkipRXChecksu } func validateAddressesForForwarding(h header.IPv4) ip.ForwardingError { + srcAddr := h.SourceAddress() + + // As per RFC 5735 section 3, + // + // 0.0.0.0/8 - Addresses in this block refer to source hosts on "this" + // network. Address 0.0.0.0/32 may be used as a source address for this + // host on this network; other addresses within 0.0.0.0/8 may be used to + // refer to specified hosts on this network ([RFC1122], Section 3.2.1.3). + // + // And RFC 6890 section 2.2.2, + // + // +----------------------+----------------------------+ + // | Attribute | Value | + // +----------------------+----------------------------+ + // | Address Block | 0.0.0.0/8 | + // | Name | "This host on this network"| + // | RFC | [RFC1122], Section 3.2.1.3 | + // | Allocation Date | September 1981 | + // | Termination Date | N/A | + // | Source | True | + // | Destination | False | + // | Forwardable | False | + // | Global | False | + // | Reserved-by-Protocol | True | + // +----------------------+----------------------------+ + if header.IPv4CurrentNetworkSubnet.Contains(srcAddr) { + return &ip.ErrInitializingSourceAddress{} + } + // As per RFC 3927 section 7, // // A router MUST NOT forward a packet with an IPv4 Link-Local source or @@ -864,10 +922,10 @@ func validateAddressesForForwarding(h header.IPv4) ip.ForwardingError { // destination address MUST NOT forward the packet. This prevents // forwarding of packets back onto the network segment from which they // originated, or to any other segment. - if header.IsV4LinkLocalUnicastAddress(h.SourceAddress()) { + if header.IsV4LinkLocalUnicastAddress(srcAddr) { return &ip.ErrLinkLocalSourceAddress{} } - if header.IsV4LinkLocalUnicastAddress(h.DestinationAddress()) || header.IsV4LinkLocalMulticastAddress(h.DestinationAddress()) { + if dstAddr := h.DestinationAddress(); header.IsV4LinkLocalUnicastAddress(dstAddr) || header.IsV4LinkLocalMulticastAddress(dstAddr) { return &ip.ErrLinkLocalDestinationAddress{} } return nil @@ -1105,9 +1163,11 @@ func (e *endpoint) handleValidatedPacket(h header.IPv4, pkt stack.PacketBufferPt // counters. 
func (e *endpoint) handleForwardingError(err ip.ForwardingError) { stats := e.stats.ip - switch err.(type) { + switch err := err.(type) { case nil: return + case *ip.ErrInitializingSourceAddress: + stats.Forwarding.InitializingSource.Increment() case *ip.ErrLinkLocalSourceAddress: stats.Forwarding.LinkLocalSource.Increment() case *ip.ErrLinkLocalDestinationAddress: @@ -1126,6 +1186,8 @@ func (e *endpoint) handleForwardingError(err ip.ForwardingError) { stats.Forwarding.UnexpectedMulticastInputInterface.Increment() case *ip.ErrUnknownOutputEndpoint: stats.Forwarding.UnknownOutputEndpoint.Increment() + case *ip.ErrOutgoingDeviceNoBufferSpace: + stats.Forwarding.OutgoingDeviceNoBufferSpace.Increment() default: panic(fmt.Sprintf("unrecognized forwarding error: %s", err)) } @@ -1665,7 +1727,7 @@ func (p *protocol) forwardPendingMulticastPacket(pkt stack.PacketBufferPtr, inst } func (p *protocol) isUnicastAddress(addr tcpip.Address) bool { - if len(addr) != header.IPv4AddressSize { + if addr.BitLen() != header.IPv4AddressSizeBits { return false } @@ -1699,7 +1761,7 @@ func (p *protocol) isSubnetLocalBroadcastAddress(addr tcpip.Address) bool { // returns the parsed IP header. // // Returns true if the IP header was successfully parsed. -func (p *protocol) parseAndValidate(pkt stack.PacketBufferPtr) (*bufferv2.View, bool) { +func (p *protocol) parseAndValidate(pkt stack.PacketBufferPtr) (*buffer.View, bool) { transProtoNum, hasTransportHdr, ok := p.Parse(pkt) if !ok { return nil, false @@ -1822,8 +1884,9 @@ func packetMustBeFragmented(pkt stack.PacketBufferPtr, networkMTU uint32) bool { // on a tcpip.Address (a string) without the need to convert it to a byte slice, // which would cause an allocation. func addressToUint32(addr tcpip.Address) uint32 { - _ = addr[3] // bounds check hint to compiler - return uint32(addr[0]) | uint32(addr[1])<<8 | uint32(addr[2])<<16 | uint32(addr[3])<<24 + addrBytes := addr.As4() + _ = addrBytes[3] // bounds check hint to compiler + return uint32(addrBytes[0]) | uint32(addrBytes[1])<<8 | uint32(addrBytes[2])<<16 | uint32(addrBytes[3])<<24 } // hashRoute calculates a hash value for the given source/destination pair using @@ -2240,7 +2303,7 @@ func (e *endpoint) processIPOptions(pkt stack.PacketBufferPtr, opts header.IPv4O // really forwarding packets as we may need to get two addresses, for rx and // tx interfaces. We will also have to take usage into account. 
localAddress := e.MainAddress().Address - if len(localAddress) == 0 { + if localAddress.BitLen() == 0 { h := header.IPv4(pkt.NetworkHeader().Slice()) dstAddr := h.DestinationAddress() if pkt.NetworkPacketInfo.LocalAddressBroadcast || header.IsV4MulticastAddress(dstAddr) { diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4_state_autogen.go index 19b672251b..d538eecb97 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/ipv4_state_autogen.go @@ -52,6 +52,31 @@ func (i *icmpv4DestinationHostUnreachableSockError) StateLoad(stateSourceObject stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError) } +func (i *icmpv4DestinationNetUnreachableSockError) StateTypeName() string { + return "pkg/tcpip/network/ipv4.icmpv4DestinationNetUnreachableSockError" +} + +func (i *icmpv4DestinationNetUnreachableSockError) StateFields() []string { + return []string{ + "icmpv4DestinationUnreachableSockError", + } +} + +func (i *icmpv4DestinationNetUnreachableSockError) beforeSave() {} + +// +checklocksignore +func (i *icmpv4DestinationNetUnreachableSockError) StateSave(stateSinkObject state.Sink) { + i.beforeSave() + stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError) +} + +func (i *icmpv4DestinationNetUnreachableSockError) afterLoad() {} + +// +checklocksignore +func (i *icmpv4DestinationNetUnreachableSockError) StateLoad(stateSourceObject state.Source) { + stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError) +} + func (i *icmpv4DestinationPortUnreachableSockError) StateTypeName() string { return "pkg/tcpip/network/ipv4.icmpv4DestinationPortUnreachableSockError" } @@ -77,6 +102,106 @@ func (i *icmpv4DestinationPortUnreachableSockError) StateLoad(stateSourceObject stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError) } +func (i *icmpv4DestinationProtoUnreachableSockError) StateTypeName() string { + return "pkg/tcpip/network/ipv4.icmpv4DestinationProtoUnreachableSockError" +} + +func (i *icmpv4DestinationProtoUnreachableSockError) StateFields() []string { + return []string{ + "icmpv4DestinationUnreachableSockError", + } +} + +func (i *icmpv4DestinationProtoUnreachableSockError) beforeSave() {} + +// +checklocksignore +func (i *icmpv4DestinationProtoUnreachableSockError) StateSave(stateSinkObject state.Sink) { + i.beforeSave() + stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError) +} + +func (i *icmpv4DestinationProtoUnreachableSockError) afterLoad() {} + +// +checklocksignore +func (i *icmpv4DestinationProtoUnreachableSockError) StateLoad(stateSourceObject state.Source) { + stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError) +} + +func (i *icmpv4SourceRouteFailedSockError) StateTypeName() string { + return "pkg/tcpip/network/ipv4.icmpv4SourceRouteFailedSockError" +} + +func (i *icmpv4SourceRouteFailedSockError) StateFields() []string { + return []string{ + "icmpv4DestinationUnreachableSockError", + } +} + +func (i *icmpv4SourceRouteFailedSockError) beforeSave() {} + +// +checklocksignore +func (i *icmpv4SourceRouteFailedSockError) StateSave(stateSinkObject state.Sink) { + i.beforeSave() + stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError) +} + +func (i *icmpv4SourceRouteFailedSockError) afterLoad() {} + +// +checklocksignore +func (i *icmpv4SourceRouteFailedSockError) StateLoad(stateSourceObject state.Source) { + stateSourceObject.Load(0, 
&i.icmpv4DestinationUnreachableSockError) +} + +func (i *icmpv4SourceHostIsolatedSockError) StateTypeName() string { + return "pkg/tcpip/network/ipv4.icmpv4SourceHostIsolatedSockError" +} + +func (i *icmpv4SourceHostIsolatedSockError) StateFields() []string { + return []string{ + "icmpv4DestinationUnreachableSockError", + } +} + +func (i *icmpv4SourceHostIsolatedSockError) beforeSave() {} + +// +checklocksignore +func (i *icmpv4SourceHostIsolatedSockError) StateSave(stateSinkObject state.Sink) { + i.beforeSave() + stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError) +} + +func (i *icmpv4SourceHostIsolatedSockError) afterLoad() {} + +// +checklocksignore +func (i *icmpv4SourceHostIsolatedSockError) StateLoad(stateSourceObject state.Source) { + stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError) +} + +func (i *icmpv4DestinationHostUnknownSockError) StateTypeName() string { + return "pkg/tcpip/network/ipv4.icmpv4DestinationHostUnknownSockError" +} + +func (i *icmpv4DestinationHostUnknownSockError) StateFields() []string { + return []string{ + "icmpv4DestinationUnreachableSockError", + } +} + +func (i *icmpv4DestinationHostUnknownSockError) beforeSave() {} + +// +checklocksignore +func (i *icmpv4DestinationHostUnknownSockError) StateSave(stateSinkObject state.Sink) { + i.beforeSave() + stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError) +} + +func (i *icmpv4DestinationHostUnknownSockError) afterLoad() {} + +// +checklocksignore +func (i *icmpv4DestinationHostUnknownSockError) StateLoad(stateSourceObject state.Source) { + stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError) +} + func (e *icmpv4FragmentationNeededSockError) StateTypeName() string { return "pkg/tcpip/network/ipv4.icmpv4FragmentationNeededSockError" } @@ -108,6 +233,11 @@ func (e *icmpv4FragmentationNeededSockError) StateLoad(stateSourceObject state.S func init() { state.Register((*icmpv4DestinationUnreachableSockError)(nil)) state.Register((*icmpv4DestinationHostUnreachableSockError)(nil)) + state.Register((*icmpv4DestinationNetUnreachableSockError)(nil)) state.Register((*icmpv4DestinationPortUnreachableSockError)(nil)) + state.Register((*icmpv4DestinationProtoUnreachableSockError)(nil)) + state.Register((*icmpv4SourceRouteFailedSockError)(nil)) + state.Register((*icmpv4SourceHostIsolatedSockError)(nil)) + state.Register((*icmpv4DestinationHostUnknownSockError)(nil)) state.Register((*icmpv4FragmentationNeededSockError)(nil)) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/stats.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/stats.go index 5798cfec60..9ebef99e79 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/stats.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/network/ipv4/stats.go @@ -131,6 +131,7 @@ type multiCounterIGMPPacketStats struct { membershipQuery tcpip.MultiCounterStat v1MembershipReport tcpip.MultiCounterStat v2MembershipReport tcpip.MultiCounterStat + v3MembershipReport tcpip.MultiCounterStat leaveGroup tcpip.MultiCounterStat } @@ -138,6 +139,7 @@ func (m *multiCounterIGMPPacketStats) init(a, b *tcpip.IGMPPacketStats) { m.membershipQuery.Init(a.MembershipQuery, b.MembershipQuery) m.v1MembershipReport.Init(a.V1MembershipReport, b.V1MembershipReport) m.v2MembershipReport.Init(a.V2MembershipReport, b.V2MembershipReport) + m.v3MembershipReport.Init(a.V3MembershipReport, b.V3MembershipReport) m.leaveGroup.Init(a.LeaveGroup, b.LeaveGroup) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/ports.go 
b/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/ports.go index b070b80e31..11a9dc0b1e 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/ports.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/ports/ports.go @@ -27,8 +27,11 @@ import ( ) const ( - firstEphemeral = 16000 - anyIPAddress tcpip.Address = "" + firstEphemeral = 16000 +) + +var ( + anyIPAddress = tcpip.Address{} ) // Reservation describes a port reservation. diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/socketops.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/socketops.go index 28d04ba267..a3aadb223a 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/socketops.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/socketops.go @@ -16,7 +16,7 @@ package tcpip import ( "gvisor.dev/gvisor/pkg/atomicbitops" - "gvisor.dev/gvisor/pkg/bufferv2" + "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/sync" ) @@ -606,7 +606,7 @@ type SockError struct { Cause SockErrorCause // Payload is the errant packet's payload. - Payload *bufferv2.View + Payload *buffer.View // Dst is the original destination address of the errant packet. Dst FullAddress // Offender is the original sender address of the errant packet. @@ -653,7 +653,7 @@ func (so *SocketOptions) QueueErr(err *SockError) { } // QueueLocalErr queues a local error onto the local queue. -func (so *SocketOptions) QueueLocalErr(err Error, net NetworkProtocolNumber, info uint32, dst FullAddress, payload *bufferv2.View) { +func (so *SocketOptions) QueueLocalErr(err Error, net NetworkProtocolNumber, info uint32, dst FullAddress, payload *buffer.View) { so.QueueErr(&SockError{ Err: err, Cause: &LocalSockError{info: info}, diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/address_state_refs.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/address_state_refs.go index fadbf04a8a..866a2c365d 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/address_state_refs.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/address_state_refs.go @@ -43,7 +43,8 @@ type addressStateRefs struct { // InitRefs initializes r with one reference and, if enabled, activates leak // checking. func (r *addressStateRefs) InitRefs() { - r.refCount.Store(1) + + r.refCount.RacyStore(1) refs.Register(r) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conntrack.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conntrack.go index 95e11ac477..02bce87042 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conntrack.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/conntrack.go @@ -981,8 +981,8 @@ func (ct *ConnTrack) bucket(id tupleID) int { func (ct *ConnTrack) bucketWithTableLength(id tupleID, tableLength int) int { h := jenkins.Sum32(ct.seed) - h.Write([]byte(id.srcAddr)) - h.Write([]byte(id.dstAddr)) + h.Write(id.srcAddr.AsSlice()) + h.Write(id.dstAddr.AsSlice()) shortBuf := make([]byte, 2) binary.LittleEndian.PutUint16(shortBuf, id.srcPortOrEchoRequestIdent) h.Write([]byte(shortBuf)) @@ -1119,14 +1119,14 @@ func (ct *ConnTrack) originalDst(epID TransportEndpointID, netProto tcpip.Networ t := ct.connForTID(tid) if t == nil { // Not a tracked connection. - return "", 0, &tcpip.ErrNotConnected{} + return tcpip.Address{}, 0, &tcpip.ErrNotConnected{} } t.conn.mu.RLock() defer t.conn.mu.RUnlock() if t.conn.destinationManip == manipNotPerformed { // Unmanipulated destination. 
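The ports.go hunk above, like the conntrack.go, socketops.go, and ipv4.go hunks elsewhere in this change, reflects the same migration: tcpip.Address is now a fixed-size value type rather than a string, so the empty-string sentinel becomes the zero value tcpip.Address{}, length checks go through BitLen(), and byte access goes through As4()/AsSlice(). A small sketch of the new idioms, using only accessors that appear in this diff (the helper and its package are hypothetical):

package addrexample

import (
	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/header"
)

// isUnspecifiedV4 reports whether addr is 0.0.0.0, using the struct-based
// address accessors rather than string length checks and indexing.
func isUnspecifiedV4(addr tcpip.Address) bool {
	// BitLen replaces the old len(addr) == header.IPv4AddressSize check.
	if addr.BitLen() != header.IPv4AddressSizeBits {
		return false
	}
	// As4 replaces indexing directly into the string-backed address.
	return addr.As4() == [4]byte{}
}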
- return "", 0, &tcpip.ErrInvalidOptionValue{} + return tcpip.Address{}, 0, &tcpip.ErrInvalidOptionValue{} } id := t.conn.original.tupleID diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/gro.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/gro.go index c59c023dd3..2a2a301310 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/gro.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/gro.go @@ -15,6 +15,7 @@ package stack import ( + "bytes" "fmt" "time" @@ -24,6 +25,9 @@ import ( "gvisor.dev/gvisor/pkg/tcpip/header" ) +// TODO(b/256037250): Enable by default. +// TODO(b/256037250): We parse headers here. We should save those headers in +// PacketBuffers so they don't have to be re-parsed later. // TODO(b/256037250): I still see the occasional SACK block in the zero-loss // benchmark, which should not happen. // TODO(b/256037250): Some dispatchers, e.g. XDP and RecvMmsg, can receive @@ -31,7 +35,6 @@ import ( // opportunity for coalescing. // TODO(b/256037250): We're doing some header parsing here, which presents the // opportunity to skip it later. -// TODO(b/256037250): Disarm or ignore the timer when GRO is empty. // TODO(b/256037250): We may be able to remove locking by pairing // groDispatchers with link endpoint dispatchers. @@ -77,7 +80,7 @@ func (gb *groBucket) full() bool { // insert inserts pkt into the bucket. // +checklocks:gb.mu -func (gb *groBucket) insert(pkt PacketBufferPtr, ipHdr header.IPv4, tcpHdr header.TCP, ep NetworkEndpoint) { +func (gb *groBucket) insert(pkt PacketBufferPtr, ipHdr []byte, tcpHdr header.TCP, ep NetworkEndpoint) { groPkt := &gb.packetsPrealloc[gb.allocIdxs[gb.count]] *groPkt = groPacket{ pkt: pkt, @@ -85,7 +88,7 @@ func (gb *groBucket) insert(pkt PacketBufferPtr, ipHdr header.IPv4, tcpHdr heade ep: ep, ipHdr: ipHdr, tcpHdr: tcpHdr, - initialLength: ipHdr.TotalLength(), + initialLength: pkt.Data().Size(), // pkt.Data() contains network header. idx: groPkt.idx, } gb.count++ @@ -114,14 +117,15 @@ func (gb *groBucket) removeOne(pkt *groPacket) { pkt.reset() } -// findGROPacket returns the groPkt that matches ipHdr and tcpHdr, or nil if +// findGROPacket4 returns the groPkt that matches ipHdr and tcpHdr, or nil if // none exists. It also returns whether the groPkt should be flushed based on // differences between the two headers. // +checklocks:gb.mu -func (gb *groBucket) findGROPacket(ipHdr header.IPv4, tcpHdr header.TCP) (*groPacket, bool) { +func (gb *groBucket) findGROPacket4(pkt PacketBufferPtr, ipHdr header.IPv4, tcpHdr header.TCP, ep NetworkEndpoint) (*groPacket, bool) { for groPkt := gb.packets.Front(); groPkt != nil; groPkt = groPkt.Next() { // Do the addresses match? - if ipHdr.SourceAddress() != groPkt.ipHdr.SourceAddress() || ipHdr.DestinationAddress() != groPkt.ipHdr.DestinationAddress() { + groIPHdr := header.IPv4(groPkt.ipHdr) + if ipHdr.SourceAddress() != groIPHdr.SourceAddress() || ipHdr.DestinationAddress() != groIPHdr.DestinationAddress() { continue } @@ -134,31 +138,18 @@ func (gb *groBucket) findGROPacket(ipHdr header.IPv4, tcpHdr header.TCP) (*groPa // IP checks. TOS, _ := ipHdr.TOS() - groTOS, _ := groPkt.ipHdr.TOS() - if ipHdr.TTL() != groPkt.ipHdr.TTL() || TOS != groTOS { + groTOS, _ := groIPHdr.TOS() + if ipHdr.TTL() != groIPHdr.TTL() || TOS != groTOS { return groPkt, true } // TCP checks. - flags := tcpHdr.Flags() - groPktFlags := groPkt.tcpHdr.Flags() - dataOff := tcpHdr.DataOffset() - if flags&header.TCPFlagCwr != 0 || // Is congestion control occurring? 
- (flags^groPktFlags)&^(header.TCPFlagCwr|header.TCPFlagFin|header.TCPFlagPsh) != 0 || // Do the flags differ besides CRW, FIN, and PSH? - tcpHdr.AckNumber() != groPkt.tcpHdr.AckNumber() || // Do the ACKs match? - dataOff != groPkt.tcpHdr.DataOffset() || // Are the TCP headers the same length? - groPkt.tcpHdr.SequenceNumber()+uint32(groPkt.payloadSize()) != tcpHdr.SequenceNumber() { // Does the incoming packet match the expected sequence number? + if shouldFlushTCP(groPkt, tcpHdr) { return groPkt, true } - // The options, including timestamps, must be identical. - for i := header.TCPMinimumSize; i < int(dataOff); i++ { - if tcpHdr[i] != groPkt.tcpHdr[i] { - return groPkt, true - } - } // There's an upper limit on coalesced packet size. - if int(ipHdr.TotalLength())-header.IPv4MinimumSize-int(dataOff)+groPkt.pkt.Data().Size() >= groMaxPacketSize { + if pkt.Data().Size()-header.IPv4MinimumSize-int(tcpHdr.DataOffset())+groPkt.pkt.Data().Size() >= groMaxPacketSize { return groPkt, true } @@ -168,6 +159,141 @@ func (gb *groBucket) findGROPacket(ipHdr header.IPv4, tcpHdr header.TCP) (*groPa return nil, false } +// findGROPacket6 returns the groPkt that matches ipHdr and tcpHdr, or nil if +// none exists. It also returns whether the groPkt should be flushed based on +// differences between the two headers. +// +checklocks:gb.mu +func (gb *groBucket) findGROPacket6(pkt PacketBufferPtr, ipHdr header.IPv6, tcpHdr header.TCP, ep NetworkEndpoint) (*groPacket, bool) { + for groPkt := gb.packets.Front(); groPkt != nil; groPkt = groPkt.Next() { + // Do the addresses match? + groIPHdr := header.IPv6(groPkt.ipHdr) + if ipHdr.SourceAddress() != groIPHdr.SourceAddress() || ipHdr.DestinationAddress() != groIPHdr.DestinationAddress() { + continue + } + + // Need to check that headers are the same except: + // - Traffic class, a difference of which causes a flush. + // - Hop limit, a difference of which causes a flush. + // - Length, which is checked later. + // - Version, which is checked by an earlier call to IsValid(). + trafficClass, flowLabel := ipHdr.TOS() + groTrafficClass, groFlowLabel := groIPHdr.TOS() + if flowLabel != groFlowLabel || ipHdr.NextHeader() != groIPHdr.NextHeader() { + continue + } + // Unlike IPv4, IPv6 packets with extension headers can be coalesced. + if !bytes.Equal(ipHdr[header.IPv6MinimumSize:], groIPHdr[header.IPv6MinimumSize:]) { + continue + } + + // Do the ports match? + if tcpHdr.SourcePort() != groPkt.tcpHdr.SourcePort() || tcpHdr.DestinationPort() != groPkt.tcpHdr.DestinationPort() { + continue + } + + // We've found a packet of the same flow. + + // TCP checks. + if shouldFlushTCP(groPkt, tcpHdr) { + return groPkt, true + } + + // Do the traffic class and hop limit match? + if trafficClass != groTrafficClass || ipHdr.HopLimit() != groIPHdr.HopLimit() { + return groPkt, true + } + + // This limit is artificial for IPv6 -- we could allow even + // larger packets via jumbograms. + if pkt.Data().Size()-len(ipHdr)-int(tcpHdr.DataOffset())+groPkt.pkt.Data().Size() >= groMaxPacketSize { + return groPkt, true + } + + return groPkt, false + } + + return nil, false +} + +// +checklocks:gb.mu +func (gb *groBucket) found(gd *groDispatcher, groPkt *groPacket, flushGROPkt bool, pkt PacketBufferPtr, ipHdr []byte, tcpHdr header.TCP, ep NetworkEndpoint, updateIPHdr func([]byte, int)) { + // Flush groPkt or merge the packets. 
+ pktSize := pkt.Data().Size() + flags := tcpHdr.Flags() + dataOff := tcpHdr.DataOffset() + tcpPayloadSize := pkt.Data().Size() - len(ipHdr) - int(dataOff) + if flushGROPkt { + // Flush the existing GRO packet. Don't hold bucket.mu while + // processing the packet. + pkt := groPkt.pkt + gb.removeOne(groPkt) + gb.mu.Unlock() + ep.HandlePacket(pkt) + pkt.DecRef() + gb.mu.Lock() + groPkt = nil + } else if groPkt != nil { + // Merge pkt in to GRO packet. + pkt.Data().TrimFront(len(ipHdr) + int(dataOff)) + groPkt.pkt.Data().Merge(pkt.Data()) + // Update the IP total length. + updateIPHdr(groPkt.ipHdr, tcpPayloadSize) + // Add flags from the packet to the GRO packet. + groPkt.tcpHdr.SetFlags(uint8(groPkt.tcpHdr.Flags() | (flags & (header.TCPFlagFin | header.TCPFlagPsh)))) + + pkt = nil + } + + // Flush if the packet isn't the same size as the previous packets or + // if certain flags are set. The reason for checking size equality is: + // - If the packet is smaller than the others, this is likely the end + // of some message. Peers will send MSS-sized packets until they have + // insufficient data to do so. + // - If the packet is larger than the others, this packet is either + // malformed, a local GSO packet, or has already been handled by host + // GRO. + flush := header.TCPFlags(flags)&(header.TCPFlagUrg|header.TCPFlagPsh|header.TCPFlagRst|header.TCPFlagSyn|header.TCPFlagFin) != 0 + flush = flush || tcpPayloadSize == 0 + if groPkt != nil { + flush = flush || pktSize != groPkt.initialLength + } + + switch { + case flush && groPkt != nil: + // A merge occurred and we need to flush groPkt. + pkt := groPkt.pkt + gb.removeOne(groPkt) + gb.mu.Unlock() + ep.HandlePacket(pkt) + pkt.DecRef() + case flush && groPkt == nil: + // No merge occurred and the incoming packet needs to be flushed. + gb.mu.Unlock() + ep.HandlePacket(pkt) + case !flush && groPkt == nil: + // New flow and we don't need to flush. Insert pkt into GRO. + if gb.full() { + // Head is always the oldest packet + toFlush := gb.removeOldest() + gb.insert(pkt.IncRef(), ipHdr, tcpHdr, ep) + gb.mu.Unlock() + ep.HandlePacket(toFlush) + toFlush.DecRef() + } else { + gb.insert(pkt.IncRef(), ipHdr, tcpHdr, ep) + gb.mu.Unlock() + } + default: + // A merge occurred and we don't need to flush anything. + gb.mu.Unlock() + } + + // Schedule a timer if we never had one set before. + if gd.flushTimerState.CompareAndSwap(flushTimerUnset, flushTimerSet) { + gd.flushTimer.Reset(gd.getInterval()) + } +} + // A groPacket is packet undergoing GRO. It may be several packets coalesced // together. type groPacket struct { @@ -177,8 +303,8 @@ type groPacket struct { // pkt is the coalesced packet. pkt PacketBufferPtr - // ipHdr is the IP header for the coalesced packet. - ipHdr header.IPv4 + // ipHdr is the IP (v4 or v6) header for the coalesced packet. + ipHdr []byte // tcpHdr is the TCP header for the coalesced packet. tcpHdr header.TCP @@ -193,7 +319,7 @@ type groPacket struct { // used as a best-effort guess at MSS: senders will send MSS-sized // packets until they run out of data, so we coalesce as long as // packets are the same size. - initialLength uint16 + initialLength int // idx is the groPacket's index in its bucket packetsPrealloc. It is // immutable. @@ -209,27 +335,30 @@ func (pk *groPacket) reset() { // payloadSize is the payload size of the coalesced packet, which does not // include the network or transport headers. 
-func (pk *groPacket) payloadSize() uint16 { - return pk.ipHdr.TotalLength() - header.IPv4MinimumSize - uint16(pk.tcpHdr.DataOffset()) +func (pk *groPacket) payloadSize() int { + return pk.pkt.Data().Size() - len(pk.ipHdr) - int(pk.tcpHdr.DataOffset()) } +// Values held in groDispatcher.flushTimerState. +const ( + flushTimerUnset = iota + flushTimerSet + flushTimerClosed +) + // groDispatcher coalesces incoming packets to increase throughput. type groDispatcher struct { - // newInterval notifies about changes to the interval. - newInterval chan struct{} // intervalNS is the interval in nanoseconds. intervalNS atomicbitops.Int64 - // stop instructs the GRO dispatcher goroutine to stop. - stop chan struct{} buckets [groNBuckets]groBucket - wg sync.WaitGroup + + flushTimerState atomicbitops.Int32 + flushTimer *time.Timer } func (gd *groDispatcher) init(interval time.Duration) { gd.intervalNS.Store(interval.Nanoseconds()) - gd.newInterval = make(chan struct{}, 1) - gd.stop = make(chan struct{}) for i := range gd.buckets { bucket := &gd.buckets[i] @@ -241,76 +370,72 @@ func (gd *groDispatcher) init(interval time.Duration) { bucket.mu.Unlock() } - gd.start(interval) -} - -// start spawns a goroutine that flushes the GRO periodically based on the -// interval. -func (gd *groDispatcher) start(interval time.Duration) { - gd.wg.Add(1) - - go func(interval time.Duration) { - defer gd.wg.Done() + // Create a timer to fire far from now and cancel it immediately. + // + // The timer will be reset when there is a need for it to fire. + gd.flushTimer = time.AfterFunc(time.Hour, func() { + if !gd.flushTimerState.CompareAndSwap(flushTimerSet, flushTimerUnset) { + // Timer was unset or GRO is closed, do nothing further. + return + } - var ch <-chan time.Time + interval := gd.getInterval() if interval == 0 { - // Never run. - ch = make(<-chan time.Time) - } else { - ticker := time.NewTicker(interval) - ch = ticker.C + gd.flushAll() + return } - for { - select { - case <-gd.newInterval: - interval = time.Duration(gd.intervalNS.Load()) * time.Nanosecond - if interval == 0 { - // Never run. Flush any existing GRO packets. - gd.flushAll() - ch = make(<-chan time.Time) - } else { - ticker := time.NewTicker(interval) - ch = ticker.C - } - case <-ch: - gd.flush() - case <-gd.stop: - return - } + + if gd.flush() && gd.flushTimerState.CompareAndSwap(flushTimerUnset, flushTimerSet) { + // Only reset the timer if we have more packets and the timer was + // previously unset. If we have no packets left, the timer is already set + // or GRO is being closed, do not reset the timer. + gd.flushTimer.Reset(interval) } - }(interval) + }) + gd.flushTimer.Stop() } func (gd *groDispatcher) getInterval() time.Duration { return time.Duration(gd.intervalNS.Load()) * time.Nanosecond } +// setInterval is not thread-safe and so much be protected by callers. func (gd *groDispatcher) setInterval(interval time.Duration) { gd.intervalNS.Store(interval.Nanoseconds()) - gd.newInterval <- struct{}{} + + if gd.flushTimerState.Load() == flushTimerSet { + // Timer was previously set, reset it. + gd.flushTimer.Reset(interval) + } } // dispatch sends pkt up the stack after it undergoes GRO coalescing. func (gd *groDispatcher) dispatch(pkt PacketBufferPtr, netProto tcpip.NetworkProtocolNumber, ep NetworkEndpoint) { // If GRO is disabled simply pass the packet along. 
- if gd.intervalNS.Load() == 0 { + if gd.getInterval() == 0 { ep.HandlePacket(pkt) return } + switch netProto { + case header.IPv4ProtocolNumber: + gd.dispatch4(pkt, ep) + case header.IPv6ProtocolNumber: + gd.dispatch6(pkt, ep) + default: + // We can't GRO this. + ep.HandlePacket(pkt) + } +} + +func (gd *groDispatcher) dispatch4(pkt PacketBufferPtr, ep NetworkEndpoint) { // Immediately get the IPv4 and TCP headers. We need a way to hash the // packet into its bucket, which requires addresses and ports. Linux // simply gets a hash passed by hardware, but we're not so lucky. - // We only GRO IPv4 packets. - if netProto != header.IPv4ProtocolNumber { - ep.HandlePacket(pkt) - return - } - - // We only GRO TCP4 packets. The check for the transport protocol - // number is done below so that we can PullUp both the IP and TCP - // headers together. + // We only GRO TCP packets. The check for the transport protocol number + // is done below so that we can PullUp both the IP and TCP headers + // together. hdrBytes, ok := pkt.Data().PullUp(header.IPv4MinimumSize + header.TCPMinimumSize) if !ok { ep.HandlePacket(pkt) @@ -318,9 +443,9 @@ func (gd *groDispatcher) dispatch(pkt PacketBufferPtr, netProto tcpip.NetworkPro } ipHdr := header.IPv4(hdrBytes) - // We only handle atomic packets. That's the vast majority of traffic, - // and simplifies handling. - if ipHdr.FragmentOffset() != 0 || ipHdr.Flags()&header.IPv4FlagMoreFragments != 0 || ipHdr.Flags()&header.IPv4FlagDontFragment == 0 { + // We don't handle fragments. That should be the vast majority of + // traffic, and simplifies handling. + if ipHdr.FragmentOffset() != 0 || ipHdr.Flags()&header.IPv4FlagMoreFragments != 0 { ep.HandlePacket(pkt) return } @@ -331,6 +456,7 @@ func (gd *groDispatcher) dispatch(pkt PacketBufferPtr, netProto tcpip.NetworkPro return } tcpHdr := header.TCP(hdrBytes[header.IPv4MinimumSize:]) + ipHdr = ipHdr[:header.IPv4MinimumSize] dataOff := tcpHdr.DataOffset() if dataOff < header.TCPMinimumSize { // Malformed packet: will be handled further up the stack. @@ -348,14 +474,14 @@ func (gd *groDispatcher) dispatch(pkt PacketBufferPtr, netProto tcpip.NetworkPro // If either checksum is bad, flush the packet. Since we don't know // what bits were flipped, we can't identify this packet with a flow. - tcpPayloadSize := ipHdr.TotalLength() - header.IPv4MinimumSize - uint16(dataOff) if !pkt.RXChecksumValidated { if !ipHdr.IsValid(pkt.Data().Size()) || !ipHdr.IsChecksumValid() { ep.HandlePacket(pkt) return } payloadChecksum := pkt.Data().ChecksumAtOffset(header.IPv4MinimumSize + int(dataOff)) - if !tcpHdr.IsChecksumValid(ipHdr.SourceAddress(), ipHdr.DestinationAddress(), payloadChecksum, tcpPayloadSize) { + tcpPayloadSize := pkt.Data().Size() - header.IPv4MinimumSize - int(dataOff) + if !tcpHdr.IsChecksumValid(ipHdr.SourceAddress(), ipHdr.DestinationAddress(), payloadChecksum, uint16(tcpPayloadSize)) { ep.HandlePacket(pkt) return } @@ -367,86 +493,118 @@ func (gd *groDispatcher) dispatch(pkt PacketBufferPtr, netProto tcpip.NetworkPro // Now we can get the bucket for the packet. bucket := &gd.buckets[gd.bucketForPacket(ipHdr, tcpHdr)&groNBucketsMask] bucket.mu.Lock() - groPkt, flushGROPkt := bucket.findGROPacket(ipHdr, tcpHdr) + groPkt, flushGROPkt := bucket.findGROPacket4(pkt, ipHdr, tcpHdr, ep) + bucket.found(gd, groPkt, flushGROPkt, pkt, ipHdr, tcpHdr, ep, updateIPv4Hdr) +} - // Flush groPkt or merge the packets. - flags := tcpHdr.Flags() - if flushGROPkt { - // Flush the existing GRO packet. 
Don't hold bucket.mu while - // processing the packet. - pkt := groPkt.pkt - bucket.removeOne(groPkt) - bucket.mu.Unlock() - ep.HandlePacket(pkt) - pkt.DecRef() - bucket.mu.Lock() - groPkt = nil - } else if groPkt != nil { - // Merge pkt in to GRO packet. - buf := pkt.Data().ToBuffer() - buf.TrimFront(header.IPv4MinimumSize + int64(dataOff)) - groPkt.pkt.Data().MergeBuffer(&buf) - buf.Release() - // Add flags from the packet to the GRO packet. - groPkt.tcpHdr.SetFlags(uint8(groPkt.tcpHdr.Flags() | (flags & (header.TCPFlagFin | header.TCPFlagPsh)))) - // Update the IP total length. - groPkt.ipHdr.SetTotalLength(groPkt.ipHdr.TotalLength() + uint16(tcpPayloadSize)) +func (gd *groDispatcher) dispatch6(pkt PacketBufferPtr, ep NetworkEndpoint) { + // Immediately get the IPv6 and TCP headers. We need a way to hash the + // packet into its bucket, which requires addresses and ports. Linux + // simply gets a hash passed by hardware, but we're not so lucky. - pkt = PacketBufferPtr{} + hdrBytes, ok := pkt.Data().PullUp(header.IPv6MinimumSize) + if !ok { + ep.HandlePacket(pkt) + return + } + ipHdr := header.IPv6(hdrBytes) + + // Getting the IP header (+ extension headers) size is a bit of a pain + // on IPv6. + transProto := tcpip.TransportProtocolNumber(ipHdr.NextHeader()) + buf := pkt.Data().ToBuffer() + buf.TrimFront(header.IPv6MinimumSize) + it := header.MakeIPv6PayloadIterator(header.IPv6ExtensionHeaderIdentifier(transProto), buf) + ipHdrSize := int(header.IPv6MinimumSize) + for { + transProto = tcpip.TransportProtocolNumber(it.NextHeaderIdentifier()) + extHdr, done, err := it.Next() + if err != nil { + ep.HandlePacket(pkt) + return + } + if done { + break + } + switch extHdr.(type) { + // We can GRO these, so just skip over them. + case header.IPv6HopByHopOptionsExtHdr: + case header.IPv6RoutingExtHdr: + case header.IPv6DestinationOptionsExtHdr: + default: + // This is either a TCP header or something we can't handle. + ipHdrSize = int(it.HeaderOffset()) + done = true + } + extHdr.Release() + if done { + break + } } - // Flush if the packet isn't the same size as the previous packets or - // if certain flags are set. The reason for checking size equality is: - // - If the packet is smaller than the others, this is likely the end - // of some message. Peers will send MSS-sized packets until they have - // insufficient data to do so. - // - If the packet is larger than the others, this packet is either - // malformed, a local GSO packet, or has already been handled by host - // GRO. - flush := header.TCPFlags(flags)&(header.TCPFlagUrg|header.TCPFlagPsh|header.TCPFlagRst|header.TCPFlagSyn|header.TCPFlagFin) != 0 - if groPkt != nil { - flush = flush || ipHdr.TotalLength() != groPkt.initialLength + hdrBytes, ok = pkt.Data().PullUp(ipHdrSize + header.TCPMinimumSize) + if !ok { + ep.HandlePacket(pkt) + return } + ipHdr = header.IPv6(hdrBytes[:ipHdrSize]) - switch { - case flush && groPkt != nil: - // A merge occurred and we need to flush groPkt. - pkt := groPkt.pkt - bucket.removeOne(groPkt) - bucket.mu.Unlock() + // We only handle TCP packets. + if transProto != header.TCPProtocolNumber { ep.HandlePacket(pkt) - pkt.DecRef() - case flush && groPkt == nil: - // No merge occurred and the incoming packet needs to be flushed. - bucket.mu.Unlock() + return + } + tcpHdr := header.TCP(hdrBytes[ipHdrSize:]) + dataOff := tcpHdr.DataOffset() + if dataOff < header.TCPMinimumSize { + // Malformed packet: will be handled further up the stack. 
ep.HandlePacket(pkt) - case !flush && groPkt == nil: - // New flow and we don't need to flush. Insert pkt into GRO. - if bucket.full() { - // Head is always the oldest packet - toFlush := bucket.removeOldest() - bucket.insert(pkt.IncRef(), ipHdr, tcpHdr, ep) - bucket.mu.Unlock() - ep.HandlePacket(toFlush) - toFlush.DecRef() - } else { - bucket.insert(pkt.IncRef(), ipHdr, tcpHdr, ep) - bucket.mu.Unlock() + return + } + + hdrBytes, ok = pkt.Data().PullUp(ipHdrSize + int(dataOff)) + if !ok { + // Malformed packet: will be handled further up the stack. + ep.HandlePacket(pkt) + return + } + tcpHdr = header.TCP(hdrBytes[ipHdrSize:]) + + // If either checksum is bad, flush the packet. Since we don't know + // what bits were flipped, we can't identify this packet with a flow. + if !pkt.RXChecksumValidated { + if !ipHdr.IsValid(pkt.Data().Size()) { + ep.HandlePacket(pkt) + return } - default: - // A merge occurred and we don't need to flush anything. - bucket.mu.Unlock() + payloadChecksum := pkt.Data().ChecksumAtOffset(ipHdrSize + int(dataOff)) + tcpPayloadSize := pkt.Data().Size() - ipHdrSize - int(dataOff) + if !tcpHdr.IsChecksumValid(ipHdr.SourceAddress(), ipHdr.DestinationAddress(), payloadChecksum, uint16(tcpPayloadSize)) { + ep.HandlePacket(pkt) + return + } + // We've validated the checksum, no reason for others to do it + // again. + pkt.RXChecksumValidated = true } + + // Now we can get the bucket for the packet. + bucket := &gd.buckets[gd.bucketForPacket(ipHdr, tcpHdr)&groNBucketsMask] + bucket.mu.Lock() + groPkt, flushGROPkt := bucket.findGROPacket6(pkt, ipHdr, tcpHdr, ep) + bucket.found(gd, groPkt, flushGROPkt, pkt, ipHdr, tcpHdr, ep, updateIPv6Hdr) } -func (gd *groDispatcher) bucketForPacket(ipHdr header.IPv4, tcpHdr header.TCP) int { +func (gd *groDispatcher) bucketForPacket(ipHdr header.Network, tcpHdr header.TCP) int { // TODO(b/256037250): Use jenkins or checksum. Write a test to print // distribution. var sum int - for _, val := range []byte(ipHdr.SourceAddress()) { + srcAddr := ipHdr.SourceAddress() + for _, val := range srcAddr.AsSlice() { sum += int(val) } - for _, val := range []byte(ipHdr.DestinationAddress()) { + dstAddr := ipHdr.DestinationAddress() + for _, val := range dstAddr.AsSlice() { sum += int(val) } sum += int(tcpHdr.SourcePort()) @@ -455,18 +613,26 @@ func (gd *groDispatcher) bucketForPacket(ipHdr header.IPv4, tcpHdr header.TCP) i } // flush sends any packets older than interval up the stack. -func (gd *groDispatcher) flush() { +// +// Returns true iff packets remain. +func (gd *groDispatcher) flush() bool { interval := gd.intervalNS.Load() old := time.Now().Add(-time.Duration(interval) * time.Nanosecond) - gd.flushSince(old) + return gd.flushSinceOrEqualTo(old) } -func (gd *groDispatcher) flushSince(old time.Time) { +// flushSinceOrEqualTo sends any packets older than or equal to the specified +// time. +// +// Returns true iff packets remain. +func (gd *groDispatcher) flushSinceOrEqualTo(old time.Time) bool { type pair struct { pkt PacketBufferPtr ep NetworkEndpoint } + hasMore := false + for i := range gd.buckets { // Put packets in a slice so we don't have to hold bucket.mu // when we call HandlePacket. 
@@ -476,13 +642,14 @@ func (gd *groDispatcher) flushSince(old time.Time) { bucket := &gd.buckets[i] bucket.mu.Lock() for groPkt := bucket.packets.Front(); groPkt != nil; groPkt = groPkt.Next() { - if groPkt.created.Before(old) { - pairs = append(pairs, pair{groPkt.pkt, groPkt.ep}) - bucket.removeOne(groPkt) - } else { + if groPkt.created.After(old) { // Packets are ordered by age, so we can move // on once we find one that's too new. + hasMore = true break + } else { + pairs = append(pairs, pair{groPkt.pkt, groPkt.ep}) + bucket.removeOne(groPkt) } } bucket.mu.Unlock() @@ -492,22 +659,28 @@ func (gd *groDispatcher) flushSince(old time.Time) { pair.pkt.DecRef() } } + + return hasMore } func (gd *groDispatcher) flushAll() { - gd.flushSince(time.Now()) + if gd.flushSinceOrEqualTo(time.Now()) { + panic("packets unexpectedly remain after flushing all") + } } // close stops the GRO goroutine and releases any held packets. func (gd *groDispatcher) close() { - gd.stop <- struct{}{} - gd.wg.Wait() + gd.flushTimer.Stop() + // Prevent the timer from being scheduled again. + gd.flushTimerState.Store(flushTimerClosed) for i := range gd.buckets { bucket := &gd.buckets[i] bucket.mu.Lock() - for groPkt := bucket.packets.Front(); groPkt != nil; groPkt = groPkt.Next() { + for groPkt := bucket.packets.Front(); groPkt != nil; groPkt = bucket.packets.Front() { groPkt.pkt.DecRef() + bucket.removeOne(groPkt) } bucket.mu.Unlock() } @@ -528,3 +701,30 @@ func (gd *groDispatcher) String() string { } return ret } + +// shouldFlushTCP returns whether the TCP headers indicate that groPkt should +// be flushed +func shouldFlushTCP(groPkt *groPacket, tcpHdr header.TCP) bool { + flags := tcpHdr.Flags() + groPktFlags := groPkt.tcpHdr.Flags() + dataOff := tcpHdr.DataOffset() + if flags&header.TCPFlagCwr != 0 || // Is congestion control occurring? + (flags^groPktFlags)&^(header.TCPFlagCwr|header.TCPFlagFin|header.TCPFlagPsh) != 0 || // Do the flags differ besides CRW, FIN, and PSH? + tcpHdr.AckNumber() != groPkt.tcpHdr.AckNumber() || // Do the ACKs match? + dataOff != groPkt.tcpHdr.DataOffset() || // Are the TCP headers the same length? + groPkt.tcpHdr.SequenceNumber()+uint32(groPkt.payloadSize()) != tcpHdr.SequenceNumber() { // Does the incoming packet match the expected sequence number? + return true + } + // The options, including timestamps, must be identical. 
+ return !bytes.Equal(tcpHdr[header.TCPMinimumSize:], groPkt.tcpHdr[header.TCPMinimumSize:]) +} + +func updateIPv4Hdr(ipHdrBytes []byte, newBytes int) { + ipHdr := header.IPv4(ipHdrBytes) + ipHdr.SetTotalLength(ipHdr.TotalLength() + uint16(newBytes)) +} + +func updateIPv6Hdr(ipHdrBytes []byte, newBytes int) { + ipHdr := header.IPv6(ipHdrBytes) + ipHdr.SetPayloadLength(ipHdr.PayloadLength() + uint16(newBytes)) +} diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables.go index 0ad5303c82..9efeb59534 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables.go @@ -692,7 +692,7 @@ func (it *IPTables) OriginalDst(epID TransportEndpointID, netProto tcpip.Network it.mu.RLock() defer it.mu.RUnlock() if !it.modified { - return "", 0, &tcpip.ErrNotConnected{} + return tcpip.Address{}, 0, &tcpip.ErrNotConnected{} } return it.connections.originalDst(epID, netProto, transProto) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_targets.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_targets.go index 5b4a736fe6..4ba1f3e8d2 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_targets.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_targets.go @@ -233,7 +233,7 @@ func (rt *RedirectTarget) Action(pkt PacketBufferPtr, hook Hook, r *Route, addre switch hook { case Output: if pkt.NetworkProtocolNumber == header.IPv4ProtocolNumber { - address = tcpip.Address([]byte{127, 0, 0, 1}) + address = tcpip.AddrFrom4([4]byte{127, 0, 0, 1}) } else { address = header.IPv6Loopback } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_types.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_types.go index ff1c4270ae..3a908f9ea8 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_types.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/iptables_types.go @@ -316,10 +316,10 @@ func matchIfName(nicName string, ifName string, invert bool) bool { // NetworkProtocol returns the protocol (IPv4 or IPv6) on to which the header // applies. func (fl IPHeaderFilter) NetworkProtocol() tcpip.NetworkProtocolNumber { - switch len(fl.Src) { - case header.IPv4AddressSize: + switch fl.Src.BitLen() { + case header.IPv4AddressSizeBits: return header.IPv4ProtocolNumber - case header.IPv6AddressSize: + case header.IPv6AddressSizeBits: return header.IPv6ProtocolNumber } panic(fmt.Sprintf("invalid address in IPHeaderFilter: %s", fl.Src)) @@ -328,8 +328,11 @@ func (fl IPHeaderFilter) NetworkProtocol() tcpip.NetworkProtocolNumber { // filterAddress returns whether addr matches the filter. func filterAddress(addr, mask, filterAddr tcpip.Address, invert bool) bool { matches := true - for i := range filterAddr { - if addr[i]&mask[i] != filterAddr[i] { + addrBytes := addr.AsSlice() + maskBytes := mask.AsSlice() + filterBytes := filterAddr.AsSlice() + for i := range filterAddr.AsSlice() { + if addrBytes[i]&maskBytes[i] != filterBytes[i] { matches = false break } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_cache.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_cache.go index c637400742..b38bef4e21 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_cache.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_cache.go @@ -20,7 +20,9 @@ import ( "gvisor.dev/gvisor/pkg/tcpip" ) -const neighborCacheSize = 512 // max entries per interface +// NeighborCacheSize is the size of the neighborCache. 
Exceeding this size will +// result in the least recently used entry being evicted. +const NeighborCacheSize = 512 // max entries per interface // NeighborStats holds metrics for the neighbor table. type NeighborStats struct { @@ -87,7 +89,7 @@ func (n *neighborCache) getOrCreateEntry(remoteAddr tcpip.Address) *neighborEntr // The entry that needs to be created must be dynamic since all static // entries are directly added to the cache via addStaticEntry. entry := newNeighborEntry(n, remoteAddr, n.state) - if n.mu.dynamic.count == neighborCacheSize { + if n.mu.dynamic.count == NeighborCacheSize { e := n.mu.dynamic.lru.Back() e.mu.Lock() @@ -300,6 +302,6 @@ func (n *neighborCache) init(nic *nic, r LinkAddressResolver) { linkRes: r, } n.mu.Lock() - n.mu.cache = make(map[tcpip.Address]*neighborEntry, neighborCacheSize) + n.mu.cache = make(map[tcpip.Address]*neighborEntry, NeighborCacheSize) n.mu.Unlock() } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_entry.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_entry.go index d194e369c7..baa62f112a 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_entry.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/neighbor_entry.go @@ -317,7 +317,7 @@ func (e *neighborEntry) setStateLocked(next NeighborState) { timer: e.cache.nic.stack.Clock().AfterFunc(immediateDuration, func() { var err tcpip.Error = &tcpip.ErrTimeout{} if remaining != 0 { - err = e.cache.linkRes.LinkAddressRequest(addr, "" /* localAddr */, linkAddr) + err = e.cache.linkRes.LinkAddressRequest(addr, tcpip.Address{} /* localAddr */, linkAddr) } e.mu.Lock() diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic.go index 38b71f67ab..3e8a0a3ea1 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic.go @@ -44,12 +44,13 @@ type nic struct { stats sharedStats - // mu protects annotated fields below. - mu nicRWMutex + // enableDisableMu is used to synchronize attempts to enable/disable the NIC. + // Without this mutex, calls to enable/disable the NIC may interleave and + // leave the NIC in an inconsistent state. + enableDisableMu nicRWMutex // The network endpoints themselves may be modified by calling the interface's // methods, but the map reference and entries must be constant. - // +checklocks:mu networkEndpoints map[tcpip.NetworkProtocolNumber]NetworkEndpoint linkAddrResolvers map[tcpip.NetworkProtocolNumber]*linkResolver duplicateAddressDetectors map[tcpip.NetworkProtocolNumber]DuplicateAddressDetector @@ -208,8 +209,6 @@ func newNIC(stack *Stack, id tcpip.NICID, ep LinkEndpoint, opts NICOptions) *nic } func (n *nic) getNetworkEndpoint(proto tcpip.NetworkProtocolNumber) NetworkEndpoint { - n.mu.RLock() - defer n.mu.RUnlock() return n.networkEndpoints[proto] } @@ -221,6 +220,8 @@ func (n *nic) Enabled() bool { // setEnabled sets the enabled status for the NIC. // // Returns true if the enabled status was updated. +// +// +checklocks:n.enableDisableMu func (n *nic) setEnabled(v bool) bool { return n.enabled.Swap(v) != v } @@ -229,8 +230,8 @@ func (n *nic) setEnabled(v bool) bool { // // It undoes the work done by enable. func (n *nic) disable() { - n.mu.Lock() - defer n.mu.Unlock() + n.enableDisableMu.Lock() + defer n.enableDisableMu.Unlock() n.disableLocked() } @@ -238,7 +239,7 @@ func (n *nic) disable() { // // It undoes the work done by enable. 
// -// +checklocks:n.mu +// +checklocks:n.enableDisableMu func (n *nic) disableLocked() { if !n.Enabled() { return @@ -278,8 +279,8 @@ func (n *nic) disableLocked() { // routers if the stack is not operating as a router. If the stack is also // configured to auto-generate a link-local address, one will be generated. func (n *nic) enable() tcpip.Error { - n.mu.Lock() - defer n.mu.Unlock() + n.enableDisableMu.Lock() + defer n.enableDisableMu.Unlock() if !n.setEnabled(true) { return nil @@ -298,7 +299,7 @@ func (n *nic) enable() tcpip.Error { // resources. This guarantees no packets between this NIC and the network // stack. func (n *nic) remove() tcpip.Error { - n.mu.Lock() + n.enableDisableMu.Lock() n.disableLocked() @@ -306,13 +307,13 @@ func (n *nic) remove() tcpip.Error { ep.Close() } - n.mu.Unlock() + n.enableDisableMu.Unlock() // Shutdown GRO. n.gro.close() // Drain and drop any packets pending link resolution. - // We must not hold n.mu here. + // We must not hold n.enableDisableMu here. n.linkResQueue.cancel() // Prevent packets from going down to the link before shutting the link down. @@ -385,7 +386,16 @@ func (n *nic) writePacket(pkt PacketBufferPtr) tcpip.Error { return n.writeRawPacket(pkt) } +func (n *nic) writeRawPacketWithLinkHeaderInPayload(pkt PacketBufferPtr) tcpip.Error { + if !n.NetworkLinkEndpoint.ParseHeader(pkt) { + return &tcpip.ErrMalformedHeader{} + } + return n.writeRawPacket(pkt) +} + func (n *nic) writeRawPacket(pkt PacketBufferPtr) tcpip.Error { + // Always an outgoing packet. + pkt.PktType = tcpip.PacketOutgoing if err := n.qDisc.WritePacket(pkt); err != nil { if _, ok := err.(*tcpip.ErrNoBufferSpace); ok { n.stats.txPacketsDroppedNoBufferSpace.Increment() @@ -515,8 +525,6 @@ func (n *nic) addAddress(protocolAddress tcpip.ProtocolAddress, properties Addre // allPermanentAddresses returns all permanent addresses associated with // this NIC. func (n *nic) allPermanentAddresses() []tcpip.ProtocolAddress { - n.mu.RLock() - defer n.mu.RUnlock() var addrs []tcpip.ProtocolAddress for p, ep := range n.networkEndpoints { addressableEndpoint, ok := ep.(AddressableEndpoint) @@ -533,8 +541,6 @@ func (n *nic) allPermanentAddresses() []tcpip.ProtocolAddress { // primaryAddresses returns the primary addresses associated with this NIC. func (n *nic) primaryAddresses() []tcpip.ProtocolAddress { - n.mu.RLock() - defer n.mu.RUnlock() var addrs []tcpip.ProtocolAddress for p, ep := range n.networkEndpoints { addressableEndpoint, ok := ep.(AddressableEndpoint) @@ -566,8 +572,6 @@ func (n *nic) PrimaryAddress(proto tcpip.NetworkProtocolNumber) (tcpip.AddressWi // removeAddress removes an address from n. func (n *nic) removeAddress(addr tcpip.Address) tcpip.Error { - n.mu.RLock() - defer n.mu.RUnlock() for _, ep := range n.networkEndpoints { addressableEndpoint, ok := ep.(AddressableEndpoint) if !ok { @@ -586,8 +590,6 @@ func (n *nic) removeAddress(addr tcpip.Address) tcpip.Error { } func (n *nic) setAddressLifetimes(addr tcpip.Address, lifetimes AddressLifetimes) tcpip.Error { - n.mu.RLock() - defer n.mu.RUnlock() for _, ep := range n.networkEndpoints { ep, ok := ep.(AddressableEndpoint) if !ok { @@ -696,8 +698,6 @@ func (n *nic) leaveGroup(protocol tcpip.NetworkProtocolNumber, addr tcpip.Addres // isInGroup returns true if n has joined the multicast group addr. 
func (n *nic) isInGroup(addr tcpip.Address) bool { - n.mu.RLock() - defer n.mu.RUnlock() for _, ep := range n.networkEndpoints { gep, ok := ep.(GroupAddressableEndpoint) if !ok { @@ -738,7 +738,7 @@ func (n *nic) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt Pac n.gro.dispatch(pkt, protocol, networkEndpoint) } -func (n *nic) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr, incoming bool) { +func (n *nic) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr) { // Deliver to interested packet endpoints without holding NIC lock. var packetEPPkt PacketBufferPtr defer func() { @@ -764,11 +764,13 @@ func (n *nic) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt Packet // populate it in the packet buffer we provide to packet endpoints as // packet endpoints inspect link headers. packetEPPkt.LinkHeader().Consume(len(pkt.LinkHeader().Slice())) - - if incoming { + packetEPPkt.PktType = pkt.PktType + // Assume the packet is for us if the packet type is unset. + // The packet type is set to PacketOutgoing when sending packets so + // this may only be unset for incoming packets where link endpoints + // have not set it. + if packetEPPkt.PktType == 0 { packetEPPkt.PktType = tcpip.PacketHost - } else { - packetEPPkt.PktType = tcpip.PacketOutgoing } } @@ -785,7 +787,7 @@ func (n *nic) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt Packet n.packetEPsMu.Unlock() // On Linux, only ETH_P_ALL endpoints get outbound packets. - if incoming && protoEPsOK { + if pkt.PktType != tcpip.PacketOutgoing && protoEPsOK { protoEPs.forEach(deliverPacketEPs) } if anyEPsOK { diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer.go index 8cf77616b1..86b756950b 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer.go @@ -17,7 +17,7 @@ import ( "fmt" "io" - "gvisor.dev/gvisor/pkg/bufferv2" + "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" @@ -35,7 +35,7 @@ const ( var pkPool = sync.Pool{ New: func() any { - return &packetBuffer{} + return &PacketBuffer{} }, } @@ -47,7 +47,7 @@ type PacketBufferOptions struct { // Payload is the initial unparsed data for the new packet. If set, it will // be owned by the new packet. - Payload bufferv2.Buffer + Payload buffer.Buffer // IsForwardedPacket identifies that the PacketBuffer being created is for a // forwarded packet. @@ -59,14 +59,9 @@ type PacketBufferOptions struct { } // PacketBufferPtr is a pointer to a PacketBuffer. -// -// +stateify savable -type PacketBufferPtr struct { - // packetBuffer is the underlying packet buffer. - *packetBuffer -} +type PacketBufferPtr = *PacketBuffer -// A packetBuffer contains all the data of a network packet. +// A PacketBuffer contains all the data of a network packet. // // As a PacketBuffer traverses up the stack, it may be necessary to pass it to // multiple endpoints. @@ -79,7 +74,7 @@ type PacketBufferPtr struct { // reference count to 1. Owners should call `DecRef()` when they are finished // with the buffer to return it to the pool. // -// Internal structure: A PacketBuffer holds a pointer to bufferv2.Buffer, which +// Internal structure: A PacketBuffer holds a pointer to buffer.Buffer, which // exposes a logically-contiguous byte storage. 
The underlying storage structure // is abstracted out, and should not be a concern here for most of the time. // @@ -108,14 +103,14 @@ type PacketBufferPtr struct { // starting offset of each header in `buf`. // // +stateify savable -type packetBuffer struct { +type PacketBuffer struct { _ sync.NoCopy packetBufferRefs // buf is the underlying buffer for the packet. See struct level docs for // details. - buf bufferv2.Buffer + buf buffer.Buffer reserved int pushed int consumed int @@ -178,10 +173,10 @@ type packetBuffer struct { // NewPacketBuffer creates a new PacketBuffer with opts. func NewPacketBuffer(opts PacketBufferOptions) PacketBufferPtr { - pk := pkPool.Get().(*packetBuffer) + pk := pkPool.Get().(*PacketBuffer) pk.reset() if opts.ReserveHeaderBytes != 0 { - v := bufferv2.NewViewSize(opts.ReserveHeaderBytes) + v := buffer.NewViewSize(opts.ReserveHeaderBytes) pk.buf.Append(v) pk.reserved = opts.ReserveHeaderBytes } @@ -191,36 +186,31 @@ func NewPacketBuffer(opts PacketBufferOptions) PacketBufferPtr { pk.NetworkPacketInfo.IsForwardedPacket = opts.IsForwardedPacket pk.onRelease = opts.OnRelease pk.InitRefs() - return PacketBufferPtr{ - packetBuffer: pk, - } + return pk } // IncRef increments the PacketBuffer's refcount. func (pk PacketBufferPtr) IncRef() PacketBufferPtr { pk.packetBufferRefs.IncRef() - return PacketBufferPtr{ - packetBuffer: pk.packetBuffer, - } + return pk } // DecRef decrements the PacketBuffer's refcount. If the refcount is // decremented to zero, the PacketBuffer is returned to the PacketBuffer // pool. -func (pk *PacketBufferPtr) DecRef() { +func (pk PacketBufferPtr) DecRef() { pk.packetBufferRefs.DecRef(func() { if pk.onRelease != nil { pk.onRelease() } pk.buf.Release() - pkPool.Put(pk.packetBuffer) + pkPool.Put(pk) }) - pk.packetBuffer = nil } -func (pk *packetBuffer) reset() { - *pk = packetBuffer{} +func (pk PacketBufferPtr) reset() { + *pk = PacketBuffer{} } // ReservedHeaderBytes returns the number of bytes initially reserved for @@ -292,7 +282,7 @@ func (pk PacketBufferPtr) Data() PacketData { func (pk PacketBufferPtr) AsSlices() [][]byte { var views [][]byte offset := pk.headerOffset() - pk.buf.SubApply(offset, int(pk.buf.Size())-offset, func(v *bufferv2.View) { + pk.buf.SubApply(offset, int(pk.buf.Size())-offset, func(v *buffer.View) { views = append(views, v.AsSlice()) }) return views @@ -300,7 +290,7 @@ func (pk PacketBufferPtr) AsSlices() [][]byte { // ToBuffer returns a caller-owned copy of the underlying storage of the whole // packet. -func (pk PacketBufferPtr) ToBuffer() bufferv2.Buffer { +func (pk PacketBufferPtr) ToBuffer() buffer.Buffer { b := pk.buf.Clone() b.TrimFront(int64(pk.headerOffset())) return b @@ -308,10 +298,10 @@ func (pk PacketBufferPtr) ToBuffer() bufferv2.Buffer { // ToView returns a caller-owned copy of the underlying storage of the whole // packet as a view. 
-func (pk PacketBufferPtr) ToView() *bufferv2.View { - p := bufferv2.NewView(int(pk.buf.Size())) +func (pk PacketBufferPtr) ToView() *buffer.View { + p := buffer.NewView(int(pk.buf.Size())) offset := pk.headerOffset() - pk.buf.SubApply(offset, int(pk.buf.Size())-offset, func(v *bufferv2.View) { + pk.buf.SubApply(offset, int(pk.buf.Size())-offset, func(v *buffer.View) { p.Write(v.AsSlice()) }) return p @@ -359,10 +349,10 @@ func (pk PacketBufferPtr) consume(typ headerType, size int) (v []byte, consumed return view.AsSlice(), true } -func (pk PacketBufferPtr) headerView(typ headerType) bufferv2.View { +func (pk PacketBufferPtr) headerView(typ headerType) buffer.View { h := &pk.headers[typ] if h.length == 0 { - return bufferv2.View{} + return buffer.View{} } v, ok := pk.buf.PullUp(pk.headerOffsetOf(typ), h.length) if !ok { @@ -374,7 +364,7 @@ func (pk PacketBufferPtr) headerView(typ headerType) bufferv2.View { // Clone makes a semi-deep copy of pk. The underlying packet payload is // shared. Hence, no modifications is done to underlying packet payload. func (pk PacketBufferPtr) Clone() PacketBufferPtr { - newPk := pkPool.Get().(*packetBuffer) + newPk := pkPool.Get().(*PacketBuffer) newPk.reset() newPk.buf = pk.buf.Clone() newPk.reserved = pk.reserved @@ -394,9 +384,7 @@ func (pk PacketBufferPtr) Clone() PacketBufferPtr { newPk.NetworkPacketInfo = pk.NetworkPacketInfo newPk.tuple = pk.tuple newPk.InitRefs() - return PacketBufferPtr{ - packetBuffer: newPk, - } + return newPk } // ReserveHeaderBytes prepends reserved space for headers at the front @@ -406,7 +394,7 @@ func (pk PacketBufferPtr) ReserveHeaderBytes(reserved int) { panic(fmt.Sprintf("ReserveHeaderBytes(...) called on packet with reserved=%d, want reserved=0", pk.reserved)) } pk.reserved = reserved - pk.buf.Prepend(bufferv2.NewViewSize(reserved)) + pk.buf.Prepend(buffer.NewViewSize(reserved)) } // Network returns the network header as a header.Network. @@ -429,16 +417,14 @@ func (pk PacketBufferPtr) Network() header.Network { // See PacketBuffer.Data for details about how a packet buffer holds an inbound // packet. func (pk PacketBufferPtr) CloneToInbound() PacketBufferPtr { - newPk := pkPool.Get().(*packetBuffer) + newPk := pkPool.Get().(*PacketBuffer) newPk.reset() newPk.buf = pk.buf.Clone() newPk.InitRefs() // Treat unfilled header portion as reserved. newPk.reserved = pk.AvailableHeaderBytes() newPk.tuple = pk.tuple - return PacketBufferPtr{ - packetBuffer: newPk, - } + return newPk } // DeepCopyForForwarding creates a deep copy of the packet buffer for @@ -447,9 +433,11 @@ func (pk PacketBufferPtr) CloneToInbound() PacketBufferPtr { // The returned packet buffer will have the network and transport headers // set if the original packet buffer did. func (pk PacketBufferPtr) DeepCopyForForwarding(reservedHeaderBytes int) PacketBufferPtr { + payload := BufferSince(pk.NetworkHeader()) + defer payload.Release() newPk := NewPacketBuffer(PacketBufferOptions{ ReserveHeaderBytes: reservedHeaderBytes, - Payload: BufferSince(pk.NetworkHeader()), + Payload: payload.DeepClone(), IsForwardedPacket: true, }) @@ -476,7 +464,7 @@ func (pk PacketBufferPtr) DeepCopyForForwarding(reservedHeaderBytes int) PacketB // IsNil returns whether the pointer is logically nil. func (pk PacketBufferPtr) IsNil() bool { - return pk.packetBuffer == nil + return pk == nil } // headerInfo stores metadata about a header in a packet. 
@@ -498,8 +486,8 @@ type PacketHeader struct { } // View returns an caller-owned copy of the underlying storage of h as a -// *bufferv2.View. -func (h PacketHeader) View() *bufferv2.View { +// *buffer.View. +func (h PacketHeader) View() *buffer.View { view := h.pk.headerView(h.typ) if view.Size() == 0 { return nil @@ -565,7 +553,7 @@ func (d PacketData) ReadTo(dst io.Writer, peek bool) (int, error) { done int ) offset := d.pk.dataOffset() - d.pk.buf.SubApply(offset, int(d.pk.buf.Size())-offset, func(v *bufferv2.View) { + d.pk.buf.SubApply(offset, int(d.pk.buf.Size())-offset, func(v *buffer.View) { if err != nil { return } @@ -593,8 +581,8 @@ func (d PacketData) CapLength(length int) { d.pk.buf.Truncate(int64(length + d.pk.dataOffset())) } -// ToBuffer returns the underlying storage of d in a bufferv2.Buffer. -func (d PacketData) ToBuffer() bufferv2.Buffer { +// ToBuffer returns the underlying storage of d in a buffer.Buffer. +func (d PacketData) ToBuffer() buffer.Buffer { buf := d.pk.buf.Clone() offset := d.pk.dataOffset() buf.TrimFront(int64(offset)) @@ -602,12 +590,12 @@ func (d PacketData) ToBuffer() bufferv2.Buffer { } // AppendView appends v into d, taking the ownership of v. -func (d PacketData) AppendView(v *bufferv2.View) { +func (d PacketData) AppendView(v *buffer.View) { d.pk.buf.Append(v) } // MergeBuffer merges b into d and clears b. -func (d PacketData) MergeBuffer(b *bufferv2.Buffer) { +func (d PacketData) MergeBuffer(b *buffer.Buffer) { d.pk.buf.Merge(b) } @@ -620,7 +608,7 @@ func MergeFragment(dst, frag PacketBufferPtr) { // ReadFrom moves at most count bytes from the beginning of src to the end // of d and returns the number of bytes moved. -func (d PacketData) ReadFrom(src *bufferv2.Buffer, count int) int { +func (d PacketData) ReadFrom(src *buffer.Buffer, count int) int { toRead := int64(count) if toRead > src.Size() { toRead = src.Size() @@ -728,19 +716,19 @@ func (r Range) ToSlice() []byte { return nil } all := make([]byte, 0, r.length) - r.iterate(func(v *bufferv2.View) { + r.iterate(func(v *buffer.View) { all = append(all, v.AsSlice()...) }) return all } // ToView returns a caller-owned copy of data in r. -func (r Range) ToView() *bufferv2.View { +func (r Range) ToView() *buffer.View { if r.length == 0 { return nil } - newV := bufferv2.NewView(r.length) - r.iterate(func(v *bufferv2.View) { + newV := buffer.NewView(r.length) + r.iterate(func(v *buffer.View) { newV.Write(v.AsSlice()) }) return newV @@ -748,13 +736,13 @@ func (r Range) ToView() *bufferv2.View { // iterate calls fn for each piece in r. fn is always called with a non-empty // slice. -func (r Range) iterate(fn func(*bufferv2.View)) { +func (r Range) iterate(fn func(*buffer.View)) { r.pk.buf.SubApply(r.offset, r.length, fn) } // PayloadSince returns a caller-owned view containing the payload starting from // and including a particular header. -func PayloadSince(h PacketHeader) *bufferv2.View { +func PayloadSince(h PacketHeader) *buffer.View { offset := h.pk.headerOffset() for i := headerType(0); i < h.typ; i++ { offset += h.pk.headers[i].length @@ -768,7 +756,7 @@ func PayloadSince(h PacketHeader) *bufferv2.View { // BufferSince returns a caller-owned view containing the packet payload // starting from and including a particular header. 
-func BufferSince(h PacketHeader) bufferv2.Buffer { +func BufferSince(h PacketHeader) buffer.Buffer { offset := h.pk.headerOffset() for i := headerType(0); i < h.typ; i++ { offset += h.pk.headers[i].length diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_list.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_list.go index 6f09b804dc..226b3e4952 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_list.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_list.go @@ -14,20 +14,26 @@ package stack // PacketBufferList is a slice-backed list. All operations are O(1) unless -// otherwise noted. It is optimized to for zero allocations when used with a -// queueing discipline. +// otherwise noted. // -// Users should call Init() before using PacketBufferList. +// Note: this is intentionally backed by a slice, not an intrusive list. We've +// switched PacketBufferList back-and-forth between intrusive list and +// slice-backed implementations, and the latter has proven to be preferable: +// +// - Intrusive lists are a refcounting nightmare, as modifying the list +// sometimes-but-not-always modifies the list for others. +// - The slice-backed implementation has been benchmarked and is slightly more +// performant. // // +stateify savable type PacketBufferList struct { - pbs []PacketBufferPtr + pbs []*PacketBuffer } // AsSlice returns a slice containing the packets in the list. // //go:nosplit -func (pl *PacketBufferList) AsSlice() []PacketBufferPtr { +func (pl *PacketBufferList) AsSlice() []*PacketBuffer { return pl.pbs } @@ -37,7 +43,7 @@ func (pl *PacketBufferList) AsSlice() []PacketBufferPtr { func (pl *PacketBufferList) Reset() { for i, pb := range pl.pbs { pb.DecRef() - pl.pbs[i] = PacketBufferPtr{} + pl.pbs[i] = nil } pl.pbs = pl.pbs[:0] } @@ -52,7 +58,7 @@ func (pl *PacketBufferList) Len() int { // PushBack inserts the PacketBuffer at the back of the list. // //go:nosplit -func (pl *PacketBufferList) PushBack(pb PacketBufferPtr) { +func (pl *PacketBufferList) PushBack(pb *PacketBuffer) { pl.pbs = append(pl.pbs, pb) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_refs.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_refs.go index 8c205cda94..8b226b7373 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_refs.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_refs.go @@ -15,7 +15,7 @@ const packetBufferenableLogging = false // obj is used to customize logging. Note that we use a pointer to T so that // we do not copy the entire object when passed as a format parameter. -var packetBufferobj *packetBuffer +var packetBufferobj *PacketBuffer // Refs implements refs.RefCounter. It keeps a reference count using atomic // operations and calls the destructor when the count reaches zero. @@ -43,7 +43,8 @@ type packetBufferRefs struct { // InitRefs initializes r with one reference and, if enabled, activates leak // checking. func (r *packetBufferRefs) InitRefs() { - r.refCount.Store(1) + + r.refCount.RacyStore(1) refs.Register(r) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_unsafe.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_unsafe.go index f0160ea877..ddfb80045b 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_unsafe.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/packet_buffer_unsafe.go @@ -17,12 +17,12 @@ package stack import "unsafe" // PacketBufferStructSize is the minimal size of the packet buffer overhead. 
-const PacketBufferStructSize = int(unsafe.Sizeof(packetBuffer{})) +const PacketBufferStructSize = int(unsafe.Sizeof(PacketBuffer{})) // ID returns a unique ID for the underlying storage of the packet. // // Two PacketBufferPtrs have the same IDs if and only if they point to the same // location in memory. func (pk PacketBufferPtr) ID() uintptr { - return uintptr(unsafe.Pointer(pk.packetBuffer)) + return uintptr(unsafe.Pointer(pk)) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/registration.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/registration.go index cf3bfe3b2a..4cd720820e 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/registration.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/registration.go @@ -18,7 +18,7 @@ import ( "fmt" "time" - "gvisor.dev/gvisor/pkg/bufferv2" + "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/waiter" @@ -84,6 +84,21 @@ const ( // DestinationNetworkUnreachableTransportError indicates that the destination // network was unreachable. DestinationNetworkUnreachableTransportError + + // DestinationProtoUnreachableTransportError indicates that the destination + // protocol was unreachable. + DestinationProtoUnreachableTransportError + + // SourceRouteFailedTransportError indicates that the source route failed. + SourceRouteFailedTransportError + + // SourceHostIsolatedTransportError indicates that the source machine is not + // on the network. + SourceHostIsolatedTransportError + + // DestinationHostDownTransportError indicates that the destination host is + // down. + DestinationHostDownTransportError ) // TransportError is a marker interface for errors that may be handled by the @@ -1019,7 +1034,7 @@ type NetworkDispatcher interface { // This method should be called with both incoming and outgoing packets. // // If the link-layer has a header, the packet's link header must be populated. - DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr, incoming bool) + DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt PacketBufferPtr) } // LinkEndpointCapabilities is the type associated with the capabilities @@ -1050,6 +1065,9 @@ type LinkWriter interface { // WritePackets writes packets. Must not be called with an empty list of // packet buffers. // + // Each packet must have the link-layer header set, if the link requires + // one. + // // WritePackets may modify the packet buffers, and takes ownership of the PacketBufferList. // it is not safe to use the PacketBufferList after a call to WritePackets. WritePackets(PacketBufferList) (int, tcpip.Error) @@ -1106,6 +1124,9 @@ type NetworkLinkEndpoint interface { // AddHeader adds a link layer header to the packet if required. AddHeader(PacketBufferPtr) + + // ParseHeader parses the link layer header to the packet. + ParseHeader(PacketBufferPtr) bool } // QueueingDiscipline provides a queueing strategy for outgoing packets (e.g @@ -1146,7 +1167,7 @@ type InjectableLinkEndpoint interface { // link. // // dest is used by endpoints with multiple raw destinations. 
- InjectOutbound(dest tcpip.Address, packet *bufferv2.View) tcpip.Error + InjectOutbound(dest tcpip.Address, packet *buffer.View) tcpip.Error } // DADResult is a marker interface for the result of a duplicate address diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route.go index c7006577fc..9755362af7 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/route.go @@ -136,7 +136,7 @@ func (r *Route) fieldsLocked() RouteInfo { // // Returns an empty route if validation fails. func constructAndValidateRoute(netProto tcpip.NetworkProtocolNumber, addressEndpoint AssignableAddressEndpoint, localAddressNIC, outgoingNIC *nic, gateway, localAddr, remoteAddr tcpip.Address, handleLocal, multicastLoop bool) *Route { - if len(localAddr) == 0 { + if localAddr.BitLen() == 0 { localAddr = addressEndpoint.AddressWithPrefix().Address } @@ -146,7 +146,7 @@ func constructAndValidateRoute(netProto tcpip.NetworkProtocolNumber, addressEndp } // If no remote address is provided, use the local address. - if len(remoteAddr) == 0 { + if remoteAddr.BitLen() == 0 { remoteAddr = localAddr } @@ -172,7 +172,7 @@ func makeRoute(netProto tcpip.NetworkProtocolNumber, gateway, localAddr, remoteA panic(fmt.Sprintf("cannot create a route with NICs from different stacks")) } - if len(localAddr) == 0 { + if localAddr.BitLen() == 0 { localAddr = localAddressEndpoint.AddressWithPrefix().Address } @@ -182,7 +182,7 @@ func makeRoute(netProto tcpip.NetworkProtocolNumber, gateway, localAddr, remoteA // link endpoint level. We can remove this check once loopback interfaces // loop back packets at the network layer. if !outgoingNIC.IsLoopback() { - if handleLocal && localAddr != "" && remoteAddr == localAddr { + if handleLocal && localAddr != (tcpip.Address{}) && remoteAddr == localAddr { loop = PacketLoop } else if multicastLoop && (header.IsV4MulticastAddress(remoteAddr) || header.IsV6MulticastAddress(remoteAddr)) { loop |= PacketLoop @@ -206,7 +206,7 @@ func makeRoute(netProto tcpip.NetworkProtocolNumber, gateway, localAddr, remoteA } } - if len(gateway) > 0 { + if gateway.BitLen() > 0 { r.routeInfo.NextHop = gateway return r } @@ -327,8 +327,8 @@ func (r *Route) HasSaveRestoreCapability() bool { return r.outgoingNIC.NetworkLinkEndpoint.Capabilities()&CapabilitySaveRestore != 0 } -// HasDisconncetOkCapability returns true if the route supports disconnecting. -func (r *Route) HasDisconncetOkCapability() bool { +// HasDisconnectOkCapability returns true if the route supports disconnecting. 
+func (r *Route) HasDisconnectOkCapability() bool { return r.outgoingNIC.NetworkLinkEndpoint.Capabilities()&CapabilityDisconnectOk != 0 } @@ -434,7 +434,7 @@ func (r *Route) setCachedNeighborEntry(entry *neighborEntry) { } func (r *Route) nextHop() tcpip.Address { - if len(r.NextHop()) == 0 { + if r.NextHop().BitLen() == 0 { return r.RemoteAddress() } return r.NextHop() diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack.go index 892033974a..382fcdd495 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack.go @@ -29,7 +29,7 @@ import ( "golang.org/x/time/rate" "gvisor.dev/gvisor/pkg/atomicbitops" - "gvisor.dev/gvisor/pkg/bufferv2" + "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/log" cryptorand "gvisor.dev/gvisor/pkg/rand" "gvisor.dev/gvisor/pkg/tcpip" @@ -279,26 +279,26 @@ type TransportEndpointInfo struct { // Preconditon: the parent endpoint mu must be held while calling this method. func (t *TransportEndpointInfo) AddrNetProtoLocked(addr tcpip.FullAddress, v6only bool) (tcpip.FullAddress, tcpip.NetworkProtocolNumber, tcpip.Error) { netProto := t.NetProto - switch len(addr.Addr) { - case header.IPv4AddressSize: + switch addr.Addr.BitLen() { + case header.IPv4AddressSizeBits: netProto = header.IPv4ProtocolNumber - case header.IPv6AddressSize: + case header.IPv6AddressSizeBits: if header.IsV4MappedAddress(addr.Addr) { netProto = header.IPv4ProtocolNumber - addr.Addr = addr.Addr[header.IPv6AddressSize-header.IPv4AddressSize:] + addr.Addr = tcpip.AddrFrom4Slice(addr.Addr.AsSlice()[header.IPv6AddressSize-header.IPv4AddressSize:]) if addr.Addr == header.IPv4Any { - addr.Addr = "" + addr.Addr = tcpip.Address{} } } } - switch len(t.ID.LocalAddress) { - case header.IPv4AddressSize: - if len(addr.Addr) == header.IPv6AddressSize { + switch t.ID.LocalAddress.BitLen() { + case header.IPv4AddressSizeBits: + if addr.Addr.BitLen() == header.IPv6AddressSizeBits { return tcpip.FullAddress{}, 0, &tcpip.ErrInvalidEndpointState{} } - case header.IPv6AddressSize: - if len(addr.Addr) == header.IPv4AddressSize { + case header.IPv6AddressSizeBits: + if addr.Addr.BitLen() == header.IPv4AddressSizeBits { return tcpip.FullAddress{}, 0, &tcpip.ErrNetworkUnreachable{} } } @@ -710,11 +710,11 @@ func (s *Stack) SetPortRange(start uint16, end uint16) tcpip.Error { } // GROTimeout returns the GRO timeout. -func (s *Stack) GROTimeout(NICID int32) (time.Duration, tcpip.Error) { +func (s *Stack) GROTimeout(nicID tcpip.NICID) (time.Duration, tcpip.Error) { s.mu.RLock() defer s.mu.RUnlock() - nic, ok := s.nics[tcpip.NICID(NICID)] + nic, ok := s.nics[nicID] if !ok { return 0, &tcpip.ErrUnknownNICID{} } @@ -723,11 +723,11 @@ func (s *Stack) GROTimeout(NICID int32) (time.Duration, tcpip.Error) { } // SetGROTimeout sets the GRO timeout. 
-func (s *Stack) SetGROTimeout(NICID int32, timeout time.Duration) tcpip.Error { +func (s *Stack) SetGROTimeout(nicID tcpip.NICID, timeout time.Duration) tcpip.Error { s.mu.RLock() defer s.mu.RUnlock() - nic, ok := s.nics[tcpip.NICID(NICID)] + nic, ok := s.nics[nicID] if !ok { return &tcpip.ErrUnknownNICID{} } @@ -1049,11 +1049,9 @@ func (s *Stack) NICInfo() map[tcpip.NICID]NICInfo { } netStats := make(map[tcpip.NetworkProtocolNumber]NetworkEndpointStats) - nic.mu.RLock() for proto, netEP := range nic.networkEndpoints { netStats[proto] = netEP.Stats() } - nic.mu.RUnlock() info := NICInfo{ Name: nic.name, @@ -1169,7 +1167,7 @@ func (s *Stack) GetMainNICAddress(id tcpip.NICID, protocol tcpip.NetworkProtocol } func (s *Stack) getAddressEP(nic *nic, localAddr, remoteAddr tcpip.Address, netProto tcpip.NetworkProtocolNumber) AssignableAddressEndpoint { - if len(localAddr) == 0 { + if localAddr.BitLen() == 0 { return nic.primaryEndpoint(netProto, remoteAddr) } return nic.findEndpoint(netProto, localAddr, CanBePrimaryEndpoint) @@ -1188,8 +1186,8 @@ func (s *Stack) NewRouteForMulticast(nicID tcpip.NICID, remoteAddr tcpip.Address return nil } - if addressEndpoint := s.getAddressEP(nic, "" /* localAddr */, remoteAddr, netProto); addressEndpoint != nil { - return constructAndValidateRoute(netProto, addressEndpoint, nic, nic, "" /* gateway */, "" /* localAddr */, remoteAddr, s.handleLocal, false /* multicastLoop */) + if addressEndpoint := s.getAddressEP(nic, tcpip.Address{} /* localAddr */, remoteAddr, netProto); addressEndpoint != nil { + return constructAndValidateRoute(netProto, addressEndpoint, nic, nic, tcpip.Address{} /* gateway */, tcpip.Address{} /* localAddr */, remoteAddr, s.handleLocal, false /* multicastLoop */) } return nil } @@ -1252,7 +1250,7 @@ func (s *Stack) findLocalRouteFromNICRLocked(localAddressNIC *nic, localAddr, re // // +checklocksread:s.mu func (s *Stack) findLocalRouteRLocked(localAddressNICID tcpip.NICID, localAddr, remoteAddr tcpip.Address, netProto tcpip.NetworkProtocolNumber) *Route { - if len(localAddr) == 0 { + if localAddr.BitLen() == 0 { localAddr = remoteAddr } @@ -1326,7 +1324,7 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n if addressEndpoint := s.getAddressEP(nic, localAddr, remoteAddr, netProto); addressEndpoint != nil { return makeRoute( netProto, - "", /* gateway */ + tcpip.Address{}, /* gateway */ localAddr, remoteAddr, nic, /* outboundNIC */ @@ -1353,7 +1351,7 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n defer s.routeMu.RUnlock() for _, route := range s.routeTable { - if len(remoteAddr) != 0 && !route.Destination.Contains(remoteAddr) { + if remoteAddr.BitLen() != 0 && !route.Destination.Contains(remoteAddr) { continue } @@ -1383,7 +1381,7 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n // requirement to do this from any RFC but simply a choice made to better // follow a strong host model which the netstack follows at the time of // writing. 
- if onlyGlobalAddresses && chosenRoute == (tcpip.Route{}) && isNICForwarding(nic, netProto) { + if onlyGlobalAddresses && chosenRoute.Equal(tcpip.Route{}) && isNICForwarding(nic, netProto) { chosenRoute = route } } @@ -1393,7 +1391,7 @@ func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, n return r, nil } - if chosenRoute != (tcpip.Route{}) { + if !chosenRoute.Equal(tcpip.Route{}) { // At this point we know the stack has forwarding enabled since chosenRoute is // only set when forwarding is enabled. nic, ok := s.nics[chosenRoute.NIC] @@ -1858,7 +1856,7 @@ func (s *Stack) unregisterPacketEndpointLocked(nicID tcpip.NICID, netProto tcpip // WritePacketToRemote writes a payload on the specified NIC using the provided // network protocol and remote link address. -func (s *Stack) WritePacketToRemote(nicID tcpip.NICID, remote tcpip.LinkAddress, netProto tcpip.NetworkProtocolNumber, payload bufferv2.Buffer) tcpip.Error { +func (s *Stack) WritePacketToRemote(nicID tcpip.NICID, remote tcpip.LinkAddress, netProto tcpip.NetworkProtocolNumber, payload buffer.Buffer) tcpip.Error { s.mu.Lock() nic, ok := s.nics[nicID] s.mu.Unlock() @@ -1876,7 +1874,7 @@ func (s *Stack) WritePacketToRemote(nicID tcpip.NICID, remote tcpip.LinkAddress, // WriteRawPacket writes data directly to the specified NIC without adding any // headers. -func (s *Stack) WriteRawPacket(nicID tcpip.NICID, proto tcpip.NetworkProtocolNumber, payload bufferv2.Buffer) tcpip.Error { +func (s *Stack) WriteRawPacket(nicID tcpip.NICID, proto tcpip.NetworkProtocolNumber, payload buffer.Buffer) tcpip.Error { s.mu.RLock() nic, ok := s.nics[nicID] s.mu.RUnlock() @@ -1889,7 +1887,7 @@ func (s *Stack) WriteRawPacket(nicID tcpip.NICID, proto tcpip.NetworkProtocolNum }) defer pkt.DecRef() pkt.NetworkProtocolNumber = proto - return nic.writeRawPacket(pkt) + return nic.writeRawPacketWithLinkHeaderInPayload(pkt) } // NetworkProtocolInstance returns the protocol instance in the stack for the diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_state_autogen.go index f0f97be3ef..cb566962d4 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/stack_state_autogen.go @@ -487,36 +487,11 @@ func (e *neighborEntryEntry) StateLoad(stateSourceObject state.Source) { stateSourceObject.Load(1, &e.prev) } -func (pk *PacketBufferPtr) StateTypeName() string { - return "pkg/tcpip/stack.PacketBufferPtr" +func (p *PacketBuffer) StateTypeName() string { + return "pkg/tcpip/stack.PacketBuffer" } -func (pk *PacketBufferPtr) StateFields() []string { - return []string{ - "packetBuffer", - } -} - -func (pk *PacketBufferPtr) beforeSave() {} - -// +checklocksignore -func (pk *PacketBufferPtr) StateSave(stateSinkObject state.Sink) { - pk.beforeSave() - stateSinkObject.Save(0, &pk.packetBuffer) -} - -func (pk *PacketBufferPtr) afterLoad() {} - -// +checklocksignore -func (pk *PacketBufferPtr) StateLoad(stateSourceObject state.Source) { - stateSourceObject.Load(0, &pk.packetBuffer) -} - -func (pk *packetBuffer) StateTypeName() string { - return "pkg/tcpip/stack.packetBuffer" -} - -func (pk *packetBuffer) StateFields() []string { +func (p *PacketBuffer) StateFields() []string { return []string{ "packetBufferRefs", "buf", @@ -540,55 +515,55 @@ func (pk *packetBuffer) StateFields() []string { } } -func (pk *packetBuffer) beforeSave() {} - -// +checklocksignore -func (pk *packetBuffer) StateSave(stateSinkObject 
state.Sink) { - pk.beforeSave() - stateSinkObject.Save(0, &pk.packetBufferRefs) - stateSinkObject.Save(1, &pk.buf) - stateSinkObject.Save(2, &pk.reserved) - stateSinkObject.Save(3, &pk.pushed) - stateSinkObject.Save(4, &pk.consumed) - stateSinkObject.Save(5, &pk.headers) - stateSinkObject.Save(6, &pk.NetworkProtocolNumber) - stateSinkObject.Save(7, &pk.TransportProtocolNumber) - stateSinkObject.Save(8, &pk.Hash) - stateSinkObject.Save(9, &pk.Owner) - stateSinkObject.Save(10, &pk.EgressRoute) - stateSinkObject.Save(11, &pk.GSOOptions) - stateSinkObject.Save(12, &pk.snatDone) - stateSinkObject.Save(13, &pk.dnatDone) - stateSinkObject.Save(14, &pk.PktType) - stateSinkObject.Save(15, &pk.NICID) - stateSinkObject.Save(16, &pk.RXChecksumValidated) - stateSinkObject.Save(17, &pk.NetworkPacketInfo) - stateSinkObject.Save(18, &pk.tuple) -} - -func (pk *packetBuffer) afterLoad() {} - -// +checklocksignore -func (pk *packetBuffer) StateLoad(stateSourceObject state.Source) { - stateSourceObject.Load(0, &pk.packetBufferRefs) - stateSourceObject.Load(1, &pk.buf) - stateSourceObject.Load(2, &pk.reserved) - stateSourceObject.Load(3, &pk.pushed) - stateSourceObject.Load(4, &pk.consumed) - stateSourceObject.Load(5, &pk.headers) - stateSourceObject.Load(6, &pk.NetworkProtocolNumber) - stateSourceObject.Load(7, &pk.TransportProtocolNumber) - stateSourceObject.Load(8, &pk.Hash) - stateSourceObject.Load(9, &pk.Owner) - stateSourceObject.Load(10, &pk.EgressRoute) - stateSourceObject.Load(11, &pk.GSOOptions) - stateSourceObject.Load(12, &pk.snatDone) - stateSourceObject.Load(13, &pk.dnatDone) - stateSourceObject.Load(14, &pk.PktType) - stateSourceObject.Load(15, &pk.NICID) - stateSourceObject.Load(16, &pk.RXChecksumValidated) - stateSourceObject.Load(17, &pk.NetworkPacketInfo) - stateSourceObject.Load(18, &pk.tuple) +func (p *PacketBuffer) beforeSave() {} + +// +checklocksignore +func (p *PacketBuffer) StateSave(stateSinkObject state.Sink) { + p.beforeSave() + stateSinkObject.Save(0, &p.packetBufferRefs) + stateSinkObject.Save(1, &p.buf) + stateSinkObject.Save(2, &p.reserved) + stateSinkObject.Save(3, &p.pushed) + stateSinkObject.Save(4, &p.consumed) + stateSinkObject.Save(5, &p.headers) + stateSinkObject.Save(6, &p.NetworkProtocolNumber) + stateSinkObject.Save(7, &p.TransportProtocolNumber) + stateSinkObject.Save(8, &p.Hash) + stateSinkObject.Save(9, &p.Owner) + stateSinkObject.Save(10, &p.EgressRoute) + stateSinkObject.Save(11, &p.GSOOptions) + stateSinkObject.Save(12, &p.snatDone) + stateSinkObject.Save(13, &p.dnatDone) + stateSinkObject.Save(14, &p.PktType) + stateSinkObject.Save(15, &p.NICID) + stateSinkObject.Save(16, &p.RXChecksumValidated) + stateSinkObject.Save(17, &p.NetworkPacketInfo) + stateSinkObject.Save(18, &p.tuple) +} + +func (p *PacketBuffer) afterLoad() {} + +// +checklocksignore +func (p *PacketBuffer) StateLoad(stateSourceObject state.Source) { + stateSourceObject.Load(0, &p.packetBufferRefs) + stateSourceObject.Load(1, &p.buf) + stateSourceObject.Load(2, &p.reserved) + stateSourceObject.Load(3, &p.pushed) + stateSourceObject.Load(4, &p.consumed) + stateSourceObject.Load(5, &p.headers) + stateSourceObject.Load(6, &p.NetworkProtocolNumber) + stateSourceObject.Load(7, &p.TransportProtocolNumber) + stateSourceObject.Load(8, &p.Hash) + stateSourceObject.Load(9, &p.Owner) + stateSourceObject.Load(10, &p.EgressRoute) + stateSourceObject.Load(11, &p.GSOOptions) + stateSourceObject.Load(12, &p.snatDone) + stateSourceObject.Load(13, &p.dnatDone) + stateSourceObject.Load(14, &p.PktType) + 
stateSourceObject.Load(15, &p.NICID) + stateSourceObject.Load(16, &p.RXChecksumValidated) + stateSourceObject.Load(17, &p.NetworkPacketInfo) + stateSourceObject.Load(18, &p.tuple) } func (h *headerInfo) StateTypeName() string { @@ -1581,8 +1556,7 @@ func init() { state.Register((*IPHeaderFilter)(nil)) state.Register((*neighborEntryList)(nil)) state.Register((*neighborEntryEntry)(nil)) - state.Register((*PacketBufferPtr)(nil)) - state.Register((*packetBuffer)(nil)) + state.Register((*PacketBuffer)(nil)) state.Register((*headerInfo)(nil)) state.Register((*PacketData)(nil)) state.Register((*PacketBufferList)(nil)) diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/transport_demuxer.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/transport_demuxer.go index adb54ffe45..6d38b637e2 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/transport_demuxer.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stack/transport_demuxer.go @@ -82,7 +82,7 @@ func (eps *transportEndpoints) iterEndpointsLocked(id TransportEndpointID, yield // Try to find a match with the id minus the local address. nid := id - nid.LocalAddress = "" + nid.LocalAddress = tcpip.Address{} if ep, ok := eps.endpoints[nid]; ok { if !yield(ep) { return @@ -91,7 +91,7 @@ func (eps *transportEndpoints) iterEndpointsLocked(id TransportEndpointID, yield // Try to find a match with the id minus the remote part. nid.LocalAddress = id.LocalAddress - nid.RemoteAddress = "" + nid.RemoteAddress = tcpip.Address{} nid.RemotePort = 0 if ep, ok := eps.endpoints[nid]; ok { if !yield(ep) { @@ -100,7 +100,7 @@ func (eps *transportEndpoints) iterEndpointsLocked(id TransportEndpointID, yield } // Try to find a match with only the local port. - nid.LocalAddress = "" + nid.LocalAddress = tcpip.Address{} if ep, ok := eps.endpoints[nid]; ok { if !yield(ep) { return @@ -392,8 +392,8 @@ func (ep *multiPortEndpoint) selectEndpoint(id TransportEndpointID, seed uint32) h := jenkins.Sum32(seed) h.Write(payload) - h.Write([]byte(id.LocalAddress)) - h.Write([]byte(id.RemoteAddress)) + h.Write(id.LocalAddress.AsSlice()) + h.Write(id.RemoteAddress.AsSlice()) hash := h.Sum32() idx := reciprocalScale(hash, uint32(len(ep.endpoints))) diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stdclock.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stdclock.go index 371da2f409..cc3397ca91 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stdclock.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stdclock.go @@ -17,8 +17,6 @@ package tcpip import ( "fmt" "time" - - "gvisor.dev/gvisor/pkg/sync" ) // stdClock implements Clock with the time package. @@ -57,14 +55,9 @@ type stdClock struct { // monotonicOffset is the offset applied to the calculated monotonic time. // - // monotonicOffset is assigned maxMonotonic after restore so that the - // monotonic time will continue from where it "left off" before saving as part - // of S/R. - monotonicOffset MonotonicTime `state:"nosave"` - - // monotonicMU protects maxMonotonic. - monotonicMU sync.Mutex `state:"nosave"` - maxMonotonic MonotonicTime + // monotonicOffset is assigned after restore so that the monotonic time + // will continue from where it "left off" before saving as part of S/R. + monotonicOffset MonotonicTime } // NewStdClock returns an instance of a clock that uses the time package. 
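The hunks above follow gvisor's switch from string-backed tcpip.Address values to an opaque struct: emptiness checks such as len(addr) == 0 become addr.BitLen() == 0, the "" sentinel becomes the zero value tcpip.Address{}, and raw byte access for hashing goes through addr.AsSlice(). A minimal sketch of the new calling pattern, assuming the vendored gvisor revision; the package and helper names below are illustrative and not part of this patch:

	package example

	import (
		"hash"

		"gvisor.dev/gvisor/pkg/tcpip"
	)

	// isUnset replaces the old len(addr) == 0 check on the string-backed type.
	func isUnset(addr tcpip.Address) bool {
		return addr.BitLen() == 0
	}

	// hashAddr replaces h.Write([]byte(addr)): AsSlice exposes the address
	// bytes without converting through a string.
	func hashAddr(h hash.Hash, addr tcpip.Address) {
		_, _ = h.Write(addr.AsSlice())
	}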
@@ -88,17 +81,7 @@ func (s *stdClock) NowMonotonic() MonotonicTime { panic(fmt.Sprintf("got negative duration = %s since base time = %s", sinceBase, s.baseTime)) } - monotonicValue := s.monotonicOffset.Add(sinceBase) - - s.monotonicMU.Lock() - defer s.monotonicMU.Unlock() - - // Monotonic time values must never decrease. - if s.maxMonotonic.Before(monotonicValue) { - s.maxMonotonic = monotonicValue - } - - return s.maxMonotonic + return s.monotonicOffset.Add(sinceBase) } // AfterFunc implements Clock.AfterFunc. diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/stdclock_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/stdclock_state.go index 795db9181d..25be1755fa 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/stdclock_state.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/stdclock_state.go @@ -16,11 +16,12 @@ package tcpip import "time" +// beforeSave is invoked by stateify. +func (s *stdClock) beforeSave() { + s.monotonicOffset = s.NowMonotonic() +} + // afterLoad is invoked by stateify. func (s *stdClock) afterLoad() { s.baseTime = time.Now() - - s.monotonicMU.Lock() - defer s.monotonicMU.Unlock() - s.monotonicOffset = s.maxMonotonic } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip.go index a439709013..92ac54db4b 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip.go @@ -151,19 +151,116 @@ type Timer interface { // Address is a byte slice cast as a string that represents the address of a // network node. Or, in the case of unix endpoints, it may represent a path. -type Address string +// +// +stateify savable +type Address struct { + addr [16]byte + length int +} + +// AddrFrom4 converts addr to an Address. +func AddrFrom4(addr [4]byte) Address { + ret := Address{ + length: 4, + } + // It's guaranteed that copy will return 4. + copy(ret.addr[:], addr[:]) + return ret +} + +// AddrFrom4Slice converts addr to an Address. It panics if len(addr) != 4. +func AddrFrom4Slice(addr []byte) Address { + if len(addr) != 4 { + panic(fmt.Sprintf("bad address length for address %v", addr)) + } + ret := Address{ + length: 4, + } + // It's guaranteed that copy will return 4. + copy(ret.addr[:], addr) + return ret +} + +// AddrFrom16 converts addr to an Address. +func AddrFrom16(addr [16]byte) Address { + ret := Address{ + length: 16, + } + // It's guaranteed that copy will return 16. + copy(ret.addr[:], addr[:]) + return ret +} + +// AddrFrom16Slice converts addr to an Address. It panics if len(addr) != 16. +func AddrFrom16Slice(addr []byte) Address { + if len(addr) != 16 { + panic(fmt.Sprintf("bad address length for address %v", addr)) + } + ret := Address{ + length: 16, + } + // It's guaranteed that copy will return 16. + copy(ret.addr[:], addr) + return ret +} + +// AddrFromSlice converts addr to an Address. It returns the Address zero value +// if len(addr) != 4 or 16. +func AddrFromSlice(addr []byte) Address { + switch len(addr) { + case ipv4AddressSize: + return AddrFrom4Slice(addr) + case ipv6AddressSize: + return AddrFrom16Slice(addr) + } + return Address{} +} + +// As4 returns a as a 4 byte array. It panics if the address length is not 4. +func (a Address) As4() [4]byte { + if a.Len() != 4 { + panic(fmt.Sprintf("bad address length for address %v", a.addr)) + } + return [4]byte(a.addr[:4]) +} + +// As16 returns a as a 16 byte array. It panics if the address length is not 16. 
+func (a Address) As16() [16]byte { + if a.Len() != 16 { + panic(fmt.Sprintf("bad address length for address %v", a.addr)) + } + return [16]byte(a.addr[:16]) +} + +// AsSlice returns a as a byte slice. Callers should be careful as it can +// return a window into existing memory. +// +// +checkescape +func (a *Address) AsSlice() []byte { + return a.addr[:a.length] +} + +// BitLen returns the length in bits of a. +func (a Address) BitLen() int { + return a.Len() * 8 +} + +// Len returns the length in bytes of a. +func (a Address) Len() int { + return a.length +} // WithPrefix returns the address with a prefix that represents a point subnet. func (a Address) WithPrefix() AddressWithPrefix { return AddressWithPrefix{ Address: a, - PrefixLen: len(a) * 8, + PrefixLen: a.BitLen(), } } // Unspecified returns true if the address is unspecified. func (a Address) Unspecified() bool { - for _, b := range a { + for _, b := range a.addr { if b != 0 { return false } @@ -171,20 +268,26 @@ func (a Address) Unspecified() bool { return true } +// Equal returns whether a and other are equal. It exists for use by the cmp +// library. +func (a Address) Equal(other Address) bool { + return a == other +} + // MatchingPrefix returns the matching prefix length in bits. // // Panics if b and a have different lengths. func (a Address) MatchingPrefix(b Address) uint8 { const bitsInAByte = 8 - if len(a) != len(b) { + if a.Len() != b.Len() { panic(fmt.Sprintf("addresses %s and %s do not have the same length", a, b)) } var prefix uint8 - for i := range a { - aByte := a[i] - bByte := b[i] + for i := 0; i < a.length; i++ { + aByte := a.addr[i] + bByte := b.addr[i] if aByte == bByte { prefix += bitsInAByte @@ -210,22 +313,58 @@ func (a Address) MatchingPrefix(b Address) uint8 { } // AddressMask is a bitmask for an address. -type AddressMask string +// +// +stateify savable +type AddressMask struct { + mask string +} + +// MaskFrom returns a Mask based on str. +func MaskFrom(str string) AddressMask { + return AddressMask{mask: str} +} + +// MaskFromBytes returns a Mask based on bs. +func MaskFromBytes(bs []byte) AddressMask { + return AddressMask{mask: string(bs)} +} // String implements Stringer. func (m AddressMask) String() string { - return Address(m).String() + return fmt.Sprintf("%x", m.mask) +} + +// AsSlice returns a as a byte slice. Callers should be careful as it can +// return a window into existing memory. +func (m *AddressMask) AsSlice() []byte { + return []byte(m.mask) +} + +// BitLen returns the length of the mask in bits. +func (m AddressMask) BitLen() int { + return len(m.mask) * 8 +} + +// Len returns the length of the mask in bytes. +func (m AddressMask) Len() int { + return len(m.mask) } // Prefix returns the number of bits before the first host bit. func (m AddressMask) Prefix() int { p := 0 - for _, b := range []byte(m) { + for _, b := range []byte(m.mask) { p += bits.LeadingZeros8(^b) } return p } +// Equal returns whether m and other are equal. It exists for use by the cmp +// library. +func (m AddressMask) Equal(other AddressMask) bool { + return m == other +} + // Subnet is a subnet defined by its address and mask. type Subnet struct { address Address @@ -234,11 +373,11 @@ type Subnet struct { // NewSubnet creates a new Subnet, checking that the address and mask are the same length. 
func NewSubnet(a Address, m AddressMask) (Subnet, error) { - if len(a) != len(m) { + if a.Len() != m.Len() { return Subnet{}, errSubnetLengthMismatch } - for i := 0; i < len(a); i++ { - if a[i]&^m[i] != 0 { + for i := 0; i < a.Len(); i++ { + if a.addr[i]&^m.mask[i] != 0 { return Subnet{}, errSubnetAddressMasked } } @@ -253,11 +392,11 @@ func (s Subnet) String() string { // Contains returns true iff the address is of the same length and matches the // subnet address and mask. func (s *Subnet) Contains(a Address) bool { - if len(a) != len(s.address) { + if a.Len() != s.address.Len() { return false } - for i := 0; i < len(a); i++ { - if a[i]&s.mask[i] != s.address[i] { + for i := 0; i < a.Len(); i++ { + if a.addr[i]&s.mask.mask[i] != s.address.addr[i] { return false } } @@ -273,7 +412,7 @@ func (s *Subnet) ID() Address { // subnet mask. func (s *Subnet) Bits() (ones int, zeros int) { ones = s.mask.Prefix() - return ones, len(s.mask)*8 - ones + return ones, s.mask.BitLen() - ones } // Prefix returns the number of bits before the first host bit. @@ -288,17 +427,17 @@ func (s *Subnet) Mask() AddressMask { // Broadcast returns the subnet's broadcast address. func (s *Subnet) Broadcast() Address { - addr := []byte(s.address) - for i := range addr { - addr[i] |= ^s.mask[i] + addrCopy := s.address + for i := 0; i < addrCopy.Len(); i++ { + addrCopy.addr[i] |= ^s.mask.mask[i] } - return Address(addr) + return addrCopy } // IsBroadcast returns true if the address is considered a broadcast address. func (s *Subnet) IsBroadcast(address Address) bool { // Only IPv4 supports the notion of a broadcast address. - if len(address) != ipv4AddressSize { + if address.Len() != ipv4AddressSize { return false } @@ -370,13 +509,16 @@ type FullAddress struct { // This may not be used by all endpoint types. NIC NICID - // Addr is the network or link layer address. + // Addr is the network address. Addr Address // Port is the transport port. // // This may not be used by all endpoint types. Port uint16 + + // LinkAddr is the link layer address. + LinkAddr LinkAddress } // Payloader is an interface that provides data. @@ -1358,7 +1500,7 @@ type Route struct { func (r Route) String() string { var out strings.Builder _, _ = fmt.Fprintf(&out, "%s", r.Destination) - if len(r.Gateway) > 0 { + if r.Gateway.length > 0 { _, _ = fmt.Fprintf(&out, " via %s", r.Gateway) } _, _ = fmt.Fprintf(&out, " nic %d", r.NIC) @@ -1368,7 +1510,7 @@ func (r Route) String() string { // Equal returns true if the given Route is equal to this Route. func (r Route) Equal(to Route) bool { // NOTE: This relies on the fact that r.Destination == to.Destination - return r == to + return r.Destination.Equal(to.Destination) && r.Gateway == to.Gateway && r.NIC == to.NIC } // TransportProtocolNumber is the number of a transport protocol. @@ -1398,7 +1540,7 @@ func (s *StatCounter) Decrement() { } // Value returns the current value of the counter. -func (s *StatCounter) Value(...string) uint64 { +func (s *StatCounter) Value() uint64 { return s.count.Load() } @@ -1563,6 +1705,10 @@ type ICMPv6PacketStats struct { // counted. MulticastListenerReport *StatCounter + // MulticastListenerReportV2 is the number of Multicast Listener Report + // messages counted. + MulticastListenerReportV2 *StatCounter + // MulticastListenerDone is the number of Multicast Listener Done messages // counted. MulticastListenerDone *StatCounter @@ -1643,6 +1789,10 @@ type IGMPPacketStats struct { // counted. 
V2MembershipReport *StatCounter + // V3MembershipReport is the number of Version 3 Membership Report messages + // counted. + V3MembershipReport *StatCounter + // LeaveGroup is the number of Leave Group messages counted. LeaveGroup *StatCounter @@ -1705,6 +1855,11 @@ type IPForwardingStats struct { // because their TTL was exhausted. ExhaustedTTL *StatCounter + // InitializingSource is the number of IP packets which were dropped + // because they contained a source address that may only be used on the local + // network as part of initialization work. + InitializingSource *StatCounter + // LinkLocalSource is the number of IP packets which were dropped // because they contained a link-local source address. LinkLocalSource *StatCounter @@ -1739,6 +1894,10 @@ type IPForwardingStats struct { // were dropped due to insufficent buffer space in the pending packet queue. NoMulticastPendingQueueBufferSpace *StatCounter + // OutgoingDeviceNoBufferSpace is the number of packets that were dropped due + // to insufficient space in the outgoing device. + OutgoingDeviceNoBufferSpace *StatCounter + // Errors is the number of IP packets received which could not be // successfully forwarded. Errors *StatCounter @@ -2358,15 +2517,15 @@ func clone(dst reflect.Value, src reflect.Value) { // String implements the fmt.Stringer interface. func (a Address) String() string { - switch len(a) { + switch l := a.Len(); l { case 4: - return fmt.Sprintf("%d.%d.%d.%d", int(a[0]), int(a[1]), int(a[2]), int(a[3])) + return fmt.Sprintf("%d.%d.%d.%d", int(a.addr[0]), int(a.addr[1]), int(a.addr[2]), int(a.addr[3])) case 16: // Find the longest subsequence of hexadecimal zeros. start, end := -1, -1 - for i := 0; i < len(a); i += 2 { + for i := 0; i < a.Len(); i += 2 { j := i - for j < len(a) && a[j] == 0 && a[j+1] == 0 { + for j < a.Len() && a.addr[j] == 0 && a.addr[j+1] == 0 { j += 2 } if j > i+2 && j-i > end-start { @@ -2375,17 +2534,17 @@ func (a Address) String() string { } var b strings.Builder - for i := 0; i < len(a); i += 2 { + for i := 0; i < a.Len(); i += 2 { if i == start { b.WriteString("::") i = end - if end >= len(a) { + if end >= a.Len() { break } } else if i > 0 { b.WriteByte(':') } - v := uint16(a[i+0])<<8 | uint16(a[i+1]) + v := uint16(a.addr[i+0])<<8 | uint16(a.addr[i+1]) if v == 0 { b.WriteByte('0') } else { @@ -2399,33 +2558,33 @@ func (a Address) String() string { } return b.String() default: - return fmt.Sprintf("%x", []byte(a)) + return fmt.Sprintf("%x", a.addr[:l]) } } // To4 converts the IPv4 address to a 4-byte representation. -// If the address is not an IPv4 address, To4 returns "". +// If the address is not an IPv4 address, To4 returns the empty Address. func (a Address) To4() Address { const ( ipv4len = 4 ipv6len = 16 ) - if len(a) == ipv4len { + if a.Len() == ipv4len { return a } - if len(a) == ipv6len && - isZeros(a[0:10]) && - a[10] == 0xff && - a[11] == 0xff { - return a[12:16] + if a.Len() == ipv6len && + isZeros(a.addr[:10]) && + a.addr[10] == 0xff && + a.addr[11] == 0xff { + return AddrFrom4Slice(a.addr[12:16]) } - return "" + return Address{} } -// isZeros reports whether a is all zeros. -func isZeros(a Address) bool { - for i := 0; i < len(a); i++ { - if a[i] != 0 { +// isZeros reports whether addr is all zeros. +func isZeros(addr []byte) bool { + for _, b := range addr { + if b != 0 { return false } } @@ -2468,6 +2627,8 @@ func ParseMACAddress(s string) (LinkAddress, error) { } // AddressWithPrefix is an address with its subnet prefix length. 
+// +// +stateify savable type AddressWithPrefix struct { // Address is a network address. Address Address @@ -2483,17 +2644,17 @@ func (a AddressWithPrefix) String() string { // Subnet converts the address and prefix into a Subnet value and returns it. func (a AddressWithPrefix) Subnet() Subnet { - addrLen := len(a.Address) + addrLen := a.Address.length if a.PrefixLen <= 0 { return Subnet{ - address: Address(strings.Repeat("\x00", addrLen)), - mask: AddressMask(strings.Repeat("\x00", addrLen)), + address: AddrFromSlice(bytes.Repeat([]byte{0}, addrLen)), + mask: MaskFromBytes(bytes.Repeat([]byte{0}, addrLen)), } } if a.PrefixLen >= addrLen*8 { return Subnet{ address: a.Address, - mask: AddressMask(strings.Repeat("\xff", addrLen)), + mask: MaskFromBytes(bytes.Repeat([]byte{0xff}, addrLen)), } } @@ -2502,20 +2663,20 @@ func (a AddressWithPrefix) Subnet() Subnet { n := uint(a.PrefixLen) for i := 0; i < addrLen; i++ { if n >= 8 { - sa[i] = a.Address[i] + sa[i] = a.Address.addr[i] sm[i] = 0xff n -= 8 continue } sm[i] = ^byte(0xff >> n) - sa[i] = a.Address[i] & sm[i] + sa[i] = a.Address.addr[i] & sm[i] n = 0 } // For extra caution, call NewSubnet rather than directly creating the Subnet // value. If that fails it indicates a serious bug in this code, so panic is // in order. - s, err := NewSubnet(Address(sa), AddressMask(sm)) + s, err := NewSubnet(AddrFromSlice(sa), MaskFromBytes(sm)) if err != nil { panic("invalid subnet: " + err.Error()) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_state_autogen.go index 4965e3a18b..7fce7aeb49 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/tcpip_state_autogen.go @@ -573,6 +573,48 @@ func (e *ErrHostUnreachable) afterLoad() {} func (e *ErrHostUnreachable) StateLoad(stateSourceObject state.Source) { } +func (e *ErrHostDown) StateTypeName() string { + return "pkg/tcpip.ErrHostDown" +} + +func (e *ErrHostDown) StateFields() []string { + return []string{} +} + +func (e *ErrHostDown) beforeSave() {} + +// +checklocksignore +func (e *ErrHostDown) StateSave(stateSinkObject state.Sink) { + e.beforeSave() +} + +func (e *ErrHostDown) afterLoad() {} + +// +checklocksignore +func (e *ErrHostDown) StateLoad(stateSourceObject state.Source) { +} + +func (e *ErrNoNet) StateTypeName() string { + return "pkg/tcpip.ErrNoNet" +} + +func (e *ErrNoNet) StateFields() []string { + return []string{} +} + +func (e *ErrNoNet) beforeSave() {} + +// +checklocksignore +func (e *ErrNoNet) StateSave(stateSinkObject state.Sink) { + e.beforeSave() +} + +func (e *ErrNoNet) afterLoad() {} + +// +checklocksignore +func (e *ErrNoNet) StateLoad(stateSourceObject state.Source) { +} + func (e *ErrNoSuchFile) StateTypeName() string { return "pkg/tcpip.ErrNoSuchFile" } @@ -1103,21 +1145,19 @@ func (s *stdClock) StateTypeName() string { func (s *stdClock) StateFields() []string { return []string{ - "maxMonotonic", + "monotonicOffset", } } -func (s *stdClock) beforeSave() {} - // +checklocksignore func (s *stdClock) StateSave(stateSinkObject state.Sink) { s.beforeSave() - stateSinkObject.Save(0, &s.maxMonotonic) + stateSinkObject.Save(0, &s.monotonicOffset) } // +checklocksignore func (s *stdClock) StateLoad(stateSourceObject state.Source) { - stateSourceObject.Load(0, &s.maxMonotonic) + stateSourceObject.Load(0, &s.monotonicOffset) stateSourceObject.AfterLoad(s.afterLoad) } @@ -1146,6 +1186,59 @@ func (mt *MonotonicTime) StateLoad(stateSourceObject state.Source) { 
stateSourceObject.Load(0, &mt.nanoseconds) } +func (a *Address) StateTypeName() string { + return "pkg/tcpip.Address" +} + +func (a *Address) StateFields() []string { + return []string{ + "addr", + "length", + } +} + +func (a *Address) beforeSave() {} + +// +checklocksignore +func (a *Address) StateSave(stateSinkObject state.Sink) { + a.beforeSave() + stateSinkObject.Save(0, &a.addr) + stateSinkObject.Save(1, &a.length) +} + +func (a *Address) afterLoad() {} + +// +checklocksignore +func (a *Address) StateLoad(stateSourceObject state.Source) { + stateSourceObject.Load(0, &a.addr) + stateSourceObject.Load(1, &a.length) +} + +func (m *AddressMask) StateTypeName() string { + return "pkg/tcpip.AddressMask" +} + +func (m *AddressMask) StateFields() []string { + return []string{ + "mask", + } +} + +func (m *AddressMask) beforeSave() {} + +// +checklocksignore +func (m *AddressMask) StateSave(stateSinkObject state.Sink) { + m.beforeSave() + stateSinkObject.Save(0, &m.mask) +} + +func (m *AddressMask) afterLoad() {} + +// +checklocksignore +func (m *AddressMask) StateLoad(stateSourceObject state.Source) { + stateSourceObject.Load(0, &m.mask) +} + func (f *FullAddress) StateTypeName() string { return "pkg/tcpip.FullAddress" } @@ -1155,6 +1248,7 @@ func (f *FullAddress) StateFields() []string { "NIC", "Addr", "Port", + "LinkAddr", } } @@ -1166,6 +1260,7 @@ func (f *FullAddress) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(0, &f.NIC) stateSinkObject.Save(1, &f.Addr) stateSinkObject.Save(2, &f.Port) + stateSinkObject.Save(3, &f.LinkAddr) } func (f *FullAddress) afterLoad() {} @@ -1175,6 +1270,7 @@ func (f *FullAddress) StateLoad(stateSourceObject state.Source) { stateSourceObject.Load(0, &f.NIC) stateSourceObject.Load(1, &f.Addr) stateSourceObject.Load(2, &f.Port) + stateSourceObject.Load(3, &f.LinkAddr) } func (s *SendableControlMessages) StateTypeName() string { @@ -1627,6 +1723,34 @@ func (src *TransportEndpointStats) StateLoad(stateSourceObject state.Source) { stateSourceObject.Load(5, &src.WriteErrors) } +func (a *AddressWithPrefix) StateTypeName() string { + return "pkg/tcpip.AddressWithPrefix" +} + +func (a *AddressWithPrefix) StateFields() []string { + return []string{ + "Address", + "PrefixLen", + } +} + +func (a *AddressWithPrefix) beforeSave() {} + +// +checklocksignore +func (a *AddressWithPrefix) StateSave(stateSinkObject state.Sink) { + a.beforeSave() + stateSinkObject.Save(0, &a.Address) + stateSinkObject.Save(1, &a.PrefixLen) +} + +func (a *AddressWithPrefix) afterLoad() {} + +// +checklocksignore +func (a *AddressWithPrefix) StateLoad(stateSourceObject state.Source) { + stateSourceObject.Load(0, &a.Address) + stateSourceObject.Load(1, &a.PrefixLen) +} + func init() { state.Register((*ErrAborted)(nil)) state.Register((*ErrAddressFamilyNotSupported)(nil)) @@ -1655,6 +1779,8 @@ func init() { state.Register((*ErrNoBufferSpace)(nil)) state.Register((*ErrNoPortAvailable)(nil)) state.Register((*ErrHostUnreachable)(nil)) + state.Register((*ErrHostDown)(nil)) + state.Register((*ErrNoNet)(nil)) state.Register((*ErrNoSuchFile)(nil)) state.Register((*ErrNotConnected)(nil)) state.Register((*ErrNotPermitted)(nil)) @@ -1676,6 +1802,8 @@ func init() { state.Register((*SockError)(nil)) state.Register((*stdClock)(nil)) state.Register((*MonotonicTime)(nil)) + state.Register((*Address)(nil)) + state.Register((*AddressMask)(nil)) state.Register((*FullAddress)(nil)) state.Register((*SendableControlMessages)(nil)) state.Register((*ReceivableControlMessages)(nil)) @@ -1690,4 +1818,5 @@ func 
init() { state.Register((*ReadErrors)(nil)) state.Register((*WriteErrors)(nil)) state.Register((*TransportEndpointStats)(nil)) + state.Register((*AddressWithPrefix)(nil)) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/icmp/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/icmp/endpoint.go index 8c737d51f7..3bebf8d560 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/icmp/endpoint.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/icmp/endpoint.go @@ -19,7 +19,7 @@ import ( "io" "time" - "gvisor.dev/gvisor/pkg/bufferv2" + "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/checksum" @@ -342,7 +342,7 @@ func (e *endpoint) write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcp return 0, &tcpip.ErrMessageTooLong{} } - v := bufferv2.NewView(p.Len()) + v := buffer.NewView(p.Len()) defer v.Release() if _, err := io.CopyN(v, p, int64(p.Len())); err != nil { return 0, &tcpip.ErrBadBuffer{} @@ -406,12 +406,12 @@ func (e *endpoint) GetSockOpt(opt tcpip.GettableSocketOption) tcpip.Error { return e.net.GetSockOpt(opt) } -func send4(s *stack.Stack, ctx *network.WriteContext, ident uint16, data *bufferv2.View, maxHeaderLength uint16) tcpip.Error { +func send4(s *stack.Stack, ctx *network.WriteContext, ident uint16, data *buffer.View, maxHeaderLength uint16) tcpip.Error { if data.Size() < header.ICMPv4MinimumSize { return &tcpip.ErrInvalidEndpointState{} } - pkt := ctx.TryNewPacketBuffer(header.ICMPv4MinimumSize+int(maxHeaderLength), bufferv2.Buffer{}) + pkt := ctx.TryNewPacketBuffer(header.ICMPv4MinimumSize+int(maxHeaderLength), buffer.Buffer{}) if pkt.IsNil() { return &tcpip.ErrWouldBlock{} } @@ -448,12 +448,12 @@ func send4(s *stack.Stack, ctx *network.WriteContext, ident uint16, data *buffer return nil } -func send6(s *stack.Stack, ctx *network.WriteContext, ident uint16, data *bufferv2.View, src, dst tcpip.Address, maxHeaderLength uint16) tcpip.Error { +func send6(s *stack.Stack, ctx *network.WriteContext, ident uint16, data *buffer.View, src, dst tcpip.Address, maxHeaderLength uint16) tcpip.Error { if data.Size() < header.ICMPv6EchoMinimumSize { return &tcpip.ErrInvalidEndpointState{} } - pkt := ctx.TryNewPacketBuffer(header.ICMPv6MinimumSize+int(maxHeaderLength), bufferv2.Buffer{}) + pkt := ctx.TryNewPacketBuffer(header.ICMPv6MinimumSize+int(maxHeaderLength), buffer.Buffer{}) if pkt.IsNil() { return &tcpip.ErrWouldBlock{} } @@ -641,7 +641,7 @@ func (e *endpoint) isBroadcastOrMulticast(nicID tcpip.NICID, addr tcpip.Address) // Bind binds the endpoint to a specific local address and port. // Specifying a NIC is optional. 
func (e *endpoint) Bind(addr tcpip.FullAddress) tcpip.Error { - if len(addr.Addr) != 0 && e.isBroadcastOrMulticast(addr.NIC, addr.Addr) { + if addr.Addr.BitLen() != 0 && e.isBroadcastOrMulticast(addr.NIC, addr.Addr) { return &tcpip.ErrBadLocalAddress{} } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/endpoint.go index fa062ea9d3..880099f1d6 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/endpoint.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/endpoint.go @@ -20,7 +20,7 @@ import ( "fmt" "gvisor.dev/gvisor/pkg/atomicbitops" - "gvisor.dev/gvisor/pkg/bufferv2" + "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" @@ -265,14 +265,14 @@ func (c *WriteContext) PacketInfo() WritePacketInfo { // // If this method returns nil, the caller should wait for the endpoint to become // writable. -func (c *WriteContext) TryNewPacketBuffer(reserveHdrBytes int, data bufferv2.Buffer) stack.PacketBufferPtr { +func (c *WriteContext) TryNewPacketBuffer(reserveHdrBytes int, data buffer.Buffer) stack.PacketBufferPtr { e := c.e e.sendBufferSizeInUseMu.Lock() defer e.sendBufferSizeInUseMu.Unlock() if !e.hasSendSpaceRLocked() { - return stack.PacketBufferPtr{} + return nil } // Note that we allow oversubscription - if there is any space at all in the @@ -448,7 +448,7 @@ func (e *Endpoint) AcquireContextForWrite(opts tcpip.WriteOptions) (WriteContext // If a local address is not specified, then we need to make sure the // bound address belongs to the specified local interface. - if len(pktInfoAddr) == 0 { + if pktInfoAddr.BitLen() == 0 { // If the bound interface is different from the specified local // interface, the bound address obviously does not belong to the // specified local interface. @@ -457,7 +457,7 @@ func (e *Endpoint) AcquireContextForWrite(opts tcpip.WriteOptions) (WriteContext if info.BindNICID != 0 && info.BindNICID != pktInfoNICID { return WriteContext{}, &tcpip.ErrHostUnreachable{} } - if len(info.ID.LocalAddress) != 0 && e.stack.CheckLocalAddress(pktInfoNICID, header.IPv6ProtocolNumber, info.ID.LocalAddress) == 0 { + if info.ID.LocalAddress.BitLen() != 0 && e.stack.CheckLocalAddress(pktInfoNICID, header.IPv6ProtocolNumber, info.ID.LocalAddress) == 0 { return WriteContext{}, &tcpip.ErrBadLocalAddress{} } } @@ -465,7 +465,7 @@ func (e *Endpoint) AcquireContextForWrite(opts tcpip.WriteOptions) (WriteContext nicID = pktInfoNICID } - if len(pktInfoAddr) != 0 { + if pktInfoAddr.BitLen() != 0 { // The local address must belong to the stack. If an outgoing interface // is specified as a result of binding the endpoint to a device, or // specifying the outgoing interface in the destination address/pkt info @@ -566,18 +566,18 @@ func (e *Endpoint) Disconnect() { // // +checklocksread:e.mu func (e *Endpoint) connectRouteRLocked(nicID tcpip.NICID, localAddr tcpip.Address, addr tcpip.FullAddress, netProto tcpip.NetworkProtocolNumber) (*stack.Route, tcpip.NICID, tcpip.Error) { - if len(localAddr) == 0 { + if localAddr.BitLen() == 0 { localAddr = e.Info().ID.LocalAddress if e.isBroadcastOrMulticast(nicID, netProto, localAddr) { // A packet can only originate from a unicast address (i.e., an interface). 
- localAddr = "" + localAddr = tcpip.Address{} } if header.IsV4MulticastAddress(addr.Addr) || header.IsV6MulticastAddress(addr.Addr) { if nicID == 0 { nicID = e.multicastNICID } - if localAddr == "" && nicID == 0 { + if localAddr == (tcpip.Address{}) && nicID == 0 { localAddr = e.multicastAddr } } @@ -634,7 +634,7 @@ func (e *Endpoint) ConnectAndThen(addr tcpip.FullAddress, f func(netProto tcpip. return err } - r, nicID, err := e.connectRouteRLocked(nicID, "", addr, netProto) + r, nicID, err := e.connectRouteRLocked(nicID, tcpip.Address{}, addr, netProto) if err != nil { return err } @@ -726,7 +726,7 @@ func (e *Endpoint) BindAndThen(addr tcpip.FullAddress, f func(tcpip.NetworkProto } nicID := addr.NIC - if len(addr.Addr) != 0 && !e.isBroadcastOrMulticast(addr.NIC, netProto, addr.Addr) { + if addr.Addr.BitLen() != 0 && !e.isBroadcastOrMulticast(addr.NIC, netProto, addr.Addr) { nicID = e.stack.CheckLocalAddress(nicID, netProto, addr.Addr) if nicID == 0 { return &tcpip.ErrBadLocalAddress{} @@ -887,8 +887,8 @@ func (e *Endpoint) SetSockOpt(opt tcpip.SettableSocketOption) tcpip.Error { nic := v.NIC addr := fa.Addr - if nic == 0 && addr == "" { - e.multicastAddr = "" + if nic == 0 && addr == (tcpip.Address{}) { + e.multicastAddr = tcpip.Address{} e.multicastNICID = 0 break } @@ -920,7 +920,7 @@ func (e *Endpoint) SetSockOpt(opt tcpip.SettableSocketOption) tcpip.Error { if v.InterfaceAddr.Unspecified() { if nicID == 0 { - if r, err := e.stack.FindRoute(0, "", v.MulticastAddr, e.netProto, false /* multicastLoop */); err == nil { + if r, err := e.stack.FindRoute(0, tcpip.Address{}, v.MulticastAddr, e.netProto, false /* multicastLoop */); err == nil { nicID = r.NICID() r.Release() } @@ -955,7 +955,7 @@ func (e *Endpoint) SetSockOpt(opt tcpip.SettableSocketOption) tcpip.Error { nicID := v.NIC if v.InterfaceAddr.Unspecified() { if nicID == 0 { - if r, err := e.stack.FindRoute(0, "", v.MulticastAddr, e.netProto, false /* multicastLoop */); err == nil { + if r, err := e.stack.FindRoute(0, tcpip.Address{}, v.MulticastAddr, e.netProto, false /* multicastLoop */); err == nil { nicID = r.NICID() r.Release() } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/endpoint_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/endpoint_state.go index 68bd1fbf67..d495029671 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/endpoint_state.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/internal/network/endpoint_state.go @@ -40,7 +40,7 @@ func (e *Endpoint) Resume(s *stack.Stack) { switch state := e.State(); state { case transport.DatagramEndpointStateInitial, transport.DatagramEndpointStateClosed: case transport.DatagramEndpointStateBound: - if len(info.ID.LocalAddress) != 0 && !e.isBroadcastOrMulticast(info.RegisterNICID, e.effectiveNetProto, info.ID.LocalAddress) { + if info.ID.LocalAddress.BitLen() != 0 && !e.isBroadcastOrMulticast(info.RegisterNICID, e.effectiveNetProto, info.ID.LocalAddress) { if e.stack.CheckLocalAddress(info.RegisterNICID, e.effectiveNetProto, info.ID.LocalAddress) == 0 { panic(fmt.Sprintf("got e.stack.CheckLocalAddress(%d, %d, %s) = 0, want != 0", info.RegisterNICID, e.effectiveNetProto, info.ID.LocalAddress)) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/endpoint.go index a39af2dbb7..46a7f82a0b 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/endpoint.go +++ 
b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/packet/endpoint.go @@ -28,7 +28,7 @@ import ( "io" "time" - "gvisor.dev/gvisor/pkg/bufferv2" + "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" @@ -219,7 +219,7 @@ func (ep *endpoint) Write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tc var remote tcpip.LinkAddress if to := opts.To; to != nil { - remote = tcpip.LinkAddress(to.Addr) + remote = to.LinkAddr if n := to.NIC; n != 0 { nicID = n @@ -239,7 +239,7 @@ func (ep *endpoint) Write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tc return 0, &tcpip.ErrMessageTooLong{} } - var payload bufferv2.Buffer + var payload buffer.Buffer if _, err := payload.WriteFromReader(p, int64(p.Len())); err != nil { return 0, &tcpip.ErrBadBuffer{} } @@ -451,7 +451,7 @@ func (ep *endpoint) HandlePacket(nicID tcpip.NICID, netProto tcpip.NetworkProtoc if len(pkt.LinkHeader().Slice()) != 0 { hdr := header.Ethernet(pkt.LinkHeader().Slice()) - rcvdPkt.senderAddr.Addr = tcpip.Address(hdr.SourceAddress()) + rcvdPkt.senderAddr.LinkAddr = hdr.SourceAddress() } // Raw packet endpoints include link-headers in received packets. diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/endpoint.go index dabe6587a8..476932d2ba 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/endpoint.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/raw/endpoint.go @@ -30,7 +30,7 @@ import ( "io" "time" - "gvisor.dev/gvisor/pkg/bufferv2" + "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/checksum" @@ -312,7 +312,7 @@ func (e *endpoint) Write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcp if opts.To != nil { // Raw sockets do not support sending to a IPv4 address on a IPv6 endpoint. - if netProto == header.IPv6ProtocolNumber && len(opts.To.Addr) != header.IPv6AddressSize { + if netProto == header.IPv6ProtocolNumber && opts.To.Addr.BitLen() != header.IPv6AddressSizeBits { return 0, &tcpip.ErrInvalidOptionValue{} } } @@ -356,7 +356,7 @@ func (e *endpoint) write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcp return 0, &tcpip.ErrMessageTooLong{} } - var payload bufferv2.Buffer + var payload buffer.Buffer defer payload.Release() if _, err := payload.WriteFromReader(p, int64(p.Len())); err != nil { return 0, &tcpip.ErrBadBuffer{} @@ -399,7 +399,7 @@ func (e *endpoint) Connect(addr tcpip.FullAddress) tcpip.Error { netProto := e.net.NetProto() // Raw sockets do not support connecting to a IPv4 address on a IPv6 endpoint. - if netProto == header.IPv6ProtocolNumber && len(addr.Addr) != header.IPv6AddressSize { + if netProto == header.IPv6ProtocolNumber && addr.Addr.BitLen() != header.IPv6AddressSizeBits { return &tcpip.ErrAddressFamilyNotSupported{} } @@ -638,7 +638,7 @@ func (e *endpoint) HandlePacket(pkt stack.PacketBufferPtr) { } // If bound to an address, only accept data for that address. - if info.BindAddr != "" && info.BindAddr != dstAddr { + if info.BindAddr != (tcpip.Address{}) && info.BindAddr != dstAddr { return false } default: @@ -680,15 +680,15 @@ func (e *endpoint) HandlePacket(pkt stack.PacketBufferPtr) { // TODO(https://gvisor.dev/issue/6517): Avoid the copy once S/R supports // overlapping slices. 
transportHeader := pkt.TransportHeader().Slice() - var combinedBuf bufferv2.Buffer + var combinedBuf buffer.Buffer defer combinedBuf.Release() switch info.NetProto { case header.IPv4ProtocolNumber: networkHeader := pkt.NetworkHeader().Slice() - headers := bufferv2.NewView(len(networkHeader) + len(transportHeader)) + headers := buffer.NewView(len(networkHeader) + len(transportHeader)) headers.Write(networkHeader) headers.Write(transportHeader) - combinedBuf = bufferv2.MakeWithView(headers) + combinedBuf = buffer.MakeWithView(headers) pktBuf := pkt.Data().ToBuffer() combinedBuf.Merge(&pktBuf) case header.IPv6ProtocolNumber: @@ -702,7 +702,7 @@ func (e *endpoint) HandlePacket(pkt stack.PacketBufferPtr) { } } - combinedBuf = bufferv2.MakeWithView(pkt.TransportHeader().View()) + combinedBuf = buffer.MakeWithView(pkt.TransportHeader().View()) pktBuf := pkt.Data().ToBuffer() combinedBuf.Merge(&pktBuf) diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/accept.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/accept.go index 6e495b9201..0006f25036 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/accept.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/accept.go @@ -146,8 +146,8 @@ func (l *listenContext) cookieHash(id stack.TransportEndpointID, ts uint32, nonc // It never returns an error. l.hasher.Write(payload[:]) l.hasher.Write(l.nonce[nonceIndex][:]) - l.hasher.Write([]byte(id.LocalAddress)) - l.hasher.Write([]byte(id.RemoteAddress)) + l.hasher.Write(id.LocalAddress.AsSlice()) + l.hasher.Write(id.RemoteAddress.AsSlice()) // Finalize the calculation of the hash and return the first 4 bytes. h := l.hasher.Sum(nil) @@ -589,7 +589,7 @@ func (e *endpoint) handleListenSegment(ctx *listenContext, s *segment) tcpip.Err netProtos := []tcpip.NetworkProtocolNumber{s.pkt.NetworkProtocolNumber} // If the local address is an IPv4 Address then also look for IPv6 // dual stack endpoints. - if s.id.LocalAddress.To4() != "" { + if s.id.LocalAddress.To4() != (tcpip.Address{}) { netProtos = []tcpip.NetworkProtocolNumber{header.IPv4ProtocolNumber, header.IPv6ProtocolNumber} } for _, netProto := range netProtos { diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/connect.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/connect.go index 451f2cef10..0f900174a2 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/connect.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/connect.go @@ -128,9 +128,15 @@ func maybeFailTimerHandler(e *endpoint, f func() tcpip.Error) func() { e.mu.Lock() if err := f(); err != nil { e.lastErrorMu.Lock() + // If the handler timed out and we have a lastError recorded (maybe due + // to an ICMP message received), promote it to be the hard error. + if _, isTimeout := err.(*tcpip.ErrTimeout); e.lastError != nil && isTimeout { + e.hardError = e.lastError + } else { + e.hardError = err + } e.lastError = err e.lastErrorMu.Unlock() - e.hardError = err e.cleanupLocked() e.setEndpointState(StateError) e.mu.Unlock() @@ -234,8 +240,8 @@ func generateSecureISN(id stack.TransportEndpointID, clock tcpip.Clock, seed uin // Per hash.Hash.Writer: // // It never returns an error. 
- _, _ = isnHasher.Write([]byte(id.LocalAddress)) - _, _ = isnHasher.Write([]byte(id.RemoteAddress)) + _, _ = isnHasher.Write(id.LocalAddress.AsSlice()) + _, _ = isnHasher.Write(id.RemoteAddress.AsSlice()) portBuf := make([]byte, 2) binary.LittleEndian.PutUint16(portBuf, id.LocalPort) _, _ = isnHasher.Write(portBuf) @@ -1064,7 +1070,7 @@ func (e *endpoint) transitionToStateCloseLocked() { // to any other listening endpoint. We reply with RST if we cannot find one. func (e *endpoint) tryDeliverSegmentFromClosedEndpoint(s *segment) { ep := e.stack.FindTransportEndpoint(e.NetProto, e.TransProto, e.TransportEndpointInfo.ID, s.pkt.NICID) - if ep == nil && e.NetProto == header.IPv6ProtocolNumber && e.TransportEndpointInfo.ID.LocalAddress.To4() != "" { + if ep == nil && e.NetProto == header.IPv6ProtocolNumber && e.TransportEndpointInfo.ID.LocalAddress.To4() != (tcpip.Address{}) { // Dual-stack socket, try IPv4. ep = e.stack.FindTransportEndpoint( header.IPv4ProtocolNumber, @@ -1392,13 +1398,13 @@ func (e *endpoint) handleTimeWaitSegments() (extendTimeWait bool, reuseTW func() if newSyn { info := e.TransportEndpointInfo newID := info.ID - newID.RemoteAddress = "" + newID.RemoteAddress = tcpip.Address{} newID.RemotePort = 0 netProtos := []tcpip.NetworkProtocolNumber{info.NetProto} // If the local address is an IPv4 address then also // look for IPv6 dual stack endpoints that might be // listening on the local address. - if newID.LocalAddress.To4() != "" { + if newID.LocalAddress.To4() != (tcpip.Address{}) { netProtos = []tcpip.NetworkProtocolNumber{header.IPv4ProtocolNumber, header.IPv6ProtocolNumber} } for _, netProto := range netProtos { diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/dispatcher.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/dispatcher.go index 1c7a3c44eb..b647b78188 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/dispatcher.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/dispatcher.go @@ -504,7 +504,7 @@ func (j jenkinsHasher) hash(id stack.TransportEndpointID) uint32 { h := jenkins.Sum32(j.seed) h.Write(payload[:]) - h.Write([]byte(id.LocalAddress)) - h.Write([]byte(id.RemoteAddress)) + h.Write(id.LocalAddress.AsSlice()) + h.Write(id.RemoteAddress.AsSlice()) return h.Sum32() } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint.go index 613c6c43eb..7ae7043ecd 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint.go @@ -25,7 +25,7 @@ import ( "time" "gvisor.dev/gvisor/pkg/atomicbitops" - "gvisor.dev/gvisor/pkg/bufferv2" + "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/sleep" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/tcpip" @@ -77,6 +77,17 @@ const ( SegOverheadFactor = 2 ) +type connDirectionState uint32 + +// Connection direction states used for directionState checks in endpoint struct +// to detect half-closed connection and deliver POLLRDHUP +const ( + connDirectionStateOpen connDirectionState = 0 + connDirectionStateRcvClosed connDirectionState = 1 + connDirectionStateSndClosed connDirectionState = 2 + connDirectionStateAll connDirectionState = connDirectionStateOpen | connDirectionStateRcvClosed | connDirectionStateSndClosed +) + // connected returns true when s is one of the states representing an // endpoint connected to a peer. func (s EndpointState) connected() bool { @@ -399,6 +410,10 @@ type endpoint struct { // methods. 
state atomicbitops.Uint32 `state:".(EndpointState)"` + // connectionDirectionState holds current state of send and receive, + // accessed atomically + connectionDirectionState atomicbitops.Uint32 + // origEndpointState is only used during a restore phase to save the // endpoint state at restore time as the socket is moved to it's correct // state. @@ -940,6 +955,9 @@ func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask { if e.sndQueueInfo.SndClosed || e.sndQueueInfo.SndBufUsed < sndBufSize { result |= waiter.WritableEvents } + if e.sndQueueInfo.SndClosed { + e.updateConnDirectionState(connDirectionStateSndClosed) + } e.sndQueueInfo.sndQueueMu.Unlock() } @@ -949,10 +967,18 @@ func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask { if e.RcvBufUsed > 0 || e.RcvClosed { result |= waiter.ReadableEvents } + if e.RcvClosed { + e.updateConnDirectionState(connDirectionStateRcvClosed) + } e.rcvQueueMu.Unlock() } } + // Determine whether endpoint is half-closed with rcv shutdown + if e.connDirectionState() == connDirectionStateRcvClosed { + result |= waiter.EventRdHUp + } + return result } @@ -1012,6 +1038,7 @@ func (e *endpoint) Abort() { switch state := e.EndpointState(); { case state.connected(): e.resetConnectionLocked(&tcpip.ErrAborted{}) + e.waiterQueue.Notify(waiter.EventHUp | waiter.EventErr | waiter.ReadableEvents | waiter.WritableEvents) return } e.closeLocked() @@ -1526,7 +1553,7 @@ func (e *endpoint) isEndpointWritableLocked() (int, tcpip.Error) { // readFromPayloader reads a slice from the Payloader. // +checklocks:e.mu // +checklocks:e.sndQueueInfo.sndQueueMu -func (e *endpoint) readFromPayloader(p tcpip.Payloader, opts tcpip.WriteOptions, avail int) (bufferv2.Buffer, tcpip.Error) { +func (e *endpoint) readFromPayloader(p tcpip.Payloader, opts tcpip.WriteOptions, avail int) (buffer.Buffer, tcpip.Error) { // We can release locks while copying data. // // This is not possible if atomic is set, because we can't allow the @@ -1541,7 +1568,7 @@ func (e *endpoint) readFromPayloader(p tcpip.Payloader, opts tcpip.WriteOptions, } // Fetch data. - var payload bufferv2.Buffer + var payload buffer.Buffer if l := p.Len(); l < avail { avail = l } @@ -1550,7 +1577,7 @@ func (e *endpoint) readFromPayloader(p tcpip.Payloader, opts tcpip.WriteOptions, } if _, err := payload.WriteFromReader(p, int64(avail)); err != nil { payload.Release() - return bufferv2.Buffer{}, &tcpip.ErrBadBuffer{} + return buffer.Buffer{}, &tcpip.ErrBadBuffer{} } return payload, nil } @@ -1599,7 +1626,6 @@ func (e *endpoint) queueSegment(p tcpip.Payloader, opts tcpip.WriteOptions) (*se size := int(buf.Size()) s := newOutgoingSegment(e.TransportEndpointInfo.ID, e.stack.Clock(), buf) e.sndQueueInfo.SndBufUsed += size - s.IncRef() e.snd.writeList.PushBack(s) return s, size, nil @@ -1617,9 +1643,6 @@ func (e *endpoint) Write(p tcpip.Payloader, opts tcpip.WriteOptions) (int64, tcp // Return if either we didn't queue anything or if an error occurred while // attempting to queue data. 
nextSeg, n, err := e.queueSegment(p, opts) - if nextSeg != nil { - defer nextSeg.DecRef() - } if n == 0 || err != nil { return 0, err } @@ -2225,8 +2248,8 @@ func (e *endpoint) registerEndpoint(addr tcpip.FullAddress, netProto tcpip.Netwo h := jenkins.Sum32(e.protocol.portOffsetSecret) for _, s := range [][]byte{ - []byte(e.ID.LocalAddress), - []byte(e.ID.RemoteAddress), + e.ID.LocalAddress.AsSlice(), + e.ID.RemoteAddress.AsSlice(), portBuf, } { // Per io.Writer.Write: @@ -2424,6 +2447,9 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool) tcpip.Error { e.setEndpointState(StateConnecting) if err := e.registerEndpoint(addr, netProto, r.NICID()); err != nil { e.setEndpointState(oldState) + if _, ok := err.(*tcpip.ErrPortInUse); ok { + return &tcpip.ErrBadLocalAddress{} + } return err } @@ -2509,7 +2535,13 @@ func (e *endpoint) shutdownLocked(flags tcpip.ShutdownFlags) tcpip.Error { } // Wake up any readers that maybe waiting for the stream to become // readable. - e.waiterQueue.Notify(waiter.ReadableEvents) + events := waiter.ReadableEvents + if e.shutdownFlags&tcpip.ShutdownWrite == 0 { + // If ShutdownWrite is not set, write end won't close and + // we end up with a half-closed connection + events |= waiter.EventRdHUp + } + e.waiterQueue.Notify(events) } // Close for write. @@ -2525,7 +2557,7 @@ func (e *endpoint) shutdownLocked(flags tcpip.ShutdownFlags) tcpip.Error { } // Queue fin segment. - s := newOutgoingSegment(e.TransportEndpointInfo.ID, e.stack.Clock(), bufferv2.Buffer{}) + s := newOutgoingSegment(e.TransportEndpointInfo.ID, e.stack.Clock(), buffer.Buffer{}) e.snd.writeList.PushBack(s) // Mark endpoint as closed. e.sndQueueInfo.SndClosed = true @@ -2597,6 +2629,7 @@ func (e *endpoint) listen(backlog int) tcpip.Error { } e.shutdownFlags = 0 + e.updateConnDirectionState(connDirectionStateOpen) e.rcvQueueMu.Lock() e.RcvClosed = false e.rcvQueueMu.Unlock() @@ -2713,7 +2746,7 @@ func (e *endpoint) bindLocked(addr tcpip.FullAddress) (err tcpip.Error) { // v6only set to false. if netProto == header.IPv6ProtocolNumber { stackHasV4 := e.stack.CheckNetworkProtocol(header.IPv4ProtocolNumber) - alsoBindToV4 := !e.ops.GetV6Only() && addr.Addr == "" && stackHasV4 + alsoBindToV4 := !e.ops.GetV6Only() && addr.Addr == tcpip.Address{} && stackHasV4 if alsoBindToV4 { netProtos = append(netProtos, header.IPv4ProtocolNumber) } @@ -2722,7 +2755,7 @@ func (e *endpoint) bindLocked(addr tcpip.FullAddress) (err tcpip.Error) { var nic tcpip.NICID // If an address is specified, we must ensure that it's one of our // local addresses. 
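// The []byte(addr) -> addr.AsSlice() rewrites in the ISN hasher, in the
// dispatcher's jenkins hasher, and in registerEndpoint above reflect
// tcpip.Address no longer being a string alias, so the hashers have to ask
// for the raw bytes explicitly. A rough illustration of the same pattern
// using stdlib stand-ins (net/netip for tcpip.Address, FNV for the Jenkins
// hash):
package main

import (
	"encoding/binary"
	"fmt"
	"hash/fnv"
	"net/netip"
)

func hashTuple(local, remote netip.Addr, localPort, remotePort uint16) uint32 {
	h := fnv.New32a()
	// Addresses no longer convert to []byte directly; AsSlice() exposes the
	// 4- or 16-byte representation for hashing.
	_, _ = h.Write(local.AsSlice())
	_, _ = h.Write(remote.AsSlice())
	portBuf := make([]byte, 2)
	binary.LittleEndian.PutUint16(portBuf, localPort)
	_, _ = h.Write(portBuf)
	binary.LittleEndian.PutUint16(portBuf, remotePort)
	_, _ = h.Write(portBuf)
	return h.Sum32()
}

func main() {
	local := netip.MustParseAddr("10.0.2.15")
	remote := netip.MustParseAddr("10.0.2.2")
	fmt.Printf("%#x\n", hashTuple(local, remote, 2222, 22))
}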
- if len(addr.Addr) != 0 { + if addr.Addr.Len() != 0 { nic = e.stack.CheckLocalAddress(addr.NIC, netProto, addr.Addr) if nic == 0 { return &tcpip.ErrBadLocalAddress{} @@ -2909,6 +2942,16 @@ func (e *endpoint) HandleError(transErr stack.TransportError, pkt stack.PacketBu e.onICMPError(&tcpip.ErrHostUnreachable{}, transErr, pkt) case stack.DestinationNetworkUnreachableTransportError: e.onICMPError(&tcpip.ErrNetworkUnreachable{}, transErr, pkt) + case stack.DestinationPortUnreachableTransportError: + e.onICMPError(&tcpip.ErrConnectionRefused{}, transErr, pkt) + case stack.DestinationProtoUnreachableTransportError: + e.onICMPError(&tcpip.ErrUnknownProtocolOption{}, transErr, pkt) + case stack.SourceRouteFailedTransportError: + e.onICMPError(&tcpip.ErrNotSupported{}, transErr, pkt) + case stack.SourceHostIsolatedTransportError: + e.onICMPError(&tcpip.ErrNoNet{}, transErr, pkt) + case stack.DestinationHostDownTransportError: + e.onICMPError(&tcpip.ErrHostDown{}, transErr, pkt) } } @@ -3009,6 +3052,16 @@ func (e *endpoint) maxReceiveBufferSize() int { return rs.Max } +// directionState returns the close state of send and receive part of the endpoint +func (e *endpoint) connDirectionState() connDirectionState { + return connDirectionState(e.connectionDirectionState.Load()) +} + +// updateDirectionState updates the close state of send and receive part of the endpoint +func (e *endpoint) updateConnDirectionState(state connDirectionState) connDirectionState { + return connDirectionState(e.connectionDirectionState.Swap(uint32(e.connDirectionState() | state))) +} + // rcvWndScaleForHandshake computes the receive window scale to offer to the // peer when window scaling is enabled (true by default). If auto-tuning is // disabled then the window scaling factor is based on the size of the diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint_state.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint_state.go index 4e48de8e8d..8382b35b99 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint_state.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/endpoint_state.go @@ -38,7 +38,7 @@ func (e *endpoint) beforeSave() { case epState == StateInitial || epState == StateBound: case epState.connected() || epState.handshake(): if !e.route.HasSaveRestoreCapability() { - if !e.route.HasDisconncetOkCapability() { + if !e.route.HasDisconnectOkCapability() { panic(&tcpip.ErrSaveRejection{ Err: fmt.Errorf("endpoint cannot be saved in connected state: local %s:%d, remote %s:%d", e.TransportEndpointInfo.ID.LocalAddress, e.TransportEndpointInfo.ID.LocalPort, e.TransportEndpointInfo.ID.RemoteAddress, e.TransportEndpointInfo.ID.RemotePort), }) @@ -56,10 +56,6 @@ func (e *endpoint) beforeSave() { default: panic(fmt.Sprintf("endpoint in unknown state %v", e.EndpointState())) } - - if e.waiterQueue != nil && !e.waiterQueue.IsEmpty() { - panic("endpoint still has waiters upon save") - } } // saveEndpoints is invoked by stateify. 
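// Several hunks in this file swap "" sentinels for the tcpip.Address zero
// value (addr == tcpip.Address{}, To4() != (tcpip.Address{}), addr.Len(),
// BitLen()), again because the address type is now a struct. A toy version
// of that pattern; the addr type below is a simplified stand-in and only
// guesses at the real layout:
package main

import "fmt"

// addr stands in for a struct-backed address: fixed storage plus an explicit
// length, which is how "unset" is distinguished from a real address without
// relying on string emptiness.
type addr struct {
	bytes  [16]byte
	length int // 0 = unspecified, 4 or 16 otherwise
}

func (a addr) Len() int    { return a.length }
func (a addr) BitLen() int { return a.length * 8 }

// to4 mimics To4(): return the zero value when the address is not usable as
// IPv4 (here, simply when it is not 4 bytes long).
func (a addr) to4() addr {
	if a.length != 4 {
		return addr{}
	}
	return a
}

func main() {
	var unset addr
	v4 := addr{length: 4}
	fmt.Println(unset == addr{})    // true: "no address", the old addr == ""
	fmt.Println(v4.to4() != addr{}) // true: usable as an IPv4 address
	fmt.Println(unset.Len() != 0)   // false: the old len(addr) != 0
}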
@@ -136,24 +132,6 @@ func (e *endpoint) Resume(s *stack.Stack) { e.protocol = protocolFromStack(s) e.ops.InitHandler(e, e.stack, GetTCPSendBufferLimits, GetTCPReceiveBufferLimits) e.segmentQueue.thaw() - epState := EndpointState(e.origEndpointState) - switch epState { - case StateInitial, StateBound, StateListen, StateConnecting, StateEstablished: - var ss tcpip.TCPSendBufferSizeRangeOption - if err := e.stack.TransportProtocolOption(ProtocolNumber, &ss); err == nil { - sendBufferSize := e.getSendBufferSize() - if sendBufferSize < ss.Min || sendBufferSize > ss.Max { - panic(fmt.Sprintf("endpoint sendBufferSize %d is outside the min and max allowed [%d, %d]", sendBufferSize, ss.Min, ss.Max)) - } - } - - var rs tcpip.TCPReceiveBufferSizeRangeOption - if err := e.stack.TransportProtocolOption(ProtocolNumber, &rs); err == nil { - if rcvBufSize := e.ops.GetReceiveBufferSize(); rcvBufSize < int64(rs.Min) || rcvBufSize > int64(rs.Max) { - panic(fmt.Sprintf("endpoint rcvBufSize %d is outside the min and max allowed [%d, %d]", rcvBufSize, rs.Min, rs.Max)) - } - } - } bind := func() { e.mu.Lock() @@ -180,17 +158,21 @@ func (e *endpoint) Resume(s *stack.Stack) { e.setEndpointState(StateBound) } + epState := EndpointState(e.origEndpointState) switch { case epState.connected(): bind() - if len(e.connectingAddress) == 0 { + if e.connectingAddress.BitLen() == 0 { e.connectingAddress = e.TransportEndpointInfo.ID.RemoteAddress // This endpoint is accepted by netstack but not yet by // the app. If the endpoint is IPv6 but the remote // address is IPv4, we need to connect as IPv6 so that // dual-stack mode can be properly activated. - if e.NetProto == header.IPv6ProtocolNumber && len(e.TransportEndpointInfo.ID.RemoteAddress) != header.IPv6AddressSize { - e.connectingAddress = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff" + e.TransportEndpointInfo.ID.RemoteAddress + if e.NetProto == header.IPv6ProtocolNumber && e.TransportEndpointInfo.ID.RemoteAddress.BitLen() != header.IPv6AddressSizeBits { + e.connectingAddress = tcpip.AddrFrom16Slice(append( + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff}, + e.TransportEndpointInfo.ID.RemoteAddress.AsSlice()..., + )) } } // Reset the scoreboard to reinitialize the sack information as diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/protocol.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/protocol.go index e38ecddea0..81059d6a34 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/protocol.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/protocol.go @@ -185,8 +185,8 @@ func (p *protocol) tsOffset(src, dst tcpip.Address) tcp.TSOffset { // Per hash.Hash.Writer: // // It never returns an error. 
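// The Resume() hunk above replaces the old "\x00...\xff\xff" + address string
// concatenation with an explicit 16-byte slice: the 12-byte ::ffff:0:0 prefix
// followed by the four IPv4 bytes, i.e. an IPv4-mapped IPv6 address fed to
// tcpip.AddrFrom16Slice. The same construction sketched with net/netip
// standing in for the tcpip and header helpers:
package main

import (
	"fmt"
	"net/netip"
)

func mapV4ToV6(v4 netip.Addr) netip.Addr {
	// 12-byte prefix of ::ffff:0:0 followed by the 4 IPv4 bytes.
	prefix := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff}
	var b [16]byte
	copy(b[:], append(prefix, v4.AsSlice()...))
	return netip.AddrFrom16(b)
}

func main() {
	fmt.Println(mapV4ToV6(netip.MustParseAddr("10.0.2.2"))) // ::ffff:10.0.2.2
}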
- _, _ = h.Write([]byte(src)) - _, _ = h.Write([]byte(dst)) + _, _ = h.Write(src.AsSlice()) + _, _ = h.Write(dst.AsSlice()) return tcp.NewTSOffset(h.Sum32()) } @@ -521,6 +521,7 @@ func NewProtocol(s *stack.Stack) stack.TransportProtocol { }, congestionControl: ccReno, availableCongestionControl: []string{ccReno, ccCubic}, + moderateReceiveBuffer: true, lingerTimeout: DefaultTCPLingerTimeout, timeWaitTimeout: DefaultTCPTimeWaitTimeout, timeWaitReuse: tcpip.TCPTimeWaitReuseLoopbackOnly, diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/rcv.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/rcv.go index 98e4d12fb7..292e0d0e63 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/rcv.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/rcv.go @@ -293,6 +293,7 @@ func (r *receiver) consumeSegment(s *segment, segSeq seqnum.Value, segLen seqnum r.pendingRcvdSegments[i] = nil } r.pendingRcvdSegments = r.pendingRcvdSegments[:first] + r.ep.updateConnDirectionState(connDirectionStateRcvClosed) return true } @@ -480,13 +481,26 @@ func (r *receiver) handleRcvdSegment(s *segment) (drop bool, err tcpip.Error) { // Defer segment processing if it can't be consumed now. if !r.consumeSegment(s, segSeq, segLen) { if segLen > 0 || s.flags.Contains(header.TCPFlagFin) { - // We only store the segment if it's within our buffer size limit. + // We only store the segment if it's within our buffer + // size limit. // - // Only use 75% of the receive buffer queue for out-of-order - // segments. This ensures that we always leave some space for the inorder - // segments to arrive allowing pending segments to be processed and + // Only use 75% of the receive buffer queue for + // out-of-order segments. This ensures that we always + // leave some space for the inorder segments to arrive + // allowing pending segments to be processed and // delivered to the user. - if rcvBufSize := r.ep.ops.GetReceiveBufferSize(); rcvBufSize > 0 && (r.PendingBufUsed+int(segLen)) < int(rcvBufSize)>>2 { + // + // The ratio must be at least 50% (the size of rwnd) to + // leave space for retransmitted dropped packets. 51% + // would make recovery slow when there are multiple + // drops by necessitating multiple round trips. 100% + // would enable the buffer to be totally full of + // out-of-order data and stall the connection. + // + // An ideal solution is to ensure that there are at + // least N bytes free when N bytes are missing, but we + // don't have that computed at this point in the stack. 
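// The comment block above motivates the new bound that appears on the next
// line of the diff: the out-of-order queue may now use up to three quarters
// of the receive buffer (rcvBufSize - rcvBufSize/4) instead of the previous
// quarter (rcvBufSize >> 2). A quick check of the two expressions for one
// sample buffer size:
package main

import "fmt"

func main() {
	rcvBufSize := int64(1 << 20) // e.g. a 1 MiB receive buffer

	oldLimit := int(rcvBufSize) >> 2           // previous cap: 25% = 262144
	newLimit := int(rcvBufSize - rcvBufSize/4) // new cap:      75% = 786432

	fmt.Println(oldLimit, newLimit) // 262144 786432
}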
+ if rcvBufSize := r.ep.ops.GetReceiveBufferSize(); rcvBufSize > 0 && (r.PendingBufUsed+int(segLen)) < int(rcvBufSize-rcvBufSize/4) { r.ep.rcvQueueMu.Lock() r.PendingBufUsed += s.segMemSize() r.ep.rcvQueueMu.Unlock() diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment.go index 1bb3a490b3..df52065826 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment.go @@ -18,7 +18,7 @@ import ( "fmt" "io" - "gvisor.dev/gvisor/pkg/bufferv2" + "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" @@ -125,7 +125,7 @@ func newIncomingSegment(id stack.TransportEndpointID, clock tcpip.Clock, pkt sta return s, nil } -func newOutgoingSegment(id stack.TransportEndpointID, clock tcpip.Clock, buf bufferv2.Buffer) *segment { +func newOutgoingSegment(id stack.TransportEndpointID, clock tcpip.Clock, buf buffer.Buffer) *segment { s := newSegment() s.id = id s.rcvdTime = clock.NowMonotonic() @@ -194,6 +194,7 @@ func (s *segment) DecRef() { } } s.pkt.DecRef() + s.pkt = nil segmentPool.Put(s) }) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment_queue.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment_queue.go index 568c7d358e..53839387ef 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment_queue.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/segment_queue.go @@ -37,10 +37,8 @@ func (q *segmentQueue) emptyLocked() bool { // empty determines if the queue is empty. func (q *segmentQueue) empty() bool { q.mu.Lock() - r := q.emptyLocked() - q.mu.Unlock() - - return r + defer q.mu.Unlock() + return q.emptyLocked() } // enqueue adds the given segment to the queue. @@ -54,7 +52,10 @@ func (q *segmentQueue) enqueue(s *segment) bool { // avoid lock order inversion. bufSz := q.ep.ops.GetReceiveBufferSize() used := q.ep.receiveMemUsed() + q.mu.Lock() + defer q.mu.Unlock() + // Allow zero sized segments (ACK/FIN/RSTs etc even if the segment queue // is currently full). allow := (used <= int(bufSz) || s.payloadSize() == 0) && !q.frozen @@ -65,7 +66,6 @@ func (q *segmentQueue) enqueue(s *segment) bool { // Set the owner now that the endpoint owns the segment. s.setOwner(q.ep, recvQ) } - q.mu.Unlock() return allow } @@ -75,11 +75,12 @@ func (q *segmentQueue) enqueue(s *segment) bool { // the ref count when done. func (q *segmentQueue) dequeue() *segment { q.mu.Lock() + defer q.mu.Unlock() + s := q.list.Front() if s != nil { q.list.Remove(s) } - q.mu.Unlock() return s } @@ -89,14 +90,14 @@ func (q *segmentQueue) dequeue() *segment { // queue till the queue is unfroze with a corresponding segmentQueue.thaw call. func (q *segmentQueue) freeze() { q.mu.Lock() + defer q.mu.Unlock() q.frozen = true - q.mu.Unlock() } // thaw unfreezes a previously frozen queue using segmentQueue.freeze() and // allows new segments to be queued again. func (q *segmentQueue) thaw() { q.mu.Lock() + defer q.mu.Unlock() q.frozen = false - q.mu.Unlock() } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/snd.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/snd.go index d762c410e7..a90f46e3fd 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/snd.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/snd.go @@ -34,6 +34,9 @@ const ( // MaxRTO is the maximum allowed value for the retransmit timeout. 
MaxRTO = 120 * time.Second + // MinSRTT is the minimum allowed value for smoothed RTT. + MinSRTT = 1 * time.Millisecond + // InitialCwnd is the initial congestion window. InitialCwnd = 10 @@ -384,6 +387,10 @@ func (s *sender) updateRTO(rtt time.Duration) { } } + if s.rtt.TCPRTTState.SRTT < MinSRTT { + s.rtt.TCPRTTState.SRTT = MinSRTT + } + s.RTO = s.rtt.TCPRTTState.SRTT + 4*s.rtt.TCPRTTState.RTTVar s.rtt.Unlock() if s.RTO < s.minRTO { @@ -792,6 +799,7 @@ func (s *sender) maybeSendSegment(seg *segment, limit int, end seqnum.Value) (se segEnd = seg.sequenceNumber.Add(1) // Update the state to reflect that we have now // queued a FIN. + s.ep.updateConnDirectionState(connDirectionStateSndClosed) switch s.ep.EndpointState() { case StateCloseWait: s.ep.setEndpointState(StateLastAck) diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_segment_refs.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_segment_refs.go index 822371b22d..e40251b272 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_segment_refs.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_segment_refs.go @@ -43,7 +43,8 @@ type segmentRefs struct { // InitRefs initializes r with one reference and, if enabled, activates leak // checking. func (r *segmentRefs) InitRefs() { - r.refCount.Store(1) + + r.refCount.RacyStore(1) refs.Register(r) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_state_autogen.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_state_autogen.go index 912b12a266..1f38a830f7 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_state_autogen.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/tcp/tcp_state_autogen.go @@ -340,6 +340,7 @@ func (e *endpoint) StateFields() []string { "ownedByUser", "rcvQueue", "state", + "connectionDirectionState", "boundNICID", "ipv4TTL", "ipv6HopLimit", @@ -399,45 +400,46 @@ func (e *endpoint) StateSave(stateSinkObject state.Sink) { stateSinkObject.Save(8, &e.rcvMemUsed) stateSinkObject.Save(9, &e.ownedByUser) stateSinkObject.Save(10, &e.rcvQueue) - stateSinkObject.Save(12, &e.boundNICID) - stateSinkObject.Save(13, &e.ipv4TTL) - stateSinkObject.Save(14, &e.ipv6HopLimit) - stateSinkObject.Save(15, &e.isConnectNotified) - stateSinkObject.Save(16, &e.h) - stateSinkObject.Save(17, &e.portFlags) - stateSinkObject.Save(18, &e.boundBindToDevice) - stateSinkObject.Save(19, &e.boundPortFlags) - stateSinkObject.Save(20, &e.boundDest) - stateSinkObject.Save(21, &e.effectiveNetProtos) - stateSinkObject.Save(22, &e.recentTSTime) - stateSinkObject.Save(23, &e.shutdownFlags) - stateSinkObject.Save(24, &e.tcpRecovery) - stateSinkObject.Save(25, &e.sack) - stateSinkObject.Save(26, &e.delay) - stateSinkObject.Save(27, &e.scoreboard) - stateSinkObject.Save(28, &e.segmentQueue) - stateSinkObject.Save(29, &e.userMSS) - stateSinkObject.Save(30, &e.maxSynRetries) - stateSinkObject.Save(31, &e.windowClamp) - stateSinkObject.Save(32, &e.sndQueueInfo) - stateSinkObject.Save(33, &e.cc) - stateSinkObject.Save(34, &e.keepalive) - stateSinkObject.Save(35, &e.userTimeout) - stateSinkObject.Save(36, &e.deferAccept) - stateSinkObject.Save(37, &e.acceptQueue) - stateSinkObject.Save(38, &e.rcv) - stateSinkObject.Save(39, &e.snd) - stateSinkObject.Save(40, &e.connectingAddress) - stateSinkObject.Save(41, &e.amss) - stateSinkObject.Save(42, &e.sendTOS) - stateSinkObject.Save(43, &e.gso) - stateSinkObject.Save(44, &e.stats) - stateSinkObject.Save(45, &e.tcpLingerTimeout) - stateSinkObject.Save(46, &e.closed) - 
stateSinkObject.Save(47, &e.txHash) - stateSinkObject.Save(48, &e.owner) - stateSinkObject.Save(49, &e.ops) - stateSinkObject.Save(50, &e.lastOutOfWindowAckTime) + stateSinkObject.Save(12, &e.connectionDirectionState) + stateSinkObject.Save(13, &e.boundNICID) + stateSinkObject.Save(14, &e.ipv4TTL) + stateSinkObject.Save(15, &e.ipv6HopLimit) + stateSinkObject.Save(16, &e.isConnectNotified) + stateSinkObject.Save(17, &e.h) + stateSinkObject.Save(18, &e.portFlags) + stateSinkObject.Save(19, &e.boundBindToDevice) + stateSinkObject.Save(20, &e.boundPortFlags) + stateSinkObject.Save(21, &e.boundDest) + stateSinkObject.Save(22, &e.effectiveNetProtos) + stateSinkObject.Save(23, &e.recentTSTime) + stateSinkObject.Save(24, &e.shutdownFlags) + stateSinkObject.Save(25, &e.tcpRecovery) + stateSinkObject.Save(26, &e.sack) + stateSinkObject.Save(27, &e.delay) + stateSinkObject.Save(28, &e.scoreboard) + stateSinkObject.Save(29, &e.segmentQueue) + stateSinkObject.Save(30, &e.userMSS) + stateSinkObject.Save(31, &e.maxSynRetries) + stateSinkObject.Save(32, &e.windowClamp) + stateSinkObject.Save(33, &e.sndQueueInfo) + stateSinkObject.Save(34, &e.cc) + stateSinkObject.Save(35, &e.keepalive) + stateSinkObject.Save(36, &e.userTimeout) + stateSinkObject.Save(37, &e.deferAccept) + stateSinkObject.Save(38, &e.acceptQueue) + stateSinkObject.Save(39, &e.rcv) + stateSinkObject.Save(40, &e.snd) + stateSinkObject.Save(41, &e.connectingAddress) + stateSinkObject.Save(42, &e.amss) + stateSinkObject.Save(43, &e.sendTOS) + stateSinkObject.Save(44, &e.gso) + stateSinkObject.Save(45, &e.stats) + stateSinkObject.Save(46, &e.tcpLingerTimeout) + stateSinkObject.Save(47, &e.closed) + stateSinkObject.Save(48, &e.txHash) + stateSinkObject.Save(49, &e.owner) + stateSinkObject.Save(50, &e.ops) + stateSinkObject.Save(51, &e.lastOutOfWindowAckTime) } // +checklocksignore @@ -453,45 +455,46 @@ func (e *endpoint) StateLoad(stateSourceObject state.Source) { stateSourceObject.Load(8, &e.rcvMemUsed) stateSourceObject.Load(9, &e.ownedByUser) stateSourceObject.LoadWait(10, &e.rcvQueue) - stateSourceObject.Load(12, &e.boundNICID) - stateSourceObject.Load(13, &e.ipv4TTL) - stateSourceObject.Load(14, &e.ipv6HopLimit) - stateSourceObject.Load(15, &e.isConnectNotified) - stateSourceObject.Load(16, &e.h) - stateSourceObject.Load(17, &e.portFlags) - stateSourceObject.Load(18, &e.boundBindToDevice) - stateSourceObject.Load(19, &e.boundPortFlags) - stateSourceObject.Load(20, &e.boundDest) - stateSourceObject.Load(21, &e.effectiveNetProtos) - stateSourceObject.Load(22, &e.recentTSTime) - stateSourceObject.Load(23, &e.shutdownFlags) - stateSourceObject.Load(24, &e.tcpRecovery) - stateSourceObject.Load(25, &e.sack) - stateSourceObject.Load(26, &e.delay) - stateSourceObject.Load(27, &e.scoreboard) - stateSourceObject.LoadWait(28, &e.segmentQueue) - stateSourceObject.Load(29, &e.userMSS) - stateSourceObject.Load(30, &e.maxSynRetries) - stateSourceObject.Load(31, &e.windowClamp) - stateSourceObject.Load(32, &e.sndQueueInfo) - stateSourceObject.Load(33, &e.cc) - stateSourceObject.Load(34, &e.keepalive) - stateSourceObject.Load(35, &e.userTimeout) - stateSourceObject.Load(36, &e.deferAccept) - stateSourceObject.Load(37, &e.acceptQueue) - stateSourceObject.LoadWait(38, &e.rcv) - stateSourceObject.LoadWait(39, &e.snd) - stateSourceObject.Load(40, &e.connectingAddress) - stateSourceObject.Load(41, &e.amss) - stateSourceObject.Load(42, &e.sendTOS) - stateSourceObject.Load(43, &e.gso) - stateSourceObject.Load(44, &e.stats) - stateSourceObject.Load(45, 
&e.tcpLingerTimeout) - stateSourceObject.Load(46, &e.closed) - stateSourceObject.Load(47, &e.txHash) - stateSourceObject.Load(48, &e.owner) - stateSourceObject.Load(49, &e.ops) - stateSourceObject.Load(50, &e.lastOutOfWindowAckTime) + stateSourceObject.Load(12, &e.connectionDirectionState) + stateSourceObject.Load(13, &e.boundNICID) + stateSourceObject.Load(14, &e.ipv4TTL) + stateSourceObject.Load(15, &e.ipv6HopLimit) + stateSourceObject.Load(16, &e.isConnectNotified) + stateSourceObject.Load(17, &e.h) + stateSourceObject.Load(18, &e.portFlags) + stateSourceObject.Load(19, &e.boundBindToDevice) + stateSourceObject.Load(20, &e.boundPortFlags) + stateSourceObject.Load(21, &e.boundDest) + stateSourceObject.Load(22, &e.effectiveNetProtos) + stateSourceObject.Load(23, &e.recentTSTime) + stateSourceObject.Load(24, &e.shutdownFlags) + stateSourceObject.Load(25, &e.tcpRecovery) + stateSourceObject.Load(26, &e.sack) + stateSourceObject.Load(27, &e.delay) + stateSourceObject.Load(28, &e.scoreboard) + stateSourceObject.LoadWait(29, &e.segmentQueue) + stateSourceObject.Load(30, &e.userMSS) + stateSourceObject.Load(31, &e.maxSynRetries) + stateSourceObject.Load(32, &e.windowClamp) + stateSourceObject.Load(33, &e.sndQueueInfo) + stateSourceObject.Load(34, &e.cc) + stateSourceObject.Load(35, &e.keepalive) + stateSourceObject.Load(36, &e.userTimeout) + stateSourceObject.Load(37, &e.deferAccept) + stateSourceObject.Load(38, &e.acceptQueue) + stateSourceObject.LoadWait(39, &e.rcv) + stateSourceObject.LoadWait(40, &e.snd) + stateSourceObject.Load(41, &e.connectingAddress) + stateSourceObject.Load(42, &e.amss) + stateSourceObject.Load(43, &e.sendTOS) + stateSourceObject.Load(44, &e.gso) + stateSourceObject.Load(45, &e.stats) + stateSourceObject.Load(46, &e.tcpLingerTimeout) + stateSourceObject.Load(47, &e.closed) + stateSourceObject.Load(48, &e.txHash) + stateSourceObject.Load(49, &e.owner) + stateSourceObject.Load(50, &e.ops) + stateSourceObject.Load(51, &e.lastOutOfWindowAckTime) stateSourceObject.LoadValue(11, new(EndpointState), func(y any) { e.loadState(y.(EndpointState)) }) stateSourceObject.AfterLoad(e.afterLoad) } diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint.go index b3d448b2d7..a9948a1c65 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/endpoint.go @@ -21,7 +21,7 @@ import ( "math" "time" - "gvisor.dev/gvisor/pkg/bufferv2" + "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/checksum" @@ -436,7 +436,7 @@ func (e *endpoint) prepareForWrite(p tcpip.Payloader, opts tcpip.WriteOptions) ( return udpPacketInfo{}, &tcpip.ErrMessageTooLong{} } - var buf bufferv2.Buffer + var buf buffer.Buffer if _, err := buf.WriteFromReader(p, int64(p.Len())); err != nil { buf.Release() ctx.Release() @@ -593,7 +593,7 @@ func (e *endpoint) GetSockOpt(opt tcpip.GettableSocketOption) tcpip.Error { // udpPacketInfo holds information needed to send a UDP packet. 
type udpPacketInfo struct { ctx network.WriteContext - data bufferv2.Buffer + data buffer.Buffer localPort uint16 remotePort uint16 } @@ -745,6 +745,9 @@ func (e *endpoint) Shutdown(flags tcpip.ShutdownFlags) tcpip.Error { } } + if e.net.State() == transport.DatagramEndpointStateBound { + return &tcpip.ErrNotConnected{} + } return nil } @@ -807,7 +810,7 @@ func (e *endpoint) bindLocked(addr tcpip.FullAddress) tcpip.Error { // wildcard (empty) address, and this is an IPv6 endpoint with v6only // set to false. netProtos := []tcpip.NetworkProtocolNumber{boundNetProto} - if boundNetProto == header.IPv6ProtocolNumber && !e.ops.GetV6Only() && boundAddr == "" && e.stack.CheckNetworkProtocol(header.IPv4ProtocolNumber) { + if boundNetProto == header.IPv6ProtocolNumber && !e.ops.GetV6Only() && boundAddr == (tcpip.Address{}) && e.stack.CheckNetworkProtocol(header.IPv4ProtocolNumber) { netProtos = []tcpip.NetworkProtocolNumber{ header.IPv6ProtocolNumber, header.IPv4ProtocolNumber, diff --git a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/forwarder.go b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/forwarder.go index 4997fa11cb..711a5ed3da 100644 --- a/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/forwarder.go +++ b/vendor/gvisor.dev/gvisor/pkg/tcpip/transport/udp/forwarder.go @@ -47,7 +47,7 @@ func (f *Forwarder) HandlePacket(id stack.TransportEndpointID, pkt stack.PacketB f.handler(&ForwarderRequest{ stack: f.stack, id: id, - pkt: pkt, + pkt: pkt.IncRef(), }) return true diff --git a/vendor/gvisor.dev/gvisor/pkg/waiter/waiter.go b/vendor/gvisor.dev/gvisor/pkg/waiter/waiter.go index ff84959894..89a332c1c6 100644 --- a/vendor/gvisor.dev/gvisor/pkg/waiter/waiter.go +++ b/vendor/gvisor.dev/gvisor/pkg/waiter/waiter.go @@ -75,8 +75,9 @@ const ( EventRdNorm EventMask = 0x0040 // POLLRDNORM EventWrNorm EventMask = 0x0100 // POLLWRNORM EventInternal EventMask = 0x1000 + EventRdHUp EventMask = 0x2000 // POLLRDHUP - allEvents EventMask = 0x1f | EventRdNorm | EventWrNorm + allEvents EventMask = 0x1f | EventRdNorm | EventWrNorm | EventRdHUp ReadableEvents EventMask = EventIn | EventRdNorm WritableEvents EventMask = EventOut | EventWrNorm ) diff --git a/vendor/modules.txt b/vendor/modules.txt index adbbf07e79..00f07f88a6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -93,8 +93,8 @@ github.com/cloudflare/circl/math/mlsbset github.com/cloudflare/circl/sign github.com/cloudflare/circl/sign/ed25519 github.com/cloudflare/circl/sign/ed448 -# github.com/containers/gvisor-tap-vsock v0.6.2 -## explicit; go 1.18 +# github.com/containers/gvisor-tap-vsock v0.7.1 +## explicit; go 1.20 github.com/containers/gvisor-tap-vsock/pkg/client github.com/containers/gvisor-tap-vsock/pkg/fs github.com/containers/gvisor-tap-vsock/pkg/net/stdio @@ -518,7 +518,7 @@ github.com/mdlayher/vsock # github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d ## explicit github.com/mgutz/ansi -# github.com/miekg/dns v1.1.55 +# github.com/miekg/dns v1.1.56 ## explicit; go 1.19 github.com/miekg/dns # github.com/miekg/pkcs11 v1.1.1 @@ -546,7 +546,7 @@ github.com/munnerz/goautoneg # github.com/oklog/ulid v1.3.1 ## explicit github.com/oklog/ulid -# github.com/onsi/ginkgo/v2 v2.9.7 +# github.com/onsi/ginkgo/v2 v2.11.0 ## explicit; go 1.18 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -568,7 +568,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.27.8 +# 
github.com/onsi/gomega v1.27.10 ## explicit; go 1.18 github.com/onsi/gomega github.com/onsi/gomega/format @@ -590,7 +590,7 @@ github.com/opencontainers/image-spec/specs-go/v1 # github.com/opencontainers/runc v1.1.5 ## explicit; go 1.16 github.com/opencontainers/runc/libcontainer/user -# github.com/opencontainers/runtime-spec v1.0.3-0.20220706171101-8d0d6d41d096 +# github.com/opencontainers/runtime-spec v1.1.0-rc.1 ## explicit github.com/opencontainers/runtime-spec/specs-go # github.com/openshift/api v0.0.0-20230711143145-b5a47f95ba70 @@ -785,7 +785,7 @@ go.mongodb.org/mongo-driver/x/bsonx/bsoncore # go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 ## explicit; go 1.11 go.mozilla.org/pkcs7 -# golang.org/x/crypto v0.11.0 +# golang.org/x/crypto v0.13.0 ## explicit; go 1.17 golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt @@ -815,10 +815,10 @@ golang.org/x/crypto/sha3 golang.org/x/crypto/ssh golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/knownhosts -# golang.org/x/mod v0.10.0 +# golang.org/x/mod v0.12.0 ## explicit; go 1.17 golang.org/x/mod/semver -# golang.org/x/net v0.12.0 +# golang.org/x/net v0.15.0 ## explicit; go 1.17 golang.org/x/net/bpf golang.org/x/net/context @@ -845,7 +845,7 @@ golang.org/x/oauth2/internal golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.10.0 +# golang.org/x/sys v0.12.0 ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/execabs @@ -853,10 +853,10 @@ golang.org/x/sys/internal/unsafeheader golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.10.0 +# golang.org/x/term v0.12.0 ## explicit; go 1.17 golang.org/x/term -# golang.org/x/text v0.11.0 +# golang.org/x/text v0.13.0 ## explicit; go 1.17 golang.org/x/text/cases golang.org/x/text/encoding @@ -884,7 +884,7 @@ golang.org/x/text/width # golang.org/x/time v0.2.0 ## explicit golang.org/x/time/rate -# golang.org/x/tools v0.9.1 +# golang.org/x/tools v0.13.0 ## explicit; go 1.18 golang.org/x/tools/cmd/stringer golang.org/x/tools/go/ast/inspector @@ -896,6 +896,7 @@ golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core golang.org/x/tools/internal/event/keys golang.org/x/tools/internal/event/label +golang.org/x/tools/internal/event/tag golang.org/x/tools/internal/gcimporter golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/packagesinternal @@ -1023,11 +1024,11 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# gvisor.dev/gvisor v0.0.0-20221216231429-a78e892a26d2 -## explicit; go 1.18 +# gvisor.dev/gvisor v0.0.0-20230715022000-fd277b20b8db +## explicit; go 1.20 gvisor.dev/gvisor/pkg/atomicbitops gvisor.dev/gvisor/pkg/bits -gvisor.dev/gvisor/pkg/bufferv2 +gvisor.dev/gvisor/pkg/buffer gvisor.dev/gvisor/pkg/context gvisor.dev/gvisor/pkg/cpuid gvisor.dev/gvisor/pkg/gohacks