diff --git a/go.sum b/go.sum index d0d2d541d..62632a48f 100644 --- a/go.sum +++ b/go.sum @@ -102,7 +102,11 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= @@ -116,6 +120,11 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/dave/dst v0.26.2/go.mod h1:UMDJuIRPfyUCC78eFuB+SV/WI8oDeyFDvM/JR6NI3IU= +github.com/dave/gopackages v0.0.0-20170318123100-46e7023ec56e/go.mod h1:i00+b/gKdIDIxuLDFob7ustLAVqhsZRk2qVZrArELGQ= +github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/dave/kerr v0.0.0-20170318121727-bc25dd6abe8e/go.mod h1:qZqlPyPvfsDJt+3wHJ1EvSXDuVjFTK0j2p/ca+gtsb8= +github.com/dave/rebecca v0.9.1/go.mod h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWEmXBA= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -133,6 +142,7 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= @@ -244,6 +254,7 @@ github.com/google/martian v2.1.0+incompatible/go.mod 
h1:9I4somxYTbIHy5NJKHRl3wXi github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -304,6 +315,8 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jarrpa/ocs-operator/services/provider v1.4.12-0.rc2 h1:6aND2go4SlSd6yKagjJUbMEz4efc/1Pnqy0pYTab+E4= +github.com/jarrpa/ocs-operator/services/provider v1.4.12-0.rc2/go.mod h1:9Z6a3HgJvRkFmToWQOsADPVNtTF+ncdWYc9en0rHluU= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= @@ -389,6 +402,9 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/openshift/api v0.0.0-20220421141645-441fe135b2fc h1:snZcb7+9+Hnb2VIp4QbUpClqK/4R3CK/lTyeDoGhefA= +github.com/openshift/api v0.0.0-20220421141645-441fe135b2fc/go.mod h1:F/eU6jgr6Q2VhMu1mSpMmygxAELd7+BUxs3NHZ25jV4= +github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= @@ -435,6 +451,7 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -523,6 +540,7 @@ go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= 
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -654,6 +672,7 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -783,6 +802,7 @@ golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -897,6 +917,8 @@ google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20220207185906-7721543eae58 h1:i67FGOy2/zGfhE3YgHdrOrcFbOBhqdcRoBrsDqSQrOI= +google.golang.org/genproto v0.0.0-20220207185906-7721543eae58/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -921,6 +943,8 @@ google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0/go.mod 
h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -952,6 +976,7 @@ gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= @@ -977,11 +1002,13 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= k8s.io/api v0.23.4/go.mod h1:i77F4JfyNNrhOjZF7OwwNJS5Y1S9dpwvb9iYRYRczfI= k8s.io/api v0.23.6 h1:yOK34wbYECH4RsJbQ9sfkFK3O7f/DUHRlzFehkqZyVw= k8s.io/api v0.23.6/go.mod h1:1kFaYxGCFHYp3qd6a85DAj/yW8aVD6XLZMqJclkoi9g= k8s.io/apiextensions-apiserver v0.23.4 h1:AFDUEu/yEf0YnuZhqhIFhPLPhhcQQVuR1u3WCh0rveU= k8s.io/apiextensions-apiserver v0.23.4/go.mod h1:TWYAKymJx7nLMxWCgWm2RYGXHrGlVZnxIlGnvtfYu+g= +k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= k8s.io/apimachinery v0.23.4/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apimachinery v0.23.6 h1:RH1UweWJkWNTlFx0D8uxOpaU1tjIOvVVWV/bu5b3/NQ= @@ -989,6 +1016,7 @@ k8s.io/apimachinery v0.23.6/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hr k8s.io/apiserver v0.23.4/go.mod h1:A6l/ZcNtxGfPSqbFDoxxOjEjSKBaQmE+UTveOmMkpNc= k8s.io/client-go v0.23.4 h1:YVWvPeerA2gpUudLelvsolzH7c2sFoXXR5wM/sWqNFU= k8s.io/client-go v0.23.4/go.mod h1:PKnIL4pqLuvYUK1WU7RLTMYKPiIh7MYShLshtRY9cj0= +k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE= k8s.io/code-generator v0.23.4/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/component-base v0.23.4/go.mod h1:8o3Gg8i2vnUXGPOwciiYlkSaZT+p+7gA9Scoz8y4W4E= k8s.io/component-base v0.23.5 h1:8qgP5R6jG1BBSXmRYW+dsmitIrpk8F/fPEvgDenMCCE= @@ -1003,6 +1031,7 @@ k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lV k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf h1:M9XBsiMslw2lb2ZzglC0TOkBPK5NQi0/noUrdnoFwUg= k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod 
h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= @@ -1014,6 +1043,7 @@ sigs.k8s.io/controller-runtime v0.11.2/go.mod h1:P6QCzrEjLaZGqHsfd+os7JQ+WFZhvB8 sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/openshift/api/LICENSE b/vendor/github.com/openshift/api/LICENSE new file mode 100644 index 000000000..5c389317e --- /dev/null +++ b/vendor/github.com/openshift/api/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2020 Red Hat, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml new file mode 100644 index 000000000..f2e2cc365 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml @@ -0,0 +1,137 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/497 + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: clusteroperators.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ClusterOperator + listKind: ClusterOperatorList + plural: clusteroperators + shortNames: + - co + singular: clusteroperator + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The version the operator is at. + jsonPath: .status.versions[?(@.name=="operator")].version + name: Version + type: string + - description: Whether the operator is running and stable. + jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - description: Whether the operator is processing changes. + jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - description: Whether the operator is degraded. 
+ jsonPath: .status.conditions[?(@.type=="Degraded")].status + name: Degraded + type: string + - description: The time the operator's Available status last changed. + jsonPath: .status.conditions[?(@.type=="Available")].lastTransitionTime + name: Since + type: date + name: v1 + schema: + openAPIV3Schema: + description: "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds configuration that could apply to any operator. + type: object + status: + description: status holds the information about the state of an operator. It is consistent with status information across the Kubernetes ecosystem. + type: object + properties: + conditions: + description: conditions describes the state of the operator's managed and monitored components. + type: array + items: + description: ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components. + type: object + required: + - lastTransitionTime + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update to the current status property. + type: string + format: date-time + message: + description: message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. + type: string + reason: + description: reason is the CamelCase reason for the condition's current status. + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type specifies the aspect reported by this condition. + type: string + extension: + description: extension contains any additional status information specific to the operator which owns this status object. + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + relatedObjects: + description: 'relatedObjects is a list of objects that are "interesting" or related to this operator. Common uses are: 1. the detailed resource driving the operator 2. operator namespaces 3. operand namespaces' + type: array + items: + description: ObjectReference contains enough information to let you inspect or modify the referred object. + type: object + required: + - group + - name + - resource + properties: + group: + description: group of the referent. + type: string + name: + description: name of the referent. + type: string + namespace: + description: namespace of the referent. 
+ type: string + resource: + description: resource of the referent. + type: string + versions: + description: versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple operand entries in the array. Available operators must report the version of the operator itself with the name "operator". An operator reports a new "operator" version when it has rolled out the new version to all of its operands. + type: array + items: + type: object + required: + - name + - version + properties: + name: + description: name is the name of the particular operand this version is for. It usually matches container images, not operators. + type: string + version: + description: version indicates which version of a particular operand is currently being managed. It must always match the Available operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout 1.1.0 + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml new file mode 100644 index 000000000..24f379c13 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml @@ -0,0 +1,407 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/495 + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: clusterversions.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ClusterVersion + plural: clusterversions + singular: clusterversion + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.history[?(@.state=="Completed")].version + name: Version + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime + name: Since + type: date + - jsonPath: .status.conditions[?(@.type=="Progressing")].message + name: Status + type: string + name: v1 + schema: + openAPIV3Schema: + description: "ClusterVersion is the configuration for the ClusterVersionOperator. This is where parameters related to automatic updates can be set. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the desired state of the cluster version - the operator will work to ensure that the desired version is applied to the cluster. + type: object + required: + - clusterID + properties: + capabilities: + description: capabilities configures the installation of optional, core cluster components. A null value here is identical to an empty object; see the child properties for default semantics. + type: object + properties: + additionalEnabledCapabilities: + description: additionalEnabledCapabilities extends the set of managed capabilities beyond the baseline defined in baselineCapabilitySet. The default is an empty set. + type: array + items: + description: ClusterVersionCapability enumerates optional, core cluster components. + type: string + enum: + - openshift-samples + - baremetal + - marketplace + x-kubernetes-list-type: atomic + baselineCapabilitySet: + description: baselineCapabilitySet selects an initial set of optional capabilities to enable, which can be extended via additionalEnabledCapabilities. If unset, the cluster will choose a default, and the default may change over time. The current default is vCurrent. + type: string + enum: + - None + - v4.11 + - vCurrent + channel: + description: channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will contain stable updates that are appropriate for production clusters. + type: string + clusterID: + description: clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field. + type: string + desiredUpdate: + description: "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail. You may specify the version field without setting image if an update exists with that version in the availableUpdates or history. \n If an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed." + type: object + properties: + force: + description: force allows an administrator to update to an image that has failed verification or upgradeable checks. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that come from unknown or potentially malicious sources. + type: boolean + image: + description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + type: string + version: + description: version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified.
+ type: string + overrides: + description: overrides is a list of overrides for components that are managed by cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object. + type: array + items: + description: ComponentOverride allows overriding cluster version operator's behavior for a component. + type: object + required: + - group + - kind + - name + - namespace + - unmanaged + properties: + group: + description: group identifies the API group that the kind is in. + type: string + kind: + description: kind identifies which object to override. + type: string + name: + description: name is the component's name. + type: string + namespace: + description: namespace is the component's namespace. If the resource is cluster scoped, the namespace should be empty. + type: string + unmanaged: + description: 'unmanaged controls if cluster version operator should stop managing the resources in this cluster. Default: false' + type: boolean + upstream: + description: upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region. + type: string + status: + description: status contains information about the available updates and any in-progress updates. + type: object + required: + - availableUpdates + - desired + - observedGeneration + - versionHash + properties: + availableUpdates: + description: availableUpdates contains updates recommended for this cluster. Updates which appear in conditionalUpdates but not in availableUpdates may expose this cluster to known issues. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified. + type: array + items: + description: Release represents an OpenShift release image and associated metadata. + type: object + properties: + channels: + description: channels is the set of Cincinnati channels to which the release currently belongs. + type: array + items: + type: string + image: + description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + type: string + url: + description: url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + type: string + version: + description: version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified. + type: string + nullable: true + capabilities: + description: capabilities describes the state of optional, core cluster components. + type: object + properties: + enabledCapabilities: + description: enabledCapabilities lists all the capabilities that are currently managed. + type: array + items: + description: ClusterVersionCapability enumerates optional, core cluster components. + type: string + enum: + - openshift-samples + - baremetal + - marketplace + x-kubernetes-list-type: atomic + knownCapabilities: + description: knownCapabilities lists all the capabilities known to the current cluster. + type: array + items: + description: ClusterVersionCapability enumerates optional, core cluster components.
+ type: string + enum: + - openshift-samples + - baremetal + - marketplace + x-kubernetes-list-type: atomic + conditionalUpdates: + description: conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified. + type: array + items: + description: ConditionalUpdate represents an update which is recommended to some clusters on the version the current cluster is reconciling, but which may not be recommended for the current cluster. + type: object + required: + - release + - risks + properties: + conditions: + description: 'conditions represents the observations of the conditional update''s current status. Known types are: * Evaluating, for whether the cluster-version operator will attempt to evaluate any risks[].matchingRules. * Recommended, for whether the update is recommended for the current cluster.' + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + release: + description: release is the target of the update. + type: object + properties: + channels: + description: channels is the set of Cincinnati channels to which the release currently belongs. + type: array + items: + type: string + image: + description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + type: string + url: + description: url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + type: string + version: + description: version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified. + type: string + risks: + description: risks represents the range of issues associated with updating to the target release. The cluster-version operator will evaluate all entries, and only recommend the update if there is at least one entry and all entries recommend the update. + type: array + minItems: 1 + items: + description: ConditionalUpdateRisk represents a reason and cluster-state for not recommending a conditional update. + type: object + required: + - matchingRules + - message + - name + - url + properties: + matchingRules: + description: matchingRules is a slice of conditions for deciding which clusters match the risk and which do not. The slice is ordered by decreasing precedence. The cluster-version operator will walk the slice in order, and stop after the first it can successfully evaluate. If no condition can be successfully evaluated, the update will not be recommended. + type: array + minItems: 1 + items: + description: ClusterCondition is a union of typed cluster conditions. The 'type' property determines which of the type-specific properties are relevant. When evaluated on a cluster, the condition may match, not match, or fail to evaluate. + type: object + required: + - type + properties: + promql: + description: promQL represents a cluster condition based on PromQL. + type: object + required: + - promql + properties: + promql: + description: PromQL is a PromQL query classifying clusters. This query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures. + type: string + type: + description: type represents the cluster-condition type. This defines the members and semantics of any additional properties. + type: string + enum: + - Always + - PromQL + x-kubernetes-list-type: atomic + message: + description: message provides additional information about the risk of updating, in the event that matchingRules match the cluster state. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.
+ type: string + minLength: 1 + name: + description: name is the CamelCase reason for not recommending a conditional update, in the event that matchingRules match the cluster state. + type: string + minLength: 1 + url: + description: url contains information about this risk. + type: string + format: uri + minLength: 1 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-list-type: atomic + conditions: + description: conditions provides information about the cluster version. The condition "Available" is set to true if the desiredUpdate has been reached. The condition "Progressing" is set to true if an update is being applied. The condition "Degraded" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation. + type: array + items: + description: ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components. + type: object + required: + - lastTransitionTime + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update to the current status property. + type: string + format: date-time + message: + description: message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. + type: string + reason: + description: reason is the CamelCase reason for the condition's current status. + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type specifies the aspect reported by this condition. + type: string + desired: + description: desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag. + type: object + properties: + channels: + description: channels is the set of Cincinnati channels to which the release currently belongs. + type: array + items: + type: string + image: + description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + type: string + url: + description: url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + type: string + version: + description: version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified. + type: string + history: + description: history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved. + type: array + items: + description: UpdateHistory is a single attempted update to the cluster. 
+ type: object + required: + - completionTime + - image + - startedTime + - state + - verified + properties: + acceptedRisks: + description: acceptedRisks records risks which were accepted to initiate the update. For example, it may mention an Upgradeable=False or missing signature that was overridden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets. + type: string + completionTime: + description: completionTime, if set, is when the update was fully applied. The update that is currently being applied will have a null completion time. Completion time will always be set for entries that are not the current update (usually to the started time of the next update). + type: string + format: date-time + nullable: true + image: + description: image is a container image location that contains the update. This value is always populated. + type: string + startedTime: + description: startedTime is the time at which the update was started. + type: string + format: date-time + state: + description: state reflects whether the update was fully applied. The Partial state indicates the update is not fully applied, while the Completed state indicates the update was successfully rolled out at least once (all parts of the update successfully applied). + type: string + verified: + description: verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted. Verified does not cover upgradeable checks that depend on the cluster state at the time when the update target was accepted. + type: boolean + version: + description: version is a semantic versioning identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty. + type: string + observedGeneration: + description: observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version. + type: integer + format: int64 + versionHash: + description: versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only. + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml new file mode 100644 index 000000000..246225397 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml @@ -0,0 +1,78 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: proxies.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Proxy + listKind: ProxyList + plural: proxies + singular: proxy + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Proxy holds cluster-wide information on how to configure default proxies for the cluster.
The canonical name is `cluster` \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec holds user-settable values for the proxy configuration + type: object + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. Empty means unset and will not result in an env var. + type: string + readinessEndpoints: + description: readinessEndpoints is a list of endpoints used to verify readiness of the proxy. + type: array + items: + type: string + trustedCA: + description: "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well. \n The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml): \n apiVersion: v1 kind: ConfigMap metadata: name: user-ca-bundle namespace: openshift-config data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom CA certificate bundle. -----END CERTIFICATE-----" + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. 
+ type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_marketplace-operator_01_operatorhub.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_marketplace-operator_01_operatorhub.crd.yaml new file mode 100644 index 000000000..e9895002f --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_03_marketplace-operator_01_operatorhub.crd.yaml @@ -0,0 +1,84 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + capability.openshift.io/name: "marketplace" + name: operatorhubs.config.openshift.io +spec: + group: config.openshift.io + names: + kind: OperatorHub + listKind: OperatorHubList + plural: operatorhubs + singular: operatorhub + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "OperatorHub is the Schema for the operatorhubs API. It can be used to change the state of the default hub sources for OperatorHub on the cluster from enabled to disabled and vice versa. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OperatorHubSpec defines the desired state of OperatorHub + type: object + properties: + disableAllDefaultSources: + description: disableAllDefaultSources allows you to disable all the default hub sources. If this is true, a specific entry in sources can be used to enable a default source. If this is false, a specific entry in sources can be used to disable or enable a default source. + type: boolean + sources: + description: sources is the list of default hub sources and their configuration. If the list is empty, it implies that the default hub sources are enabled on the cluster unless disableAllDefaultSources is true. If disableAllDefaultSources is true and sources is not empty, the configuration present in sources will take precedence. The list of default hub sources and their current state will always be reflected in the status block. + type: array + items: + description: HubSource is used to specify the hub source and its configuration + type: object + properties: + disabled: + description: disabled is used to disable a default hub source on cluster + type: boolean + name: + description: name is the name of one of the default hub sources + type: string + maxLength: 253 + minLength: 1 + status: + description: OperatorHubStatus defines the observed state of OperatorHub. 
The current state of the default hub sources will always be reflected here. + type: object + properties: + sources: + description: sources encapsulates the result of applying the configuration for each hub source + type: array + items: + description: HubSourceStatus is used to reflect the current state of applying the configuration to a default source + type: object + properties: + disabled: + description: disabled is used to disable a default hub source on cluster + type: boolean + message: + description: message provides more information regarding failures + type: string + name: + description: name is the name of one of the default hub sources + type: string + maxLength: 253 + minLength: 1 + status: + description: status indicates success or failure in applying the configuration + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml new file mode 100644 index 000000000..3ff78377a --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml @@ -0,0 +1,177 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: apiservers.config.openshift.io +spec: + group: config.openshift.io + names: + kind: APIServer + listKind: APIServerList + plural: apiservers + singular: apiserver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "APIServer holds configuration (like serving certificates, client CA and CORS domains) shared by all API servers in the system, among them especially kube-apiserver and openshift-apiserver. The canonical name of an instance is 'cluster'. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + additionalCORSAllowedOrigins: + description: additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language. 
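For reference, a minimal OperatorHub resource conforming to the schema just completed above might look like the following sketch (the source name is illustrative; actual default source names depend on the cluster):

apiVersion: config.openshift.io/v1
kind: OperatorHub
metadata:
  name: cluster
spec:
  # disable every default hub source, then selectively re-enable one
  disableAllDefaultSources: true
  sources:
    - name: community-operators   # hypothetical default source name
      disabled: false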
+ type: array + items: + type: string + audit: + description: audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster. + type: object + default: + profile: Default + properties: + customRules: + description: customRules specify profiles per group. These profiles take precedence over the top-level profile field where they apply. They are evaluated from top to bottom, and the first one that matches applies. + type: array + items: + description: AuditCustomRule describes a custom rule for an audit profile that takes precedence over the top-level profile. + type: object + required: + - group + - profile + properties: + group: + description: group is the name of a group that a request user must be a member of in order for this profile to apply. + type: string + minLength: 1 + profile: + description: "profile specifies the name of the desired audit policy configuration to be deployed to all OpenShift-provided API servers in the cluster. \n The following profiles are provided: - Default: the existing default policy. - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. \n If unset, the 'Default' profile is used as the default." + type: string + enum: + - Default + - WriteRequestBodies + - AllRequestBodies + - None + x-kubernetes-list-map-keys: + - group + x-kubernetes-list-type: map + profile: + description: "profile specifies the name of the desired top-level audit profile to be applied to all requests sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, openshift-apiserver and oauth-apiserver), with the exception of those requests that match one or more of the customRules. \n The following profiles are provided: - Default: default policy which means MetaData level logging with the exception of events (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody level). - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. \n Warning: It is not recommended to disable audit logging by using the `None` profile unless you are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. If you disable audit logging and a support situation arises, you might need to enable audit logging and reproduce the issue in order to troubleshoot properly. \n If unset, the 'Default' profile is used as the default." + type: string + default: Default + enum: + - Default + - WriteRequestBodies + - AllRequestBodies + - None + clientCA: + description: 'clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from.
The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"] - CA bundle.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + encryption: + description: encryption allows the configuration of encryption of resources at the datastore layer. + type: object + properties: + type: + description: "type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices. \n When encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. The current authoritative list is: \n 1. secrets 2. configmaps 3. routes.route.openshift.io 4. oauthaccesstokens.oauth.openshift.io 5. oauthauthorizetokens.oauth.openshift.io" + type: string + enum: + - "" + - identity + - aescbc + servingCerts: + description: servingCerts is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic. + type: object + properties: + namedCertificates: + description: namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. If no named certificates are provided, or no named certificates match the server name as understood by a client, the defaultServingCertificate will be used. + type: array + items: + description: APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate. + type: object + properties: + names: + description: names is an optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. Exact names take precedence over wildcard names. Explicit names defined here take precedence over extracted implicit names. + type: array + items: + type: string + servingCertificate: + description: 'servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. The secret must exist in the openshift-config namespace and contain the following required fields: - Secret.Data["tls.key"] - TLS private key. - Secret.Data["tls.crt"] - TLS certificate.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + tlsSecurityProfile: + description: "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. \n If unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12." + type: object + properties: + custom: + description: "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic.
An example custom profile looks like this: \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1" + type: object + properties: + ciphers: + description: "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" + type: array + items: + type: string + minTLSVersion: + description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" + type: string + enum: + - VersionTLS10 + - VersionTLS11 + - VersionTLS12 + - VersionTLS13 + nullable: true + intermediate: + description: "intermediate is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: TLSv1.2" + type: object + nullable: true + modern: + description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported." + type: object + nullable: true + old: + description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0" + type: object + nullable: true + type: + description: "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations \n The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced. \n Note that the Modern profile is currently not supported because it is not yet well adopted by common software libraries." 
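Tying the tlsSecurityProfile fields together, a hypothetical APIServer resource using the custom profile from the schema's own example might look like this (cipher list and minTLSVersion taken verbatim from the example above):

apiVersion: config.openshift.io/v1
kind: APIServer
metadata:
  name: cluster   # the canonical singleton name
spec:
  tlsSecurityProfile:
    type: Custom
    custom:
      ciphers:
        - ECDHE-ECDSA-CHACHA20-POLY1305
        - ECDHE-RSA-CHACHA20-POLY1305
        - ECDHE-RSA-AES128-GCM-SHA256
        - ECDHE-ECDSA-AES128-GCM-SHA256
      minTLSVersion: VersionTLS11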
+ type: string + enum: + - Old + - Intermediate + - Modern + - Custom + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml new file mode 100644 index 000000000..bb695bac7 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml @@ -0,0 +1,101 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: authentications.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Authentication + listKind: AuthenticationList + plural: authentications + singular: authentication + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + oauthMetadata: + description: 'oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw ''/.well-known/oauth-authorization-server'' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key "oauthMetadata" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + serviceAccountIssuer: + description: 'serviceAccountIssuer is the identifier of the bound service account token issuer. 
The default is https://kubernetes.default.svc WARNING: Updating this field will result in the invalidation of all bound tokens with the previous issuer value. Unless the holder of a bound token has explicit support for a change in issuer, they will not request a new bound token until pod restart or until their existing token exceeds 80% of its duration.' + type: string + type: + description: type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth. + type: string + webhookTokenAuthenticator: + description: webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service. + type: object + required: + - kubeConfig + properties: + kubeConfig: + description: "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config. \n For further details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication \n The key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored." + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + webhookTokenAuthenticators: + description: webhookTokenAuthenticators is DEPRECATED, setting it has no effect. + type: array + items: + description: deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. + type: object + properties: + kubeConfig: + description: 'kubeConfig contains kube config file data which describes how to access the remote webhook service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key "kubeConfig" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. The namespace for this secret is determined by the point of use.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + integratedOAuthMetadata: + description: 'integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw ''/.well-known/oauth-authorization-server'' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key "oauthMetadata" is used to locate the data. 
If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config-managed.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml new file mode 100644 index 000000000..f67be27db --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml @@ -0,0 +1,271 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: builds.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Build + listKind: BuildList + plural: builds + singular: build + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds. \n The canonical name is \"cluster\" \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec holds user-settable values for the build controller configuration + type: object + properties: + additionalTrustedCA: + description: "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config. \n DEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead." + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + buildDefaults: + description: BuildDefaults controls the default information for Builds + type: object + properties: + defaultProxy: + description: "DefaultProxy contains the default proxy settings for all build operations, including image pull/push and source download. \n Values can be overridden by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy."
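As a sketch of how these build defaults compose, a Build resource setting a cluster-wide default proxy might look like this (proxy URLs are hypothetical):

apiVersion: config.openshift.io/v1
kind: Build
metadata:
  name: cluster
spec:
  buildDefaults:
    defaultProxy:
      httpProxy: http://proxy.example.com:3128    # hypothetical proxy endpoint
      httpsProxy: http://proxy.example.com:3128
      noProxy: .cluster.local,.svc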
+ type: object + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. Empty means unset and will not result in an env var. + type: string + readinessEndpoints: + description: readinessEndpoints is a list of endpoints used to verify readiness of the proxy. + type: array + items: + type: string + trustedCA: + description: "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well. \n The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml): \n apiVersion: v1 kind: ConfigMap metadata: name: user-ca-bundle namespace: openshift-config data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom CA certificate bundle. -----END CERTIFICATE-----" + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + env: + description: Env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build + type: array + items: + description: EnvVar represents an environment variable present in a Container. + type: object + required: + - name + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + type: object + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + type: object + required: + - key + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
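To illustrate the EnvVar shape just described, a buildDefaults.env entry can carry either a literal value or a valueFrom reference; both entries below are hypothetical:

spec:
  buildDefaults:
    env:
      - name: GIT_SSL_NO_VERIFY        # literal value form
        value: "false"
      - name: ARTIFACT_REPO_URL        # hypothetical variable resolved from a ConfigMap
        valueFrom:
          configMapKeyRef:
            name: build-settings       # hypothetical ConfigMap name
            key: artifactRepoURL
            optional: true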
+ type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + type: object + required: + - fieldPath + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + type: object + required: + - resource + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + type: object + required: + - key + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + gitProxy: + description: "GitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone. \n Values that are not set here will be inherited from DefaultProxy." + type: object + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. Empty means unset and will not result in an env var. + type: string + readinessEndpoints: + description: readinessEndpoints is a list of endpoints used to verify readiness of the proxy. + type: array + items: + type: string + trustedCA: + description: "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well. 
\n The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml): \n apiVersion: v1 kind: ConfigMap metadata: name: user-ca-bundle namespace: openshift-config data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom CA certificate bundle. -----END CERTIFICATE-----" + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + imageLabels: + description: ImageLabels is a list of docker labels that are applied to the resulting image. Users can override a default label by providing a label with the same name in their Build/BuildConfig. + type: array + items: + type: object + properties: + name: + description: Name defines the name of the label. It must have non-zero length. + type: string + value: + description: Value defines the literal value of the label. + type: string + resources: + description: Resources defines resource requirements to execute the build. + type: object + properties: + limits: + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + requests: + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + buildOverrides: + description: BuildOverrides controls override settings for builds + type: object + properties: + forcePull: + description: ForcePull overrides, if set, the equivalent value in the builds, i.e. false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself + type: boolean + imageLabels: + description: ImageLabels is a list of docker labels that are applied to the resulting image. If the user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten. + type: array + items: + type: object + properties: + name: + description: Name defines the name of the label. It must have non-zero length. + type: string + value: + description: Value defines the literal value of the label. + type: string + nodeSelector: + description: NodeSelector is a selector which must be true for the build pod to fit on a node + type: object + additionalProperties: + type: string + tolerations: + description: Tolerations is a list of Tolerations that will override any existing tolerations set on a build pod. + type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>. + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
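A buildOverrides stanza using the nodeSelector and tolerations fields described here might look like this sketch (label and taint names are hypothetical):

spec:
  buildOverrides:
    nodeSelector:
      node-role.kubernetes.io/builds: ""   # hypothetical node label
    tolerations:
      - key: builds                        # hypothetical taint key
        operator: Exists                   # Exists tolerates all values of the key
        effect: NoSchedule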
+ type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml new file mode 100644 index 000000000..188b45e01 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml @@ -0,0 +1,57 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: consoles.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Console + listKind: ConsoleList + plural: consoles + singular: console + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Console holds cluster-wide configuration for the web console, including the logout URL, and reports the public URL of the console. The canonical name is `cluster`. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + authentication: + description: ConsoleAuthentication defines a list of optional configuration for console authentication. + type: object + properties: + logoutRedirect: + description: 'An optional, absolute URL to redirect web browsers to after logging out of the console. 
If not specified, it will redirect to the default login page. This is required when using an identity provider that supports single sign-on (SSO) such as: - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML) - OAuth (GitHub, GitLab, Google) Logging out of the console will destroy the user''s token. The logoutRedirect provides the user the option to perform single logout (SLO) through the identity provider to destroy their single sign-on session.' + type: string + pattern: ^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$ + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + consoleURL: + description: The URL for the console. This will be derived from the host for the route that is created for the console. + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml new file mode 100644 index 000000000..e4fa56eee --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml @@ -0,0 +1,72 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: dnses.config.openshift.io +spec: + group: config.openshift.io + names: + kind: DNS + listKind: DNSList + plural: dnses + singular: dns + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "DNS holds cluster-wide information about DNS. The canonical name is `cluster` \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + baseDomain: + description: "baseDomain is the base domain of the cluster. All managed DNS records will be sub-domains of this base. \n For example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`. \n Once set, this field cannot be changed." + type: string + privateZone: + description: "privateZone is the location where all the DNS records that are only available internally to the cluster exist. \n If this field is nil, no private records should be created. \n Once set, this field cannot be changed." 
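For the Console schema completed above, a minimal resource enabling single logout might look like this (the IdP logout URL is hypothetical; note it must match the https-only pattern on logoutRedirect):

apiVersion: config.openshift.io/v1
kind: Console
metadata:
  name: cluster
spec:
  authentication:
    logoutRedirect: https://idp.example.com/logout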
+ type: object + properties: + id: + description: "id is the identifier that can be used to find the DNS hosted zone. \n on AWS, the zone can be fetched using `ID` as the id in [1]; on Azure, the zone can be fetched using `ID` as a pre-determined name in [2]; on GCP, the zone can be fetched using `ID` as a pre-determined name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + description: "tags can be used to query the DNS hosted zone. \n on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + additionalProperties: + type: string + publicZone: + description: "publicZone is the location where all the DNS records that are publicly accessible to the internet exist. \n If this field is nil, no public records should be created. \n Once set, this field cannot be changed." + type: object + properties: + id: + description: "id is the identifier that can be used to find the DNS hosted zone. \n on AWS, the zone can be fetched using `ID` as the id in [1]; on Azure, the zone can be fetched using `ID` as a pre-determined name in [2]; on GCP, the zone can be fetched using `ID` as a pre-determined name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + description: "tags can be used to query the DNS hosted zone. \n on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + additionalProperties: + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml new file mode 100644 index 000000000..5254d0ce2 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml @@ -0,0 +1,63 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: featuregates.config.openshift.io +spec: + group: config.openshift.io + names: + kind: FeatureGate + listKind: FeatureGateList + plural: featuregates + singular: featuregate + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Feature holds cluster-wide information about feature gates. The canonical name is `cluster` \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)."
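A hypothetical DNS resource exercising the zone fields completed above (the base domain reuses the schema's own example; the zone id and tag are illustrative):

apiVersion: config.openshift.io/v1
kind: DNS
metadata:
  name: cluster
spec:
  baseDomain: openshift.example.com
  publicZone:
    id: Z2XAMPLE1234567          # hypothetical AWS Route 53 hosted zone ID
  privateZone:
    tags:
      Name: mycluster-int        # hypothetical tag filter for the private zone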
+ type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + customNoUpgrade: + description: customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations, your cluster may fail in an unrecoverable way. featureSet must be set to "CustomNoUpgrade" to use this field. + type: object + properties: + disabled: + description: disabled is a list of all feature gates that you want to force off + type: array + items: + type: string + enabled: + description: enabled is a list of all feature gates that you want to force on + type: array + items: + type: string + nullable: true + featureSet: + description: featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. Turning on or off features may cause irreversible changes in your cluster which cannot be undone. + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml new file mode 100644 index 000000000..a160fef40 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml @@ -0,0 +1,108 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: images.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Image + listKind: ImageList + plural: images + singular: image + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Image governs policies related to imagestream imports and runtime configuration for external registries. It allows cluster admins to configure which registries OpenShift is allowed to import images from, extra CA trust bundles for external registries, and policies to block or allow registry hostnames. When exposing OpenShift's image registry to the public, this also lets cluster admins specify the external hostname. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)."
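Closing out the FeatureGate schema above, a CustomNoUpgrade configuration would combine featureSet with customNoUpgrade as in this sketch (gate names are hypothetical; recall the warning that this is unsupported and irreversible):

apiVersion: config.openshift.io/v1
kind: FeatureGate
metadata:
  name: cluster
spec:
  featureSet: CustomNoUpgrade    # required for customNoUpgrade to take effect
  customNoUpgrade:
    enabled:
      - SomeAlphaFeature         # hypothetical gate name
    disabled:
      - SomeOtherFeature         # hypothetical gate name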
+ type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + additionalTrustedCA: + description: additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, build image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + allowedRegistriesForImport: + description: allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions. + type: array + items: + description: RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'. + type: object + properties: + domainName: + description: domainName specifies a domain name for the registry. In case the registry uses a non-standard (80 or 443) port, the port should be included in the domain name as well. + type: string + insecure: + description: insecure indicates whether the registry is secure (https) or insecure (http). By default (if not specified) the registry is assumed to be secure. + type: boolean + externalRegistryHostnames: + description: externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in the 'publicDockerImageRepository' field in ImageStreams. The value must be in "hostname[:port]" format. + type: array + items: + type: string + registrySources: + description: registrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry. + type: object + properties: + allowedRegistries: + description: "allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied. \n Only one of BlockedRegistries or AllowedRegistries may be set." + type: array + items: + type: string + blockedRegistries: + description: "blockedRegistries cannot be used for image pull and push actions. All other registries are permitted. \n Only one of BlockedRegistries or AllowedRegistries may be set."
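Since allowedRegistries and blockedRegistries are mutually exclusive, a registrySources configuration picks one strategy; a hypothetical block-list variant might look like this:

apiVersion: config.openshift.io/v1
kind: Image
metadata:
  name: cluster
spec:
  registrySources:
    blockedRegistries:          # must not be combined with allowedRegistries
      - untrusted.example.com   # hypothetical registry host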
+ type: array + items: + type: string + containerRuntimeSearchRegistries: + description: 'containerRuntimeSearchRegistries are registries that will be searched when pulling images that do not have fully qualified domains in their pull specs. Registries will be searched in the order provided in the list. Note: this search list only works with the container runtime, i.e. CRI-O. Will NOT work with builds or imagestream imports.' + type: array + format: hostname + minItems: 1 + items: + type: string + x-kubernetes-list-type: set + insecureRegistries: + description: insecureRegistries are registries which do not have valid TLS certificates or which only support HTTP connections. + type: array + items: + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + externalRegistryHostnames: + description: externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in the 'publicDockerImageRepository' field in ImageStreams. The value must be in "hostname[:port]" format. + type: array + items: + type: string + internalRegistryHostname: + description: internalRegistryHostname sets the hostname for the default internal image registry. The value must be in "hostname[:port]" format. This value is set by the image registry operator which controls the internal registry hostname. For backward compatibility, users can still use the OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable. + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagecontentpolicy.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagecontentpolicy.crd.yaml new file mode 100644 index 000000000..147c73c44 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagecontentpolicy.crd.yaml @@ -0,0 +1,68 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/874 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: imagecontentpolicies.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ImageContentPolicy + listKind: ImageContentPolicyList + plural: imagecontentpolicies + singular: imagecontentpolicy + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ImageContentPolicy holds cluster-wide information about how to handle registry mirror rules. When multiple policies are defined, the outcome of the behavior is defined on each field. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + repositoryDigestMirrors: + description: "repositoryDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in RepositoryDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To pull images from mirrors by tags, \"allowMirrorByTags\" should be set. \n Each “source” repository is treated independently; configurations for different “source” repositories don’t interact. \n If \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec. \n When multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified." + type: array + items: + description: RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config. + type: object + required: + - source + properties: + allowMirrorByTags: + description: allowMirrorByTags, if true, means the mirrors can be used to pull the images that are referenced by their tags. Default is false, meaning the mirrors only work when pulling the images that are referenced by their digests. Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. Forcing digest-pulls for mirrors avoids that issue. + type: boolean + mirrors: + description: mirrors is zero or more repositories that may also contain the same images. If "mirrors" is not specified, the image will continue to be pulled from the specified repository in the pull spec. No mirror will be configured. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. Other cluster configuration, including (but not limited to) other repositoryDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering. + type: array + items: + type: string + pattern: ^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$ + x-kubernetes-list-type: set + source: + description: source is the repository that users refer to, e.g. in image pull specifications.
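+ # Illustrative example (not part of the vendored schema): a hypothetical ImageContentPolicy that mirrors digest-based pulls for one source repository; both repository locations are invented placeholders.
+ #   apiVersion: config.openshift.io/v1
+ #   kind: ImageContentPolicy
+ #   metadata:
+ #     name: example-mirror
+ #   spec:
+ #     repositoryDigestMirrors:
+ #     - source: registry.example.com/team/app
+ #       mirrors:
+ #       - mirror.example.net/team/app
+ #       allowMirrorByTags: false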
+ type: string + pattern: ^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$ + x-kubernetes-list-map-keys: + - source + x-kubernetes-list-type: map + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml new file mode 100644 index 000000000..7db041fba --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml @@ -0,0 +1,532 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: infrastructures.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Infrastructure + listKind: InfrastructureList + plural: infrastructures + singular: infrastructure + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster` \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + cloudConfig: + description: "cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. This configuration file is used to configure the Kubernetes cloud provider integration when using the built-in cloud provider integration or the external cloud controller manager. The namespace for this config map is openshift-config. \n cloudConfig should only be consumed by the kube_cloud_config controller. The controller is responsible for using the user configuration in the spec for various platforms and combining that with the user provided ConfigMap in this field to create a stitched kube cloud config. The controller generates a ConfigMap `kube-cloud-config` in the `openshift-config-managed` namespace, with the kube cloud config stored in the `cloud.conf` key. All the clients are expected to use the generated ConfigMap only." + type: object + properties: + key: + description: Key allows pointing to a specific key/value inside the configmap. This is useful for logical file references.
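+ # Illustrative example (not part of the vendored schema): a hypothetical spec.cloudConfig reference as the kube_cloud_config controller would consume it; the ConfigMap name is an invented placeholder.
+ #   spec:
+ #     cloudConfig:
+ #       name: my-cloud-provider-config  # a ConfigMap in the openshift-config namespace
+ #       key: cloud.conf                 # the key within that ConfigMap holding the provider config file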
+ type: string + name: + type: string + platformSpec: + description: platformSpec holds desired information specific to the underlying infrastructure provider. + type: object + properties: + alibabaCloud: + description: AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. + type: object + aws: + description: AWS contains settings specific to the Amazon Web Services infrastructure provider. + type: object + properties: + serviceEndpoints: + description: serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. + type: array + items: + description: AWSServiceEndpoint store the configuration of a custom url to override existing defaults of AWS Services. + type: object + properties: + name: + description: name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty. + type: string + pattern: ^[a-z0-9-]+$ + url: + description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + type: string + pattern: ^https:// + azure: + description: Azure contains settings specific to the Azure infrastructure provider. + type: object + baremetal: + description: BareMetal contains settings specific to the BareMetal platform. + type: object + equinixMetal: + description: EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. + type: object + gcp: + description: GCP contains settings specific to the Google Cloud Platform infrastructure provider. + type: object + ibmcloud: + description: IBMCloud contains settings specific to the IBMCloud infrastructure provider. + type: object + kubevirt: + description: Kubevirt contains settings specific to the kubevirt infrastructure provider. + type: object + nutanix: + description: Nutanix contains settings specific to the Nutanix infrastructure provider. + type: object + required: + - prismCentral + - prismElements + properties: + prismCentral: + description: prismCentral holds the endpoint address and port to access the Nutanix Prism Central. When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list. + type: object + required: + - address + - port + properties: + address: + description: address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster) + type: string + maxLength: 256 + port: + description: port is the port number to access the Nutanix Prism Central or Element (cluster) + type: integer + format: int32 + maximum: 65535 + minimum: 1 + prismElements: + description: prismElements holds one or more endpoint address and port data to access the Nutanix Prism Elements (clusters) of the Nutanix Prism Central. Currently we only support one Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) spread over multiple Prism Elements (clusters) of the Prism Central. 
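+ # Illustrative example (not part of the vendored schema): a hypothetical spec.platformSpec.nutanix stanza matching the prismCentral/prismElements schema above; the addresses are invented placeholders and 9440 is assumed as the Prism port.
+ #   platformSpec:
+ #     type: Nutanix
+ #     nutanix:
+ #       prismCentral:
+ #         address: prismcentral.example.internal
+ #         port: 9440
+ #       prismElements:
+ #       - name: pe-cluster-1
+ #         endpoint:
+ #           address: pe1.example.internal
+ #           port: 9440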
+ type: array + items: + description: NutanixPrismElementEndpoint holds the name and endpoint data for a Prism Element (cluster) + type: object + required: + - endpoint + - name + properties: + endpoint: + description: endpoint holds the endpoint address and port data of the Prism Element (cluster). When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list. + type: object + required: + - address + - port + properties: + address: + description: address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster) + type: string + maxLength: 256 + port: + description: port is the port number to access the Nutanix Prism Central or Element (cluster) + type: integer + format: int32 + maximum: 65535 + minimum: 1 + name: + description: name is the name of the Prism Element (cluster). This value will correspond with the cluster field configured on other resources (eg Machines, PVCs, etc). + type: string + maxLength: 256 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + openstack: + description: OpenStack contains settings specific to the OpenStack infrastructure provider. + type: object + ovirt: + description: Ovirt contains settings specific to the oVirt infrastructure provider. + type: object + powervs: + description: PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. + type: object + properties: + serviceEndpoints: + description: serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service. + type: array + items: + description: PowervsServiceEndpoint stores the configuration of a custom url to override existing defaults of PowerVS Services. + type: object + required: + - name + - url + properties: + name: + description: name is the name of the Power VS service. Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + type: string + pattern: ^[a-z0-9-]+$ + url: + description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + type: string + format: uri + pattern: ^https:// + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: + description: type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", "Nutanix" and "None". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. + type: string + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + vsphere: + description: VSphere contains settings specific to the VSphere infrastructure provider. 
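+ # Illustrative example (not part of the vendored schema): a hypothetical spec.platformSpec for the AWS platform with one custom service endpoint, per the serviceEndpoints schema above; the endpoint URL is an invented placeholder.
+ #   platformSpec:
+ #     type: AWS
+ #     aws:
+ #       serviceEndpoints:
+ #       - name: ec2
+ #         url: https://vpce-0abc123-example.ec2.us-gov-west-1.vpce.amazonaws.com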
+ type: object + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + apiServerInternalURI: + description: apiServerInternalURI is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerInternalURI can be used by components like kubelets to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking. + type: string + apiServerURL: + description: apiServerURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API. + type: string + controlPlaneTopology: + description: controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation. The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster. + type: string + default: HighlyAvailable + enum: + - HighlyAvailable + - SingleReplica + - External + etcdDiscoveryDomain: + description: 'etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release.' + type: string + infrastructureName: + description: infrastructureName uniquely identifies a cluster with a human-friendly name. Once set it should not be changed. Must be of max length 27 and must have only alphanumeric or hyphen characters. + type: string + infrastructureTopology: + description: 'infrastructureTopology expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The default is ''HighlyAvailable'', which represents the behavior operators have in a "normal" cluster. The ''SingleReplica'' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation. NOTE: External topology mode is not applicable for this field.' + type: string + default: HighlyAvailable + enum: + - HighlyAvailable + - SingleReplica + platform: + description: "platform is the underlying infrastructure provider for the cluster. \n Deprecated: Use platformStatus.type instead." + type: string + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + platformStatus: + description: platformStatus holds status information specific to the underlying infrastructure provider. + type: object + properties: + alibabaCloud: + description: AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. + type: object + required: + - region + properties: + region: + description: region specifies the region for Alibaba Cloud resources created for the cluster.
+ type: string + pattern: ^[0-9A-Za-z-]+$ + resourceGroupID: + description: resourceGroupID is the ID of the resource group for the cluster. + type: string + pattern: ^(rg-[0-9A-Za-z]+)?$ + resourceTags: + description: resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster. + type: array + maxItems: 20 + items: + description: AlibabaCloudResourceTag is the set of tags to apply to resources. + type: object + required: + - key + - value + properties: + key: + description: key is the key of the tag. + type: string + maxLength: 128 + minLength: 1 + value: + description: value is the value of the tag. + type: string + maxLength: 128 + minLength: 1 + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + aws: + description: AWS contains settings specific to the Amazon Web Services infrastructure provider. + type: object + properties: + region: + description: region holds the default AWS region for new AWS resources created by the cluster. + type: string + resourceTags: + description: resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user. + type: array + maxItems: 25 + items: + description: AWSResourceTag is a tag to apply to AWS resources created for the cluster. + type: object + required: + - key + - value + properties: + key: + description: key is the key of the tag. + type: string + maxLength: 128 + minLength: 1 + pattern: ^[0-9A-Za-z_.:/=+-@]+$ + value: + description: value is the value of the tag. Some AWS services do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services. + type: string + maxLength: 256 + minLength: 1 + pattern: ^[0-9A-Za-z_.:/=+-@]+$ + serviceEndpoints: + description: ServiceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. + type: array + items: + description: AWSServiceEndpoint store the configuration of a custom url to override existing defaults of AWS Services. + type: object + properties: + name: + description: name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty. + type: string + pattern: ^[a-z0-9-]+$ + url: + description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + type: string + pattern: ^https:// + azure: + description: Azure contains settings specific to the Azure infrastructure provider. + type: object + properties: + armEndpoint: + description: armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack. + type: string + cloudName: + description: cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`.
+ type: string + enum: + - "" + - AzurePublicCloud + - AzureUSGovernmentCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureStackCloud + networkResourceGroupName: + description: networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. If empty, the value is the same as ResourceGroupName. + type: string + resourceGroupName: + description: resourceGroupName is the Resource Group for new Azure resources created for the cluster. + type: string + baremetal: + description: BareMetal contains settings specific to the BareMetal platform. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + type: string + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. + type: string + equinixMetal: + description: EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + type: string + gcp: + description: GCP contains settings specific to the Google Cloud Platform infrastructure provider. + type: object + properties: + projectID: + description: projectID is the Project ID for new GCP resources created for the cluster. + type: string + region: + description: region holds the region for new GCP resources created for the cluster. + type: string + ibmcloud: + description: IBMCloud contains settings specific to the IBMCloud infrastructure provider. + type: object + properties: + cisInstanceCRN: + description: CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain + type: string + location: + description: Location is where the cluster has been deployed + type: string + providerType: + description: ProviderType indicates the type of cluster that was created + type: string + resourceGroupName: + description: ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.
+ type: string + kubevirt: + description: Kubevirt contains settings specific to the kubevirt infrastructure provider. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + type: string + nutanix: + description: Nutanix contains settings specific to the Nutanix infrastructure provider. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + type: string + openstack: + description: OpenStack contains settings specific to the OpenStack infrastructure provider. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + cloudName: + description: cloudName is the name of the desired OpenStack cloud in the client configuration file (`clouds.yaml`). + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + type: string + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. + type: string + ovirt: + description: Ovirt contains settings specific to the oVirt infrastructure provider. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. 
The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + type: string + nodeDNSIP: + description: 'deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release.' + type: string + powervs: + description: PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider. + type: object + properties: + cisInstanceCRN: + description: CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain + type: string + region: + description: region holds the default Power VS region for new Power VS resources created by the cluster. + type: string + serviceEndpoints: + description: serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service. + type: array + items: + description: PowervsServiceEndpoint stores the configuration of a custom url to override existing defaults of PowerVS Services. + type: object + required: + - name + - url + properties: + name: + description: name is the name of the Power VS service. Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + type: string + pattern: ^[a-z0-9-]+$ + url: + description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + type: string + format: uri + pattern: ^https:// + zone: + description: 'zone holds the default zone for the new Power VS resources created by the cluster. Note: Currently only single-zone OCP clusters are supported' + type: string + type: + description: "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. \n This value will be synced to the `status.platform` and `status.platformStatus.type`. Currently this value cannot be changed once set." + type: string + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + vsphere: + description: VSphere contains settings specific to the VSphere infrastructure provider. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
+ type: string + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for vSphere deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml new file mode 100644 index 000000000..ffc366607 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml @@ -0,0 +1,281 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: ingresses.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Ingress + listKind: IngressList + plural: ingresses + singular: ingress + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. The canonical name is `cluster`. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + appsDomain: + description: appsDomain is an optional domain to use instead of the one specified in the domain field when a Route is created without specifying an explicit host. If appsDomain is nonempty, this value is used to generate default host values for Route. Unlike domain, appsDomain may be modified after installation. This assumes a new ingresscontroller has been set up with a wildcard certificate. + type: string + componentRoutes: + description: "componentRoutes is an optional list of routes that are managed by OpenShift components that a cluster-admin is able to configure the hostname and serving certificate for. The namespace and name of each route in this list should match an existing entry in the status.componentRoutes list. \n To determine the set of configurable Routes, look at namespace and name of entries in the .status.componentRoutes list, where participating operators write the status of configurable routes."
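+ # Illustrative example (not part of the vendored schema): a hypothetical spec.componentRoutes entry customizing a component route's hostname and serving certificate; the hostname and secret name are invented placeholders.
+ #   spec:
+ #     componentRoutes:
+ #     - namespace: openshift-console
+ #       name: console
+ #       hostname: console.apps.example.com
+ #       servingCertKeyPairSecret:
+ #         name: console-custom-tls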
+ type: array + items: + description: ComponentRouteSpec allows for configuration of a route's hostname and serving certificate. + type: object + required: + - hostname + - name + - namespace + properties: + hostname: + description: hostname is the hostname that should be used by the route. + type: string + pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ + name: + description: "name is the logical name of the route to customize. \n The namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized." + type: string + maxLength: 256 + minLength: 1 + namespace: + description: "namespace is the namespace of the route to customize. \n The namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized." + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + servingCertKeyPairSecret: + description: servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + domain: + description: "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"<route-name>.<route-namespace>.<domain>\". \n It is also used as the default wildcard domain suffix for ingress. The default ingresscontroller domain will follow this pattern: \"*.<domain>\". \n Once set, changing domain is not currently supported." + type: string + requiredHSTSPolicies: + description: "requiredHSTSPolicies specifies HSTS policies that are required to be set on newly created or updated routes matching the domainPattern/s and namespaceSelector/s that are specified in the policy. Each requiredHSTSPolicy must have at least a domainPattern and a maxAge to validate a route HSTS Policy route annotation, and affect route admission. \n A candidate route is checked for HSTS Policies if it has the HSTS Policy route annotation: \"haproxy.router.openshift.io/hsts_header\" E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains \n - For each candidate route, if it matches a requiredHSTSPolicy domainPattern and optional namespaceSelector, then the maxAge, preloadPolicy, and includeSubdomainsPolicy must be valid to be admitted. Otherwise, the route is rejected. - The first match, by domainPattern and optional namespaceSelector, in the ordering of the RequiredHSTSPolicies determines the route's admission status. - If the candidate route doesn't match any requiredHSTSPolicy domainPattern and optional namespaceSelector, then it may use any HSTS Policy annotation. \n The HSTS policy configuration may be changed after routes have already been created. An update to a previously admitted route may then fail if the updated route does not conform to the updated HSTS policy configuration.
However, changing the HSTS policy configuration will not cause a route that is already admitted to stop working. \n Note that if there are no RequiredHSTSPolicies, any HSTS Policy annotation on the route is valid." + type: array + items: + type: object + required: + - domainPatterns + properties: + domainPatterns: + description: "domainPatterns is a list of domains for which the desired HSTS annotations are required. If domainPatterns is specified and a route is created with a spec.host matching one of the domains, the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy. \n The use of wildcards is allowed like this: *.foo.com matches everything under foo.com. foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*." + type: array + minItems: 1 + items: + type: string + includeSubDomainsPolicy: + description: 'includeSubDomainsPolicy means the HSTS Policy should apply to any subdomains of the host''s domain name. Thus, for the host bar.foo.com, if includeSubDomainsPolicy was set to RequireIncludeSubDomains: - the host app.bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host foo.com would NOT inherit the HSTS Policy of bar.foo.com - the host def.foo.com would NOT inherit the HSTS Policy of bar.foo.com' + type: string + enum: + - RequireIncludeSubDomains + - RequireNoIncludeSubDomains + - NoOpinion + maxAge: + description: maxAge is the delta time range in seconds during which hosts are regarded as HSTS hosts. If set to 0, it negates the effect, and hosts are removed as HSTS hosts. If set to 0 and includeSubdomains is specified, all subdomains of the host are also removed as HSTS hosts. maxAge is a time-to-live value, and if this policy is not refreshed on a client, the HSTS policy will eventually expire on that client. + type: object + properties: + largestMaxAge: + description: The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age. This value can be left unspecified, in which case no upper limit is enforced. + type: integer + format: int32 + maximum: 2147483647 + minimum: 0 + smallestMaxAge: + description: The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age. Setting max-age=0 allows the deletion of an existing HSTS header from a host. This is a necessary tool for administrators to quickly correct mistakes. This value can be left unspecified, in which case no lower limit is enforced. + type: integer + format: int32 + maximum: 2147483647 + minimum: 0 + namespaceSelector: + description: namespaceSelector specifies a label selector such that the policy applies only to those routes that are in namespaces with labels that match the selector, and are in one of the DomainPatterns. Defaults to the empty LabelSelector, which matches everything. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
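+ # Illustrative example (not part of the vendored schema): a hypothetical requiredHSTSPolicies entry enforcing HSTS on routes under an invented domain, using the domainPatterns, maxAge, preloadPolicy and includeSubDomainsPolicy fields described above.
+ #   spec:
+ #     requiredHSTSPolicies:
+ #     - domainPatterns:
+ #       - "*.hsts.example.com"
+ #       maxAge:
+ #         smallestMaxAge: 1
+ #         largestMaxAge: 31536000
+ #       preloadPolicy: RequirePreload
+ #       includeSubDomainsPolicy: RequireIncludeSubDomains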
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + preloadPolicy: + description: preloadPolicy directs the client to include hosts in its host preload list so that it never needs to do an initial load to get the HSTS header (note that this is not defined in RFC 6797 and is therefore client implementation-dependent). + type: string + enum: + - RequirePreload + - RequireNoPreload + - NoOpinion + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + componentRoutes: + description: componentRoutes is where participating operators place the current route status for routes whose hostnames and serving certificates can be customized by the cluster-admin. + type: array + items: + description: ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate. + type: object + required: + - defaultHostname + - name + - namespace + - relatedObjects + properties: + conditions: + description: "conditions are used to communicate the state of the componentRoutes entry. \n Supported conditions include Available, Degraded and Progressing. \n If available is true, the content served by the route can be accessed by users. This includes cases where a default may continue to serve content while the customized route specified by the cluster-admin is being configured. \n If Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry. The currentHostnames field may or may not be in effect. \n If Progressing is true, that means the component is taking some action related to the componentRoutes entry." + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. 
+ type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + consumingUsers: + description: consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret. + type: array + maxItems: 5 + items: + description: ConsumingUser is an alias for string which we add validation to. Currently only service accounts are supported. + type: string + maxLength: 512 + minLength: 1 + pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + currentHostnames: + description: currentHostnames is the list of current names used by the route. Typically, this list should consist of a single hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list. + type: array + minItems: 1 + items: + description: "Hostname is an alias for hostname string validation. \n The left operand of the | is the original kubebuilder hostname validation format, which is incorrect because it allows upper case letters, disallows hyphen or number in the TLD, and allows labels to start/end in non-alphanumeric characters. See https://bugzilla.redhat.com/show_bug.cgi?id=2039256. ^([a-zA-Z0-9\\p{S}\\p{L}]((-?[a-zA-Z0-9\\p{S}\\p{L}]{0,62})?)|([a-zA-Z0-9\\p{S}\\p{L}](([a-zA-Z0-9-\\p{S}\\p{L}]{0,61}[a-zA-Z0-9\\p{S}\\p{L}])?)(\\.)){1,}([a-zA-Z\\p{L}]){2,63})$ \n The right operand of the | is a new pattern that mimics the current API route admission validation on hostname, except that it allows hostnames longer than the maximum length: ^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ \n Both operand patterns are made available so that modifications on ingress spec can still happen after an invalid hostname was saved via validation by the incorrect left operand of the | operator." 
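+ # Illustrative example (not part of the vendored schema): a hypothetical status.componentRoutes entry as a participating operator might publish it; all names and hostnames are invented placeholders.
+ #   status:
+ #     componentRoutes:
+ #     - namespace: openshift-console
+ #       name: console
+ #       defaultHostname: console-openshift-console.apps.example.com
+ #       currentHostnames:
+ #       - console.apps.example.com
+ #       relatedObjects:
+ #       - group: route.openshift.io
+ #         resource: routes
+ #         namespace: openshift-console
+ #         name: console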
+ type: string + pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ + defaultHostname: + description: defaultHostname is the hostname of this route prior to customization. + type: string + pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ + name: + description: "name is the logical name of the route to customize. It does not have to be the actual name of a route resource but it cannot be renamed. \n The namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized." + type: string + maxLength: 256 + minLength: 1 + namespace: + description: "namespace is the namespace of the route to customize. It must be a real namespace. Using an actual namespace ensures that no two components will conflict and the same component can be installed multiple times. \n The namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized." + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + relatedObjects: + description: relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied. + type: array + minItems: 1 + items: + description: ObjectReference contains enough information to let you inspect or modify the referred object. + type: object + required: + - group + - name + - resource + properties: + group: + description: group of the referent. + type: string + name: + description: name of the referent. + type: string + namespace: + description: namespace of the referent. + type: string + resource: + description: resource of the referent. + type: string + defaultPlacement: + description: "defaultPlacement is set at installation time to control which nodes will host the ingress router pods by default. The options are control-plane nodes or worker nodes. \n This field works by dictating how the Cluster Ingress Operator will consider unset replicas and nodePlacement fields in IngressController resources when creating the corresponding Deployments. \n See the documentation for the IngressController replicas and nodePlacement fields for more information. 
\n When omitted, the default value is Workers" + type: string + enum: + - ControlPlane + - Workers + - "" + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml new file mode 100644 index 000000000..c01178506 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml @@ -0,0 +1,163 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: networks.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Network + listKind: NetworkList + plural: networks + singular: network + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. Please view network.spec for an explanation on what applies when configuring this resource. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. + type: object + properties: + clusterNetwork: + description: IP address pool to use for pod IPs. This field is immutable after installation. + type: array + items: + description: ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated. + type: object + properties: + cidr: + description: The complete block for pod IPs. + type: string + hostPrefix: + description: The size (prefix) of block to allocate to each node. If this field is not used by the plugin, it can be left unset. + type: integer + format: int32 + minimum: 0 + externalIP: + description: externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set. 
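+ # Illustrative example (not part of the vendored schema): a hypothetical networks.config.openshift.io/cluster spec combining the clusterNetwork, serviceNetwork, networkType and externalIP fields described above; the CIDRs are common documentation/default ranges used as placeholders.
+ #   spec:
+ #     clusterNetwork:
+ #     - cidr: 10.128.0.0/14
+ #       hostPrefix: 23
+ #     serviceNetwork:
+ #     - 172.30.0.0/16
+ #     networkType: OpenShiftSDN
+ #     externalIP:
+ #       policy:
+ #         allowedCIDRs:
+ #         - 192.0.2.0/24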
+ type: object + properties: + autoAssignCIDRs: + description: autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In Openshift 3.x, this was misleadingly called "IngressIPs". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided. + type: array + items: + type: string + policy: + description: policy is a set of restrictions applied to the ExternalIP field. If nil or empty, then ExternalIP is not allowed to be set. + type: object + properties: + allowedCIDRs: + description: allowedCIDRs is the list of allowed CIDRs. + type: array + items: + type: string + rejectedCIDRs: + description: rejectedCIDRs is the list of disallowed CIDRs. These take precedence over allowedCIDRs. + type: array + items: + type: string + networkType: + description: 'NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN This field is immutable after installation.' + type: string + serviceNetwork: + description: IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation. + type: array + items: + type: string + serviceNodePortRange: + description: The port range allowed for Services of type NodePort. If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range. This parameter can be updated after the cluster is installed. + type: string + pattern: ^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + clusterNetwork: + description: IP address pool to use for pod IPs. + type: array + items: + description: ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated. + type: object + properties: + cidr: + description: The complete block for pod IPs. + type: string + hostPrefix: + description: The size (prefix) of block to allocate to each node. If this field is not used by the plugin, it can be left unset. + type: integer + format: int32 + minimum: 0 + clusterNetworkMTU: + description: ClusterNetworkMTU is the MTU for inter-pod networking. + type: integer + migration: + description: Migration contains the cluster network migration configuration. + type: object + properties: + mtu: + description: MTU contains the MTU migration configuration. + type: object + properties: + machine: + description: Machine contains MTU migration configuration for the machine's uplink. + type: object + properties: + from: + description: From is the MTU to migrate from. + type: integer + format: int32 + minimum: 0 + to: + description: To is the MTU to migrate to. + type: integer + format: int32 + minimum: 0 + network: + description: Network contains MTU migration configuration for the default network. + type: object + properties: + from: + description: From is the MTU to migrate from. + type: integer + format: int32 + minimum: 0 + to: + description: To is the MTU to migrate to. 
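+ # Illustrative example (not part of the vendored schema): a hypothetical status.migration stanza as it might look midway through an MTU change; the MTU values are invented placeholders.
+ #   status:
+ #     migration:
+ #       mtu:
+ #         machine:
+ #           from: 1500
+ #           to: 9000
+ #         network:
+ #           from: 1400
+ #           to: 8900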
+ type: integer + format: int32 + minimum: 0 + networkType: + description: 'NetworkType is the target plugin that is to be deployed. Currently supported values are: OpenShiftSDN, OVNKubernetes' + type: string + enum: + - OpenShiftSDN + - OVNKubernetes + networkType: + description: NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). + type: string + serviceNetwork: + description: IP address pool for services. Currently, we only support a single entry here. + type: array + items: + type: string + served: true + storage: true diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_node.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_node.crd.yaml new file mode 100644 index 000000000..915961e14 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_node.crd.yaml @@ -0,0 +1,108 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1107 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: nodes.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Node + listKind: NodeList + plural: nodes + singular: node + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Node holds cluster-wide information about node specific features. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + cgroupMode: + description: CgroupMode determines the cgroups version on the node + type: string + enum: + - v1 + - v2 + - "" + workerLatencyProfile: + description: WorkerLatencyProfile determines how fast the kubelet updates its status and how the cluster reacts + type: string + enum: + - Default + - MediumUpdateAverageReaction + - LowUpdateSlowReaction + status: + description: status holds observed values. + type: object + properties: + workerLatencyProfileStatus: + description: WorkerLatencyProfileStatus provides the current status of WorkerLatencyProfile + type: object + properties: + conditions: + description: conditions describes the state of the WorkerLatencyProfile and related components (Kubelet or Controller Manager or Kube API Server), whether they are progressing, degraded or complete + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource.
--- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml new file mode 100644 index 000000000..883c623b3 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml @@ -0,0 +1,444 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: oauths.config.openshift.io +spec: + group: config.openshift.io + names: + kind: OAuth + listKind: OAuthList + plural: oauths + singular: oauth + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. It is used to configure the integrated OAuth server. This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + identityProviders: + description: identityProviders is an ordered list of ways for a user to identify themselves. When this list is empty, no identities are provisioned for users. + type: array + items: + description: IdentityProvider provides identities for users authenticating using credentials + type: object + properties: + basicAuth: + description: basicAuth contains configuration options for the BasicAuth IdP + type: object + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. 
+ type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + tlsClientCert: + description: tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key "tls.crt" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + tlsClientKey: + description: tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key "tls.key" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + url: + description: url is the remote URL to connect to + type: string + github: + description: github enables user authentication using GitHub credentials + type: object + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + hostname: + description: hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value configured at /setup/settings#hostname. + type: string + organizations: + description: organizations optionally restricts which organizations are allowed to log in + type: array + items: + type: string + teams: + description: teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.
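+                  # Illustrative value (not part of the schema): "myorg/cluster-admins"
+                  # limits login to members of the cluster-admins team in the myorg
+                  # organization.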
+ type: array + items: + type: string + gitlab: + description: gitlab enables user authentication using GitLab credentials + type: object + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + url: + description: url is the oauth server base URL + type: string + google: + description: google enables user authentication using Google credentials + type: object + properties: + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + hostedDomain: + description: hostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to + type: string + htpasswd: + description: htpasswd enables user authentication using an HTPasswd file to validate credentials + type: object + properties: + fileData: + description: fileData is a required reference to a secret by name containing the data to use as the htpasswd file. The key "htpasswd" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. If the specified htpasswd data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + keystone: + description: keystone enables user authentication using keystone password credentials + type: object + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. 
+ type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + domainName: + description: domainName is required for keystone v3 + type: string + tlsClientCert: + description: tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key "tls.crt" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + tlsClientKey: + description: tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key "tls.key" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + url: + description: url is the remote URL to connect to + type: string + ldap: + description: ldap enables user authentication using LDAP credentials + type: object + properties: + attributes: + description: attributes maps LDAP attributes to identities + type: object + properties: + email: + description: email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity + type: array + items: + type: string + id: + description: id is the list of attributes whose values should be used as the user ID. Required. First non-empty attribute is used. At least one attribute is required. If none of the listed attribute have a value, authentication fails. LDAP standard identity attribute is "dn" + type: array + items: + type: string + name: + description: name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is "cn" + type: array + items: + type: string + preferredUsername: + description: preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is "uid" + type: array + items: + type: string + bindDN: + description: bindDN is an optional DN to bind with during the search phase. + type: string + bindPassword: + description: bindPassword is an optional reference to a secret by name containing a password to bind with during the search phase. The key "bindPassword" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. 
If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + insecure: + description: 'insecure, if true, indicates the connection should not use TLS. WARNING: Should not be set to `true` with the URL scheme "ldaps://" as "ldaps://" URLs always attempt to connect using TLS, even when `insecure` is set to `true`. When `true`, "ldap://" URLs connect insecurely. When `false`, "ldap://" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.' + type: boolean + url: + description: 'url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is: ldap://host:port/basedn?attribute?scope?filter' + type: string + mappingMethod: + description: mappingMethod determines how identities from this provider are mapped to users. Defaults to "claim" + type: string + name: + description: 'name is used to qualify the identities returned by this provider. - It MUST be unique and not shared by any other identity provider used - It MUST be a valid path segment: name cannot equal "." or ".." or contain "/" or "%" or ":" Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName' + type: string + openID: + description: openID enables user authentication using OpenID credentials + type: object + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + claims: + description: claims mappings + type: object + properties: + email: + description: email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity + type: array + items: + type: string + x-kubernetes-list-type: atomic + groups: + description: groups is the list of claims whose values should be used to synchronize groups from the OIDC provider to OpenShift for the user. If multiple claims are specified, the first one with a non-empty value is used. + type: array + items: + description: OpenIDClaim represents a claim retrieved from an OpenID provider's tokens or userInfo responses + type: string + minLength: 1 + x-kubernetes-list-type: atomic + name: + description: name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity + type: array + items: + type: string + x-kubernetes-list-type: atomic + preferredUsername: + description: preferredUsername is the list of claims whose values should be used as the preferred username.
If unspecified, the preferred username is determined from the value of the sub claim + type: array + items: + type: string + x-kubernetes-list-type: atomic + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + extraAuthorizeParameters: + description: extraAuthorizeParameters are any custom parameters to add to the authorize request. + type: object + additionalProperties: + type: string + extraScopes: + description: extraScopes are any scopes to request in addition to the standard "openid" scope. + type: array + items: + type: string + issuer: + description: issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. It must use the https scheme with no query or fragment component. + type: string + requestHeader: + description: requestHeader enables user authentication using request header credentials + type: object + properties: + ca: + description: ca is a required reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. Specifically, it allows verification of incoming requests to prevent header spoofing. The key "ca.crt" is used to locate the data. If the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + challengeURL: + description: challengeURL is a URL to redirect unauthenticated /authorize requests to. Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here. ${url} is replaced with the current URL, escaped to be safe in a query parameter, e.g. https://www.example.com/sso-login?then=${url}. ${query} is replaced with the current query string, e.g. https://www.example.com/auth-proxy/oauth/authorize?${query}. Required when challenge is set to true. + type: string + clientCommonNames: + description: clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative. + type: array + items: + type: string + emailHeaders: + description: emailHeaders is the set of headers to check for the email address + type: array + items: + type: string + headers: + description: headers is the set of headers to check for identity information + type: array + items: + type: string + loginURL: + description: loginURL is a URL to redirect unauthenticated /authorize requests to. Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here. ${url} is replaced with the current URL, escaped to be safe in a query parameter, e.g. https://www.example.com/sso-login?then=${url}. ${query} is replaced with the current query string, e.g. https://www.example.com/auth-proxy/oauth/authorize?${query}. Required when login is set to true.
+ type: string + nameHeaders: + description: nameHeaders is the set of headers to check for the display name + type: array + items: + type: string + preferredUsernameHeaders: + description: preferredUsernameHeaders is the set of headers to check for the preferred username + type: array + items: + type: string + type: + description: type identifies the identity provider type for this entry. + type: string + x-kubernetes-list-type: atomic + templates: + description: templates allow you to customize pages like the login page. + type: object + properties: + error: + description: error is the name of a secret that specifies a go template to use to render error pages during the authentication or grant flow. The key "errors.html" is used to locate the template data. If specified and the secret or expected key is not found, the default error page is used. If the specified template is not valid, the default error page is used. If unspecified, the default error page is used. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + login: + description: login is the name of a secret that specifies a go template to use to render the login page. The key "login.html" is used to locate the template data. If specified and the secret or expected key is not found, the default login page is used. If the specified template is not valid, the default login page is used. If unspecified, the default login page is used. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + providerSelection: + description: providerSelection is the name of a secret that specifies a go template to use to render the provider selection page. The key "providers.html" is used to locate the template data. If specified and the secret or expected key is not found, the default provider selection page is used. If the specified template is not valid, the default provider selection page is used. If unspecified, the default provider selection page is used. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + tokenConfig: + description: tokenConfig contains options for authorization and access tokens + type: object + properties: + accessTokenInactivityTimeout: + description: "accessTokenInactivityTimeout defines the token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Takes valid time duration string such as \"5m\", \"1.5h\" or \"2h45m\". The minimum allowed value for duration is 300s (5 minutes). If the timeout is configured per client, then that value takes precedence. If the timeout value is not specified and the client does not override the value, then tokens are valid until their lifetime. \n WARNING: existing tokens' timeout will not be affected (lowered) by changing this value" + type: string + accessTokenInactivityTimeoutSeconds: + description: 'accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.' 
+ type: integer + format: int32 + accessTokenMaxAgeSeconds: + description: accessTokenMaxAgeSeconds defines the maximum age of access tokens + type: integer + format: int32 + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml new file mode 100644 index 000000000..42f745c67 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml @@ -0,0 +1,55 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: projects.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Project + listKind: ProjectList + plural: projects + singular: project + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Project holds cluster-wide information about Project. The canonical name is `cluster` \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + projectRequestMessage: + description: projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint + type: string + projectRequestTemplate: + description: projectRequestTemplate is the template to use for creating projects in response to projectrequest. This must point to a template in 'openshift-config' namespace. It is optional. If it is not specified, a default template is used. + type: object + properties: + name: + description: name is the metadata.name of the referenced project request template + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. 
+ type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml new file mode 100644 index 000000000..f161bc432 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml @@ -0,0 +1,68 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: schedulers.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Scheduler + listKind: SchedulerList + plural: schedulers + singular: scheduler + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + defaultNodeSelector: + description: 'defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces and creates an intersection with any existing nodeSelectors already set on a pod, additionally constraining that pod''s selector. For example, defaultNodeSelector: "type=user-node,region=east" would set nodeSelector field in pod spec to "type=user-node,region=east" to all pods created in all namespaces. Namespaces having project-wide node selectors won''t be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector=''type=user-node,region=east'', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: "type=user-node,region=west" means that the default of "type=user-node,region=east" set in defaultNodeSelector would not be applied.' + type: string + mastersSchedulable: + description: 'MastersSchedulable allows master nodes to be schedulable.
When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.' + type: boolean + policy: + description: 'DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release. policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available, the scheduler will default to using DefaultAlgorithmProvider. The namespace for this configmap is openshift-config.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + profile: + description: "profile sets which scheduling profile should be used to configure scheduling decisions for new pods. \n Valid values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\". Defaults to \"LowNodeUtilization\"" + type: string + enum: + - "" + - LowNodeUtilization + - HighNodeUtilization + - NoScoring + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/doc.go b/vendor/github.com/openshift/api/config/v1/doc.go new file mode 100644 index 000000000..4ff5208f2 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=config.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/config/v1/register.go b/vendor/github.com/openshift/api/config/v1/register.go new file mode 100644 index 000000000..61302592e --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/register.go @@ -0,0 +1,78 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "config.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme.
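+// A minimal usage sketch (illustrative, assuming configv1 as this package's
+// import alias): callers typically install these types into a scheme before
+// building a client, e.g.
+//
+//	scheme := runtime.NewScheme()
+//	if err := configv1.Install(scheme); err != nil {
+//		panic(err)
+//	}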
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &APIServer{}, + &APIServerList{}, + &Authentication{}, + &AuthenticationList{}, + &Build{}, + &BuildList{}, + &ClusterOperator{}, + &ClusterOperatorList{}, + &ClusterVersion{}, + &ClusterVersionList{}, + &Console{}, + &ConsoleList{}, + &DNS{}, + &DNSList{}, + &FeatureGate{}, + &FeatureGateList{}, + &Image{}, + &ImageList{}, + &Infrastructure{}, + &InfrastructureList{}, + &Ingress{}, + &IngressList{}, + &Node{}, + &NodeList{}, + &Network{}, + &NetworkList{}, + &OAuth{}, + &OAuthList{}, + &OperatorHub{}, + &OperatorHubList{}, + &Project{}, + &ProjectList{}, + &Proxy{}, + &ProxyList{}, + &Scheduler{}, + &SchedulerList{}, + &ImageContentPolicy{}, + &ImageContentPolicyList{}, + &ImageDigestMirrorSet{}, + &ImageDigestMirrorSetList{}, + &ImageTagMirrorSet{}, + &ImageTagMirrorSetList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/config/v1/stringsource.go b/vendor/github.com/openshift/api/config/v1/stringsource.go new file mode 100644 index 000000000..6a5718c1d --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/stringsource.go @@ -0,0 +1,31 @@ +package v1 + +import "encoding/json" + +// UnmarshalJSON implements the json.Unmarshaler interface. +// If the value is a string, it sets the Value field of the StringSource. +// Otherwise, it is unmarshaled into the StringSourceSpec struct. +func (s *StringSource) UnmarshalJSON(value []byte) error { + // If we can unmarshal to a simple string, just set the value + var simpleValue string + if err := json.Unmarshal(value, &simpleValue); err == nil { + s.Value = simpleValue + return nil + } + + // Otherwise do the full struct unmarshal + return json.Unmarshal(value, &s.StringSourceSpec) +} + +// MarshalJSON implements the json.Marshaler interface. +// If the StringSource contains only a string Value (or is empty), it is marshaled as a JSON string. +// Otherwise, the StringSourceSpec struct is marshaled as a JSON object. +func (s *StringSource) MarshalJSON() ([]byte, error) { + // If we have only a cleartext value set, do a simple string marshal + if s.StringSourceSpec == (StringSourceSpec{Value: s.Value}) { + return json.Marshal(s.Value) + } + + // Otherwise do the full struct marshal of the externalized bits + return json.Marshal(s.StringSourceSpec) +} diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go new file mode 100644 index 000000000..56d00648e --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types.go @@ -0,0 +1,400 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// ConfigMapFileReference references a config map in a specific namespace. +// The namespace must be specified at the point of use. +type ConfigMapFileReference struct { + Name string `json:"name"` + // Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. + Key string `json:"key,omitempty"` +} + +// ConfigMapNameReference references a config map in a specific namespace. +// The namespace must be specified at the point of use. +type ConfigMapNameReference struct { + // name is the metadata.name of the referenced config map + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` +} + +// SecretNameReference references a secret in a specific namespace.
+// The namespace must be specified at the point of use. +type SecretNameReference struct { + // name is the metadata.name of the referenced secret + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` +} + +// HTTPServingInfo holds configuration for serving HTTP +type HTTPServingInfo struct { + // ServingInfo is the HTTP serving information + ServingInfo `json:",inline"` + // MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit. + MaxRequestsInFlight int64 `json:"maxRequestsInFlight"` + // RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if + // -1 there is no limit on requests. + RequestTimeoutSeconds int64 `json:"requestTimeoutSeconds"` +} + +// ServingInfo holds information about serving web pages +type ServingInfo struct { + // BindAddress is the ip:port to serve on + BindAddress string `json:"bindAddress"` + // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", + // "tcp4", and "tcp6" + BindNetwork string `json:"bindNetwork"` + // CertInfo is the TLS cert info for serving secure traffic. + // this is anonymous so that we can inline it for serialization + CertInfo `json:",inline"` + // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates + // +optional + ClientCA string `json:"clientCA,omitempty"` + // NamedCertificates is a list of certificates to use to secure requests to specific hostnames + NamedCertificates []NamedCertificate `json:"namedCertificates,omitempty"` + // MinTLSVersion is the minimum TLS version supported. + // Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants + MinTLSVersion string `json:"minTLSVersion,omitempty"` + // CipherSuites contains an overridden list of ciphers for the server to support. + // Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants + CipherSuites []string `json:"cipherSuites,omitempty"` +} + +// CertInfo relates a certificate with a private key +type CertInfo struct { + // CertFile is a file containing a PEM-encoded certificate + CertFile string `json:"certFile"` + // KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile + KeyFile string `json:"keyFile"` +} + +// NamedCertificate specifies a certificate/key, and the names it should be served for +type NamedCertificate struct { + // Names is a list of DNS names this certificate should be used to secure + // A name can be a normal DNS name, or can contain leading wildcard segments. + Names []string `json:"names,omitempty"` + // CertInfo is the TLS cert info for serving secure traffic + CertInfo `json:",inline"` +} + +// LeaderElection provides information to elect a leader +type LeaderElection struct { + // disable allows leader election to be suspended while allowing a fully defaulted "normal" startup case. + Disable bool `json:"disable,omitempty"` + // namespace indicates which namespace the resource is in + Namespace string `json:"namespace,omitempty"` + // name indicates what name to use for the resource + Name string `json:"name,omitempty"` + + // leaseDuration is the duration that non-leader candidates will wait + // after observing a leadership renewal until attempting to acquire + // leadership of a led but unrenewed leader slot. This is effectively the + // maximum duration that a leader can be stopped before it is replaced + // by another candidate. 
This is only applicable if leader election is + // enabled. + // +nullable + LeaseDuration metav1.Duration `json:"leaseDuration"` + // renewDeadline is the interval between attempts by the acting master to + // renew a leadership slot before it stops leading. This must be less + // than or equal to the lease duration. This is only applicable if leader + // election is enabled. + // +nullable + RenewDeadline metav1.Duration `json:"renewDeadline"` + // retryPeriod is the duration the clients should wait between attempting + // acquisition and renewal of a leadership. This is only applicable if + // leader election is enabled. + // +nullable + RetryPeriod metav1.Duration `json:"retryPeriod"` +} + +// StringSource allows specifying a string inline, or externally via env var or file. +// When it contains only a string value, it marshals to a simple JSON string. +type StringSource struct { + // StringSourceSpec specifies the string value, or external location + StringSourceSpec `json:",inline"` +} + +// StringSourceSpec specifies a string value, or external location +type StringSourceSpec struct { + // Value specifies the cleartext value, or an encrypted value if keyFile is specified. + Value string `json:"value"` + + // Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified. + Env string `json:"env"` + + // File references a file containing the cleartext value, or an encrypted value if a keyFile is specified. + File string `json:"file"` + + // KeyFile references a file containing the key to use to decrypt the value. + KeyFile string `json:"keyFile"` +} + +// RemoteConnectionInfo holds information necessary for establishing a remote connection +type RemoteConnectionInfo struct { + // URL is the remote URL to connect to + URL string `json:"url"` + // CA is the CA for verifying TLS connections + CA string `json:"ca"` + // CertInfo is the TLS client cert information to present + // this is anonymous so that we can inline it for serialization + CertInfo `json:",inline"` +} + +type AdmissionConfig struct { + PluginConfig map[string]AdmissionPluginConfig `json:"pluginConfig,omitempty"` + + // enabledPlugins is a list of admission plugins that must be on in addition to the default list. + // Some admission plugins are disabled by default, but certain configurations require them. This is fairly uncommon + // and can result in performance penalties and unexpected behavior. + EnabledAdmissionPlugins []string `json:"enabledPlugins,omitempty"` + + // disabledPlugins is a list of admission plugins that must be off. Putting something in this list + // is almost always a mistake and likely to result in cluster instability. + DisabledAdmissionPlugins []string `json:"disabledPlugins,omitempty"` +} + +// AdmissionPluginConfig holds the necessary configuration options for admission plugins +type AdmissionPluginConfig struct { + // Location is the path to a configuration file that contains the plugin's + // configuration + Location string `json:"location"` + + // Configuration is an embedded configuration object to be used as the plugin's + // configuration. If present, it will be used instead of the path to the configuration file. + // +nullable + // +kubebuilder:pruning:PreserveUnknownFields + Configuration runtime.RawExtension `json:"configuration"` +} + +type LogFormatType string + +type WebHookModeType string + +const ( + // LogFormatLegacy saves event in 1-line text format. 
+ LogFormatLegacy LogFormatType = "legacy" + // LogFormatJson saves event in structured json format. + LogFormatJson LogFormatType = "json" + + // WebHookModeBatch indicates that the webhook should buffer audit events + // internally, sending batch updates either once a certain number of + // events have been received or a certain amount of time has passed. + WebHookModeBatch WebHookModeType = "batch" + // WebHookModeBlocking causes the webhook to block on every attempt to process + // a set of events. This causes requests to the API server to wait for a + // round trip to the external audit service before sending a response. + WebHookModeBlocking WebHookModeType = "blocking" +) + +// AuditConfig holds configuration for the audit capabilities +type AuditConfig struct { + // If this flag is set, audit log will be printed in the logs. + // The logs contains, method, user and a requested URL. + Enabled bool `json:"enabled"` + // All requests coming to the apiserver will be logged to this file. + AuditFilePath string `json:"auditFilePath"` + // Maximum number of days to retain old log files based on the timestamp encoded in their filename. + MaximumFileRetentionDays int32 `json:"maximumFileRetentionDays"` + // Maximum number of old log files to retain. + MaximumRetainedFiles int32 `json:"maximumRetainedFiles"` + // Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB. + MaximumFileSizeMegabytes int32 `json:"maximumFileSizeMegabytes"` + + // PolicyFile is a path to the file that defines the audit policy configuration. + PolicyFile string `json:"policyFile"` + // PolicyConfiguration is an embedded policy configuration object to be used + // as the audit policy configuration. If present, it will be used instead of + // the path to the policy file. + // +nullable + // +kubebuilder:pruning:PreserveUnknownFields + PolicyConfiguration runtime.RawExtension `json:"policyConfiguration"` + + // Format of saved audits (legacy or json). + LogFormat LogFormatType `json:"logFormat"` + + // Path to a .kubeconfig formatted file that defines the audit webhook configuration. + WebHookKubeConfig string `json:"webHookKubeConfig"` + // Strategy for sending audit events (block or batch). + WebHookMode WebHookModeType `json:"webHookMode"` +} + +// EtcdConnectionInfo holds information necessary for connecting to an etcd server +type EtcdConnectionInfo struct { + // URLs are the URLs for etcd + URLs []string `json:"urls,omitempty"` + // CA is a file containing trusted roots for the etcd server certificates + CA string `json:"ca"` + // CertInfo is the TLS client cert information for securing communication to etcd + // this is anonymous so that we can inline it for serialization + CertInfo `json:",inline"` +} + +type EtcdStorageConfig struct { + EtcdConnectionInfo `json:",inline"` + + // StoragePrefix is the path within etcd that the OpenShift resources will + // be rooted under. This value, if changed, will mean existing objects in etcd will + // no longer be located. 
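+	// (Illustrative, assuming the conventional OpenShift prefix: with
+	// StoragePrefix "openshift.io", resources live under etcd keys rooted at
+	// "/openshift.io/...".)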
+ StoragePrefix string `json:"storagePrefix"` +} + +// GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd +type GenericAPIServerConfig struct { + // servingInfo describes how to start serving + ServingInfo HTTPServingInfo `json:"servingInfo"` + + // corsAllowedOrigins lists the origins that should be allowed for CORS requests + CORSAllowedOrigins []string `json:"corsAllowedOrigins"` + + // auditConfig describes how to configure audit information + AuditConfig AuditConfig `json:"auditConfig"` + + // storageConfig contains information about how to use etcd for storage + StorageConfig EtcdStorageConfig `json:"storageConfig"` + + // admissionConfig holds information about how to configure admission. + AdmissionConfig AdmissionConfig `json:"admission"` + + KubeClientConfig KubeClientConfig `json:"kubeClientConfig"` +} + +type KubeClientConfig struct { + // kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster-config + KubeConfig string `json:"kubeConfig"` + + // connectionOverrides specifies client overrides for system components to loop back to this master. + ConnectionOverrides ClientConnectionOverrides `json:"connectionOverrides"` +} + +type ClientConnectionOverrides struct { + // acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the + // default value of 'application/json'. This field will control all connections to the server used by a particular + // client. + AcceptContentTypes string `json:"acceptContentTypes"` + // contentType is the content type used when sending data to the server from this client. + ContentType string `json:"contentType"` + + // qps controls the number of queries per second allowed for this connection. + QPS float32 `json:"qps"` + // burst allows extra queries to accumulate when a client is exceeding its rate. + Burst int32 `json:"burst"` +} + +// GenericControllerConfig provides information to configure a controller +type GenericControllerConfig struct { + // ServingInfo is the HTTP serving information for the controller's endpoints + ServingInfo HTTPServingInfo `json:"servingInfo"` + + // leaderElection provides information to elect a leader. Only override this if you have a specific need + LeaderElection LeaderElection `json:"leaderElection"` + + // authentication allows configuration of authentication for the endpoints + Authentication DelegatedAuthentication `json:"authentication"` + // authorization allows configuration of authorization for the endpoints + Authorization DelegatedAuthorization `json:"authorization"` +} + +// DelegatedAuthentication allows authentication to be disabled. +type DelegatedAuthentication struct { + // disabled indicates that authentication should be disabled. By default it will use delegated authentication. + Disabled bool `json:"disabled,omitempty"` +} + +// DelegatedAuthorization allows authorization to be disabled. +type DelegatedAuthorization struct { + // disabled indicates that authorization should be disabled. By default it will use delegated authorization. + Disabled bool `json:"disabled,omitempty"` +} +type RequiredHSTSPolicy struct { + // namespaceSelector specifies a label selector such that the policy applies only to those routes that + // are in namespaces with labels that match the selector, and are in one of the DomainPatterns. + // Defaults to the empty LabelSelector, which matches everything.
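+	// (Illustrative: a selector with matchLabels of {"hsts": "required"} limits
+	// the policy to routes in namespaces that carry that label.)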
+ // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"` + + // domainPatterns is a list of domains for which the desired HSTS annotations are required. + // If domainPatterns is specified and a route is created with a spec.host matching one of the domains, + // the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy. + // + // The use of wildcards is allowed like this: *.foo.com matches everything under foo.com. + // foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*. + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:Required + // +required + DomainPatterns []string `json:"domainPatterns"` + + // maxAge is the delta time range in seconds during which hosts are regarded as HSTS hosts. + // If set to 0, it negates the effect, and hosts are removed as HSTS hosts. + // If set to 0 and includeSubdomains is specified, all subdomains of the host are also removed as HSTS hosts. + // maxAge is a time-to-live value, and if this policy is not refreshed on a client, the HSTS + // policy will eventually expire on that client. + MaxAge MaxAgePolicy `json:"maxAge"` + + // preloadPolicy directs the client to include hosts in its host preload list so that + // it never needs to do an initial load to get the HSTS header (note that this is not defined + // in RFC 6797 and is therefore client implementation-dependent). + // +optional + PreloadPolicy PreloadPolicy `json:"preloadPolicy,omitempty"` + + // includeSubDomainsPolicy means the HSTS Policy should apply to any subdomains of the host's + // domain name. Thus, for the host bar.foo.com, if includeSubDomainsPolicy was set to RequireIncludeSubDomains: + // - the host app.bar.foo.com would inherit the HSTS Policy of bar.foo.com + // - the host bar.foo.com would inherit the HSTS Policy of bar.foo.com + // - the host foo.com would NOT inherit the HSTS Policy of bar.foo.com + // - the host def.foo.com would NOT inherit the HSTS Policy of bar.foo.com + // +optional + IncludeSubDomainsPolicy IncludeSubDomainsPolicy `json:"includeSubDomainsPolicy,omitempty"` +} + +// MaxAgePolicy contains a numeric range for specifying a compliant HSTS max-age for the enclosing RequiredHSTSPolicy +type MaxAgePolicy struct { + // The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age + // This value can be left unspecified, in which case no upper limit is enforced. + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=2147483647 + LargestMaxAge *int32 `json:"largestMaxAge,omitempty"` + + // The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age + // Setting max-age=0 allows the deletion of an existing HSTS header from a host. This is a necessary + // tool for administrators to quickly correct mistakes. + // This value can be left unspecified, in which case no lower limit is enforced. 
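+	// (Illustrative: a SmallestMaxAge of 31536000 requires matching routes to
+	// declare an HSTS max-age of at least one year.)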
+ // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=2147483647 + SmallestMaxAge *int32 `json:"smallestMaxAge,omitempty"` +} + +// PreloadPolicy contains a value for specifying a compliant HSTS preload policy for the enclosing RequiredHSTSPolicy +// +kubebuilder:validation:Enum=RequirePreload;RequireNoPreload;NoOpinion +type PreloadPolicy string + +const ( + // RequirePreloadPolicy means HSTS "preload" is required by the RequiredHSTSPolicy + RequirePreloadPolicy PreloadPolicy = "RequirePreload" + + // RequireNoPreloadPolicy means HSTS "preload" is forbidden by the RequiredHSTSPolicy + RequireNoPreloadPolicy PreloadPolicy = "RequireNoPreload" + + // NoOpinionPreloadPolicy means HSTS "preload" doesn't matter to the RequiredHSTSPolicy + NoOpinionPreloadPolicy PreloadPolicy = "NoOpinion" +) + +// IncludeSubDomainsPolicy contains a value for specifying a compliant HSTS includeSubdomains policy +// for the enclosing RequiredHSTSPolicy +// +kubebuilder:validation:Enum=RequireIncludeSubDomains;RequireNoIncludeSubDomains;NoOpinion +type IncludeSubDomainsPolicy string + +const ( + // RequireIncludeSubDomains means HSTS "includeSubDomains" is required by the RequiredHSTSPolicy + RequireIncludeSubDomains IncludeSubDomainsPolicy = "RequireIncludeSubDomains" + + // RequireNoIncludeSubDomains means HSTS "includeSubDomains" is forbidden by the RequiredHSTSPolicy + RequireNoIncludeSubDomains IncludeSubDomainsPolicy = "RequireNoIncludeSubDomains" + + // NoOpinionIncludeSubDomains means HSTS "includeSubDomains" doesn't matter to the RequiredHSTSPolicy + NoOpinionIncludeSubDomains IncludeSubDomainsPolicy = "NoOpinion" +) diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go new file mode 100644 index 000000000..31801aacf --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -0,0 +1,211 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// APIServer holds configuration (like serving certificates, client CA and CORS domains) +// shared by all API servers in the system, among them especially kube-apiserver +// and openshift-apiserver. The canonical name of an instance is 'cluster'. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type APIServer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec APIServerSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status APIServerStatus `json:"status"` +} + +type APIServerSpec struct { + // servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates + // will be used for serving secure traffic. + // +optional + ServingCerts APIServerServingCerts `json:"servingCerts"` + // clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for + // incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. 
+ // You usually only have to set this if you have your own PKI you wish to honor client certificates from. + // The ConfigMap must exist in the openshift-config namespace and contain the following required fields: + // - ConfigMap.Data["ca-bundle.crt"] - CA bundle. + // +optional + ClientCA ConfigMapNameReference `json:"clientCA"` + // additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the + // API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth + // server from JavaScript applications. + // The values are regular expressions that correspond to the Golang regular expression language. + // +optional + AdditionalCORSAllowedOrigins []string `json:"additionalCORSAllowedOrigins,omitempty"` + // encryption allows the configuration of encryption of resources at the datastore layer. + // +optional + Encryption APIServerEncryption `json:"encryption"` + // tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. + // + // If unset, a default (which may change between releases) is chosen. Note that only Old, + // Intermediate and Custom profiles are currently supported, and the maximum available + // MinTLSVersions is VersionTLS12. + // +optional + TLSSecurityProfile *TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` + // audit specifies the settings for audit configuration to be applied to all OpenShift-provided + // API servers in the cluster. + // +optional + // +kubebuilder:default={profile: Default} + Audit Audit `json:"audit"` +} + +// AuditProfileType defines the audit policy profile type. +// +kubebuilder:validation:Enum=Default;WriteRequestBodies;AllRequestBodies;None +type AuditProfileType string + +const ( + // "None" disables audit logs. + NoneAuditProfileType AuditProfileType = "None" + + // "Default" is the existing default audit configuration policy. + DefaultAuditProfileType AuditProfileType = "Default" + + // "WriteRequestBodies" is similar to Default but it logs request and response + // HTTP payloads for write requests (create, update, patch) + WriteRequestBodiesAuditProfileType AuditProfileType = "WriteRequestBodies" + + // "AllRequestBodies" is similar to WriteRequestBodies, but also logs request + // and response HTTP payloads for read requests (get, list). + AllRequestBodiesAuditProfileType AuditProfileType = "AllRequestBodies" +) + +type Audit struct { + // profile specifies the name of the desired top-level audit profile to be applied to all requests + // sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, + // openshift-apiserver and oauth-apiserver), with the exception of those requests that match + // one or more of the customRules. + // + // The following profiles are provided: + // - Default: default policy which means MetaData level logging with the exception of events + // (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody + // level). + // - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for + // write requests (create, update, patch). + // - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response + // HTTP payloads for read requests (get, list). + // - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. 
+	//
+	// Warning: It is not recommended to disable audit logging by using the `None` profile unless you
+	// are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues.
+	// If you disable audit logging and a support situation arises, you might need to enable audit logging
+	// and reproduce the issue in order to troubleshoot properly.
+	//
+	// If unset, the 'Default' profile is used as the default.
+	//
+	// +kubebuilder:default=Default
+	Profile AuditProfileType `json:"profile,omitempty"`
+	// customRules specify profiles per group. These profiles take precedence over the
+	// top-level profile field if they apply. They are evaluated from top to bottom, and
+	// the first one that matches applies.
+	// +listType=map
+	// +listMapKey=group
+	// +optional
+	CustomRules []AuditCustomRule `json:"customRules,omitempty"`
+}
+
+// AuditCustomRule describes a custom rule for an audit profile that takes precedence over
+// the top-level profile.
+type AuditCustomRule struct {
+	// group is the name of a group a request user must be a member of in order for this profile to apply.
+	//
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:MinLength=1
+	// +required
+	Group string `json:"group"`
+	// profile specifies the name of the desired audit policy configuration to be deployed to
+	// all OpenShift-provided API servers in the cluster.
+	//
+	// The following profiles are provided:
+	// - Default: the existing default policy.
+	// - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for
+	// write requests (create, update, patch).
+	// - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response
+	// HTTP payloads for read requests (get, list).
+	// - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.
+	//
+	// If unset, the 'Default' profile is used as the default.
+	//
+	// +kubebuilder:validation:Required
+	// +required
+	Profile AuditProfileType `json:"profile,omitempty"`
+}
+
+type APIServerServingCerts struct {
+	// namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames.
+	// If no named certificates are provided, or no named certificates match the server name as understood by a client,
+	// the defaultServingCertificate will be used.
+	// +optional
+	NamedCertificates []APIServerNamedServingCert `json:"namedCertificates,omitempty"`
+}
+
+// APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate.
+type APIServerNamedServingCert struct {
+	// names is an optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to
+	// serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates.
+	// Exact names take precedence over wildcard names. Explicit names defined here take precedence over extracted implicit names.
+	// +optional
+	Names []string `json:"names,omitempty"`
+	// servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic.
+	// The secret must exist in the openshift-config namespace and contain the following required fields:
+	// - Secret.Data["tls.key"] - TLS private key.
+	// - Secret.Data["tls.crt"] - TLS certificate.
+	ServingCertificate SecretNameReference `json:"servingCertificate"`
+}
+
+type APIServerEncryption struct {
+	// type defines what encryption type should be used to encrypt resources at the datastore layer.
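For the audit fields defined above, a sketch of a configuration that keeps the Default profile globally but raises logging for one group; the group name is invented and configv1 aliases the vendored package:

	// Sketch only. Assumes: import configv1 "github.com/openshift/api/config/v1"
	audit := configv1.Audit{
		Profile: configv1.DefaultAuditProfileType,
		CustomRules: []configv1.AuditCustomRule{
			// Evaluated top to bottom; the first matching group wins.
			{
				Group:   "system:serviceaccounts:payments", // illustrative group name
				Profile: configv1.WriteRequestBodiesAuditProfileType,
			},
		},
	}
	_ = audit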
+ // When this field is unset (i.e. when it is set to the empty string), identity is implied. + // The behavior of unset can and will change over time. Even if encryption is enabled by default, + // the meaning of unset may change to a different encryption type based on changes in best practices. + // + // When encryption is enabled, all sensitive resources shipped with the platform are encrypted. + // This list of sensitive resources can and will change over time. The current authoritative list is: + // + // 1. secrets + // 2. configmaps + // 3. routes.route.openshift.io + // 4. oauthaccesstokens.oauth.openshift.io + // 5. oauthauthorizetokens.oauth.openshift.io + // + // +unionDiscriminator + // +optional + Type EncryptionType `json:"type,omitempty"` +} + +// +kubebuilder:validation:Enum="";identity;aescbc +type EncryptionType string + +const ( + // identity refers to a type where no encryption is performed at the datastore layer. + // Resources are written as-is without encryption. + EncryptionTypeIdentity EncryptionType = "identity" + + // aescbc refers to a type where AES-CBC with PKCS#7 padding and a 32-byte key + // is used to perform encryption at the datastore layer. + EncryptionTypeAESCBC EncryptionType = "aescbc" +) + +type APIServerStatus struct { +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type APIServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []APIServer `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go new file mode 100644 index 000000000..7f346069e --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -0,0 +1,156 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Authentication specifies cluster-wide settings for authentication (like OAuth and +// webhook token authenticators). The canonical name of an instance is `cluster`. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Authentication struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec AuthenticationSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status AuthenticationStatus `json:"status"` +} + +type AuthenticationSpec struct { + // type identifies the cluster managed, user facing authentication mode in use. + // Specifically, it manages the component that responds to login attempts. + // The default is IntegratedOAuth. + // +optional + Type AuthenticationType `json:"type"` + + // oauthMetadata contains the discovery endpoint data for OAuth 2.0 + // Authorization Server Metadata for an external OAuth server. 
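For the encryption union just defined, opting in to AES-CBC at the datastore layer is a one-field change; a minimal sketch, assuming the vendored import path:

	// Sketch only. Assumes: import configv1 "github.com/openshift/api/config/v1"
	spec := configv1.APIServerSpec{
		Encryption: configv1.APIServerEncryption{
			// Unset ("") currently implies identity, i.e. no encryption.
			Type: configv1.EncryptionTypeAESCBC,
		},
	}
	_ = spec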
+ // This discovery document can be viewed from its served location: + // oc get --raw '/.well-known/oauth-authorization-server' + // For further details, see the IETF Draft: + // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + // If oauthMetadata.name is non-empty, this value has precedence + // over any metadata reference stored in status. + // The key "oauthMetadata" is used to locate the data. + // If specified and the config map or expected key is not found, no metadata is served. + // If the specified metadata is not valid, no metadata is served. + // The namespace for this config map is openshift-config. + // +optional + OAuthMetadata ConfigMapNameReference `json:"oauthMetadata"` + + // webhookTokenAuthenticators is DEPRECATED, setting it has no effect. + WebhookTokenAuthenticators []DeprecatedWebhookTokenAuthenticator `json:"webhookTokenAuthenticators,omitempty"` + + // webhookTokenAuthenticator configures a remote token reviewer. + // These remote authentication webhooks can be used to verify bearer tokens + // via the tokenreviews.authentication.k8s.io REST API. This is required to + // honor bearer tokens that are provisioned by an external authentication service. + // +optional + WebhookTokenAuthenticator *WebhookTokenAuthenticator `json:"webhookTokenAuthenticator,omitempty"` + + // serviceAccountIssuer is the identifier of the bound service account token + // issuer. + // The default is https://kubernetes.default.svc + // WARNING: Updating this field will result in the invalidation of + // all bound tokens with the previous issuer value. Unless the + // holder of a bound token has explicit support for a change in + // issuer, they will not request a new bound token until pod + // restart or until their existing token exceeds 80% of its + // duration. + // +optional + ServiceAccountIssuer string `json:"serviceAccountIssuer"` +} + +type AuthenticationStatus struct { + // integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 + // Authorization Server Metadata for the in-cluster integrated OAuth server. + // This discovery document can be viewed from its served location: + // oc get --raw '/.well-known/oauth-authorization-server' + // For further details, see the IETF Draft: + // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + // This contains the observed value based on cluster state. + // An explicitly set value in spec.oauthMetadata has precedence over this field. + // This field has no meaning if authentication spec.type is not set to IntegratedOAuth. + // The key "oauthMetadata" is used to locate the data. + // If the config map or expected key is not found, no metadata is served. + // If the specified metadata is not valid, no metadata is served. + // The namespace for this config map is openshift-config-managed. + IntegratedOAuthMetadata ConfigMapNameReference `json:"integratedOAuthMetadata"` + + // TODO if we add support for an in-cluster operator managed Keycloak instance + // KeycloakOAuthMetadata ConfigMapNameReference `json:"keycloakOAuthMetadata"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
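A sketch of an AuthenticationSpec that delegates bearer-token verification to a remote webhook, per the fields above; the secret name is invented, and the constants referenced are defined just below:

	// Sketch only. Assumes: import configv1 "github.com/openshift/api/config/v1"
	authSpec := configv1.AuthenticationSpec{
		Type: configv1.AuthenticationTypeIntegratedOAuth,
		WebhookTokenAuthenticator: &configv1.WebhookTokenAuthenticator{
			// The referenced secret must live in openshift-config.
			KubeConfig: configv1.SecretNameReference{Name: "my-token-webhook"}, // illustrative name
		},
	}
	_ = authSpec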
+// +openshift:compatibility-gen:level=1 +type AuthenticationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Authentication `json:"items"` +} + +type AuthenticationType string + +const ( + // None means that no cluster managed authentication system is in place. + // Note that user login will only work if a manually configured system is in place and + // referenced in authentication spec via oauthMetadata and webhookTokenAuthenticators. + AuthenticationTypeNone AuthenticationType = "None" + + // IntegratedOAuth refers to the cluster managed OAuth server. + // It is configured via the top level OAuth config. + AuthenticationTypeIntegratedOAuth AuthenticationType = "IntegratedOAuth" + + // TODO if we add support for an in-cluster operator managed Keycloak instance + // AuthenticationTypeKeycloak AuthenticationType = "Keycloak" +) + +// deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. +// It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. +type DeprecatedWebhookTokenAuthenticator struct { + // kubeConfig contains kube config file data which describes how to access the remote webhook service. + // For further details, see: + // https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication + // The key "kubeConfig" is used to locate the data. + // If the secret or expected key is not found, the webhook is not honored. + // If the specified kube config data is not valid, the webhook is not honored. + // The namespace for this secret is determined by the point of use. + KubeConfig SecretNameReference `json:"kubeConfig"` +} + +// webhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator +type WebhookTokenAuthenticator struct { + // kubeConfig references a secret that contains kube config file data which + // describes how to access the remote webhook service. + // The namespace for the referenced secret is openshift-config. + // + // For further details, see: + // + // https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication + // + // The key "kubeConfig" is used to locate the data. + // If the secret or expected key is not found, the webhook is not honored. + // If the specified kube config data is not valid, the webhook is not honored. + // +kubebuilder:validation:Required + // +required + KubeConfig SecretNameReference `json:"kubeConfig"` +} + +const ( + // OAuthMetadataKey is the key for the oauth authorization server metadata + OAuthMetadataKey = "oauthMetadata" + + // KubeConfigKey is the key for the kube config file data in a secret + KubeConfigKey = "kubeConfig" +) diff --git a/vendor/github.com/openshift/api/config/v1/types_build.go b/vendor/github.com/openshift/api/config/v1/types_build.go new file mode 100644 index 000000000..34f46a1f9 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_build.go @@ -0,0 +1,121 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Build configures the behavior of OpenShift builds for the entire cluster. +// This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds. 
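The exported keys above are the well-known locations consumers read from; for example, a sketch of extracting the webhook kubeconfig bytes from the referenced secret (the helper name is illustrative):

	// Sketch only. Assumes:
	//	import corev1 "k8s.io/api/core/v1"
	//	import configv1 "github.com/openshift/api/config/v1"
	func webhookKubeConfig(secret *corev1.Secret) ([]byte, bool) {
		data, ok := secret.Data[configv1.KubeConfigKey] // key is "kubeConfig"
		return data, ok
	}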
+//
+// The canonical name is "cluster"
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Build struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// Spec holds user-settable values for the build controller configuration
+	// +kubebuilder:validation:Required
+	// +required
+	Spec BuildSpec `json:"spec"`
+}
+
+type BuildSpec struct {
+	// AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that
+	// should be trusted for image pushes and pulls during builds.
+	// The namespace for this config map is openshift-config.
+	//
+	// DEPRECATED: Additional CAs for image pull and push should be set on
+	// image.config.openshift.io/cluster instead.
+	//
+	// +optional
+	AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"`
+	// BuildDefaults controls the default information for Builds
+	// +optional
+	BuildDefaults BuildDefaults `json:"buildDefaults"`
+	// BuildOverrides controls override settings for builds
+	// +optional
+	BuildOverrides BuildOverrides `json:"buildOverrides"`
+}
+
+type BuildDefaults struct {
+	// DefaultProxy contains the default proxy settings for all build operations, including image pull/push
+	// and source download.
+	//
+	// Values can be overridden by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables
+	// in the build config's strategy.
+	// +optional
+	DefaultProxy *ProxySpec `json:"defaultProxy,omitempty"`
+
+	// GitProxy contains the proxy settings for git operations only. If set, this will override
+	// any Proxy settings for all git commands, such as git clone.
+	//
+	// Values that are not set here will be inherited from DefaultProxy.
+	// +optional
+	GitProxy *ProxySpec `json:"gitProxy,omitempty"`
+
+	// Env is a set of default environment variables that will be applied to the
+	// build if the specified variables do not exist on the build
+	// +optional
+	Env []corev1.EnvVar `json:"env,omitempty"`
+
+	// ImageLabels is a list of docker labels that are applied to the resulting image.
+	// User can override a default label by providing a label with the same name in their
+	// Build/BuildConfig.
+	// +optional
+	ImageLabels []ImageLabel `json:"imageLabels,omitempty"`
+
+	// Resources defines resource requirements to execute the build.
+	// +optional
+	Resources corev1.ResourceRequirements `json:"resources"`
+}
+
+type ImageLabel struct {
+	// Name defines the name of the label. It must have non-zero length.
+	Name string `json:"name"`
+
+	// Value defines the literal value of the label.
+	// +optional
+	Value string `json:"value,omitempty"`
+}
+
+type BuildOverrides struct {
+	// ImageLabels is a list of docker labels that are applied to the resulting image.
+	// If user provided a label in their Build/BuildConfig with the same name as one in this
+	// list, the user's label will be overwritten.
+	// +optional
+	ImageLabels []ImageLabel `json:"imageLabels,omitempty"`
+
+	// NodeSelector is a selector which must be true for the build pod to fit on a node
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+	// Tolerations is a list of Tolerations that will override any existing
+	// tolerations set on a build pod.
+	// +optional
+	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+
+	// ForcePull overrides, if set, the equivalent value in the builds,
+	// i.e.
false disables force pull for all builds, + // true enables force pull for all builds, + // independently of what each build specifies itself + // +optional + ForcePull *bool `json:"forcePull,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type BuildList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Build `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go new file mode 100644 index 000000000..bbe359679 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go @@ -0,0 +1,203 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterOperator is the Custom Resource object which holds the current state +// of an operator. This object is used by operators to convey their state to +// the rest of the cluster. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ClusterOperator struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // spec holds configuration that could apply to any operator. + // +kubebuilder:validation:Required + // +required + Spec ClusterOperatorSpec `json:"spec"` + + // status holds the information about the state of an operator. It is consistent with status information across + // the Kubernetes ecosystem. + // +optional + Status ClusterOperatorStatus `json:"status"` +} + +// ClusterOperatorSpec is empty for now, but you could imagine holding information like "pause". +type ClusterOperatorSpec struct { +} + +// ClusterOperatorStatus provides information about the status of the operator. +// +k8s:deepcopy-gen=true +type ClusterOperatorStatus struct { + // conditions describes the state of the operator's managed and monitored components. + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + + // versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple + // operand entries in the array. Available operators must report the version of the operator itself with the name "operator". + // An operator reports a new "operator" version when it has rolled out the new version to all of its operands. + // +optional + Versions []OperandVersion `json:"versions,omitempty"` + + // relatedObjects is a list of objects that are "interesting" or related to this operator. Common uses are: + // 1. the detailed resource driving the operator + // 2. operator namespaces + // 3. operand namespaces + // +optional + RelatedObjects []ObjectReference `json:"relatedObjects,omitempty"` + + // extension contains any additional status information specific to the + // operator which owns this status object. 
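Tying the build types together, an illustrative sketch of cluster-wide defaults plus a hard override (all values invented):

	// Sketch only. Assumes:
	//	import corev1 "k8s.io/api/core/v1"
	//	import configv1 "github.com/openshift/api/config/v1"
	forcePull := true
	buildSpec := configv1.BuildSpec{
		BuildDefaults: configv1.BuildDefaults{
			// Applied only when a build does not set the variable itself.
			Env:         []corev1.EnvVar{{Name: "EXAMPLE_DEFAULT", Value: "1"}},
			ImageLabels: []configv1.ImageLabel{{Name: "io.example.team", Value: "platform"}},
		},
		BuildOverrides: configv1.BuildOverrides{
			ForcePull:    &forcePull, // wins over whatever each build specifies
			NodeSelector: map[string]string{"kubernetes.io/os": "linux"},
		},
	}
	_ = buildSpec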
+ // +nullable + // +optional + // +kubebuilder:pruning:PreserveUnknownFields + Extension runtime.RawExtension `json:"extension"` +} + +type OperandVersion struct { + // name is the name of the particular operand this version is for. It usually matches container images, not operators. + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` + + // version indicates which version of a particular operand is currently being managed. It must always match the Available + // operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout + // 1.1.0 + // +kubebuilder:validation:Required + // +required + Version string `json:"version"` +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +type ObjectReference struct { + // group of the referent. + // +kubebuilder:validation:Required + // +required + Group string `json:"group"` + // resource of the referent. + // +kubebuilder:validation:Required + // +required + Resource string `json:"resource"` + // namespace of the referent. + // +optional + Namespace string `json:"namespace,omitempty"` + // name of the referent. + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` +} + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// ClusterOperatorStatusCondition represents the state of the operator's +// managed and monitored components. +// +k8s:deepcopy-gen=true +type ClusterOperatorStatusCondition struct { + // type specifies the aspect reported by this condition. + // +kubebuilder:validation:Required + // +required + Type ClusterStatusConditionType `json:"type"` + + // status of the condition, one of True, False, Unknown. + // +kubebuilder:validation:Required + // +required + Status ConditionStatus `json:"status"` + + // lastTransitionTime is the time of the last update to the current status property. + // +kubebuilder:validation:Required + // +required + LastTransitionTime metav1.Time `json:"lastTransitionTime"` + + // reason is the CamelCase reason for the condition's current status. + // +optional + Reason string `json:"reason,omitempty"` + + // message provides additional information about the current condition. + // This is only to be consumed by humans. It may contain Line Feed + // characters (U+000A), which should be rendered as new lines. + // +optional + Message string `json:"message,omitempty"` +} + +// ClusterStatusConditionType is an aspect of operator state. +type ClusterStatusConditionType string + +const ( + // Available indicates that the component (operator and all configured operands) + // is functional and available in the cluster. Available=False means at least + // part of the component is non-functional, and that the condition requires + // immediate administrator intervention. 
+	OperatorAvailable ClusterStatusConditionType = "Available"
+
+	// Progressing indicates that the component (operator and all configured operands)
+	// is actively rolling out new code, propagating config changes, or otherwise
+	// moving from one steady state to another. Operators should not report
+	// progressing when they are reconciling (without action) a previously known
+	// state. If the observed cluster state has changed and the component is
+	// reacting to it (scaling up for instance), Progressing should become true
+	// since it is moving from one steady state to another.
+	OperatorProgressing ClusterStatusConditionType = "Progressing"
+
+	// Degraded indicates that the component (operator and all configured operands)
+	// does not match its desired state over a period of time resulting in a lower
+	// quality of service. The period of time may vary by component, but a Degraded
+	// state represents persistent observation of a condition. As a result, a
+	// component should not oscillate in and out of Degraded state. A component may
+	// be Available even if it is degraded. For example, a component may desire 3
+	// running pods, but 1 pod is crash-looping. The component is Available but
+	// Degraded because it may have a lower quality of service. A component may be
+	// Progressing but not Degraded because the transition from one state to
+	// another does not persist over a long enough period to report Degraded. A
+	// component should not report Degraded during the course of a normal upgrade.
+	// A component may report Degraded in response to a persistent infrastructure
+	// failure that requires eventual administrator intervention. For example, if
+	// a control plane host is unhealthy and must be replaced. A component should
+	// report Degraded if unexpected errors occur over a period, but the
+	// expectation is that all unexpected errors are handled as operators mature.
+	OperatorDegraded ClusterStatusConditionType = "Degraded"
+
+	// Upgradeable indicates whether the component (operator and all configured
+	// operands) is safe to upgrade based on the current cluster state. When
+	// Upgradeable is False, the cluster-version operator will prevent the
+	// cluster from performing impacted updates unless forced. When set on
+	// ClusterVersion, the message will explain which updates (minor or patch)
+	// are impacted. When set on ClusterOperator, False will block minor
+	// OpenShift updates. The message field should contain a human-readable
+	// description of what the administrator should do to allow the cluster or
+	// component to successfully update. The cluster-version operator will
+	// allow updates when this condition is not False, including when it is
+	// missing, True, or Unknown.
+	OperatorUpgradeable ClusterStatusConditionType = "Upgradeable"
+)
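The package ships only the types, so operators typically keep small helpers around the conditions slice; a sketch combining construction and lookup (the helper name is illustrative, not part of this package):

	// Sketch only. Assumes:
	//	import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	//	import configv1 "github.com/openshift/api/config/v1"

	// findCondition returns the condition of the given type, or nil if absent.
	func findCondition(conds []configv1.ClusterOperatorStatusCondition, t configv1.ClusterStatusConditionType) *configv1.ClusterOperatorStatusCondition {
		for i := range conds {
			if conds[i].Type == t {
				return &conds[i]
			}
		}
		return nil
	}

	// An example condition an operator might report:
	var available = configv1.ClusterOperatorStatusCondition{
		Type:               configv1.OperatorAvailable,
		Status:             configv1.ConditionTrue,
		Reason:             "AsExpected", // CamelCase, per the field documentation
		LastTransitionTime: metav1.Now(),
	}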
+
+// ClusterOperatorList is a list of OperatorStatus resources.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +openshift:compatibility-gen:level=1
+type ClusterOperatorList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	Items []ClusterOperator `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
new file mode 100644
index 000000000..95264eec6
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
@@ -0,0 +1,539 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterVersion is the configuration for the ClusterVersionOperator. This is where
+// parameters related to automatic updates can be set.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ClusterVersion struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// spec is the desired state of the cluster version - the operator will work
+	// to ensure that the desired version is applied to the cluster.
+	// +kubebuilder:validation:Required
+	// +required
+	Spec ClusterVersionSpec `json:"spec"`
+	// status contains information about the available updates and any in-progress
+	// updates.
+	// +optional
+	Status ClusterVersionStatus `json:"status"`
+}
+
+// ClusterVersionSpec is the desired version state of the cluster. It includes
+// the version the cluster should be at, how the cluster is identified, and
+// where the cluster should look for version updates.
+// +k8s:deepcopy-gen=true
+type ClusterVersionSpec struct {
+	// clusterID uniquely identifies this cluster. This is expected to be
+	// an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in
+	// hexadecimal values). This is a required field.
+	// +kubebuilder:validation:Required
+	// +required
+	ClusterID ClusterID `json:"clusterID"`
+
+	// desiredUpdate is an optional field that indicates the desired value of
+	// the cluster version. Setting this value will trigger an upgrade (if
+	// the current version does not match the desired version). The set of
+	// recommended update values is listed as part of available updates in
+	// status, and setting values outside that range may cause the upgrade
+	// to fail. You may specify the version field without setting image if
+	// an update exists with that version in the availableUpdates or history.
+	//
+	// If an upgrade fails the operator will halt and report status
+	// about the failing component. Setting the desired update value back to
+	// the previous version will cause a rollback to be attempted. Not all
+	// rollbacks will succeed.
+	//
+	// +optional
+	DesiredUpdate *Update `json:"desiredUpdate,omitempty"`
+
+	// upstream may be used to specify the preferred update server. By default
+	// it will use the appropriate update server for the cluster and region.
+	//
+	// +optional
+	Upstream URL `json:"upstream,omitempty"`
+	// channel is an identifier for explicitly requesting that a non-default
+	// set of updates be applied to this cluster. The default channel will
+	// contain stable updates that are appropriate for production clusters.
+	//
+	// +optional
+	Channel string `json:"channel,omitempty"`
+
+	// capabilities configures the installation of optional, core
+	// cluster components. A null value here is identical to an
+	// empty object; see the child properties for default semantics.
+	// +optional
+	Capabilities *ClusterVersionCapabilitiesSpec `json:"capabilities,omitempty"`
+
+	// overrides is a list of overrides for components that are managed by
+	// the cluster version operator. Marking a component unmanaged will prevent
+	// the operator from creating or updating the object.
+	// +optional
+	Overrides []ComponentOverride `json:"overrides,omitempty"`
+}
+
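Putting the spec fields together, a sketch of a spec that pins a channel and requests an update; the UUID, channel, and version strings are placeholders:

	// Sketch only. Assumes: import configv1 "github.com/openshift/api/config/v1"
	spec := configv1.ClusterVersionSpec{
		ClusterID: configv1.ClusterID("00000000-0000-4000-8000-000000000000"), // RFC4122-shaped placeholder
		Channel:   "stable-4.11",                                              // illustrative channel name
		DesiredUpdate: &configv1.Update{
			Version: "4.11.1", // should normally appear in availableUpdates or history
		},
	}
	_ = spec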
+// ClusterVersionStatus reports the status of the cluster versioning,
+// including any upgrades that are in progress. The current field will
+// be set to whichever version the cluster is reconciling to, and the
+// conditions array will report whether the update succeeded, is in
+// progress, or is failing.
+// +k8s:deepcopy-gen=true
+type ClusterVersionStatus struct {
+	// desired is the version that the cluster is reconciling towards.
+	// If the cluster is not yet fully initialized, desired will be set
+	// with the information available, which may be an image or a tag.
+	// +kubebuilder:validation:Required
+	// +required
+	Desired Release `json:"desired"`
+
+	// history contains a list of the most recent versions applied to the cluster.
+	// This value may be empty during cluster startup, and then will be updated
+	// when a new update is being applied. The newest update is first in the
+	// list and it is ordered by recency. Updates in the history have state
+	// Completed if the rollout completed - if an update was failing or halfway
+	// applied the state will be Partial. Only a limited amount of update history
+	// is preserved.
+	// +optional
+	History []UpdateHistory `json:"history,omitempty"`
+
+	// observedGeneration reports which version of the spec is being synced.
+	// If this value is not equal to metadata.generation, then the desired
+	// and conditions fields may represent a previous version.
+	// +kubebuilder:validation:Required
+	// +required
+	ObservedGeneration int64 `json:"observedGeneration"`
+
+	// versionHash is a fingerprint of the content that the cluster will be
+	// updated with. It is used by the operator to avoid unnecessary work
+	// and is for internal use only.
+	// +kubebuilder:validation:Required
+	// +required
+	VersionHash string `json:"versionHash"`
+
+	// capabilities describes the state of optional, core cluster components.
+	Capabilities ClusterVersionCapabilitiesStatus `json:"capabilities"`
+
+	// conditions provides information about the cluster version. The condition
+	// "Available" is set to true if the desiredUpdate has been reached. The
+	// condition "Progressing" is set to true if an update is being applied.
+	// The condition "Degraded" is set to true if an update is currently blocked
+	// by a temporary or permanent error. Conditions are only valid for the
+	// current desiredUpdate when metadata.generation is equal to
+	// status.generation.
+	// +optional
+	Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty"`
+
+	// availableUpdates contains updates recommended for this
+	// cluster. Updates which appear in conditionalUpdates but not in
+	// availableUpdates may expose this cluster to known issues. This list
+	// may be empty if no updates are recommended, if the update service
+	// is unavailable, or if an invalid channel has been specified.
+	// +nullable
+	// +kubebuilder:validation:Required
+	// +required
+	AvailableUpdates []Release `json:"availableUpdates"`
+
+	// conditionalUpdates contains the list of updates that may be
+	// recommended for this cluster if it meets specific required
+	// conditions. Consumers interested in the set of updates that are
+	// actually recommended for this cluster should use
+	// availableUpdates. This list may be empty if no updates are
+	// recommended, if the update service is unavailable, or if an empty
+	// or invalid channel has been specified.
+	// +listType=atomic
+	// +optional
+	ConditionalUpdates []ConditionalUpdate `json:"conditionalUpdates,omitempty"`
+}
+
+// UpdateState is a constant representing whether an update was successfully
+// applied to the cluster or not.
+type UpdateState string
+
+const (
+	// CompletedUpdate indicates an update was successfully applied
+	// to the cluster (all resource updates were successful).
+	CompletedUpdate UpdateState = "Completed"
+	// PartialUpdate indicates an update was never completely applied
+	// or is currently being applied.
+	PartialUpdate UpdateState = "Partial"
+)
+
+// UpdateHistory is a single attempted update to the cluster.
+type UpdateHistory struct {
+	// state reflects whether the update was fully applied. The Partial state
+	// indicates the update is not fully applied, while the Completed state
+	// indicates the update was successfully rolled out at least once (all
+	// parts of the update successfully applied).
+	// +kubebuilder:validation:Required
+	// +required
+	State UpdateState `json:"state"`
+
+	// startedTime is the time at which the update was started.
+	// +kubebuilder:validation:Required
+	// +required
+	StartedTime metav1.Time `json:"startedTime"`
+
+	// completionTime, if set, is when the update was fully applied. The update
+	// that is currently being applied will have a null completion time.
+	// Completion time will always be set for entries that are not the current
+	// update (usually to the started time of the next update).
+	// +kubebuilder:validation:Required
+	// +required
+	// +nullable
+	CompletionTime *metav1.Time `json:"completionTime"`
+
+	// version is a semantic version identifying the update version. If the
+	// requested image does not define a version, or if a failure occurs
+	// retrieving the image, this value may be empty.
+	//
+	// +optional
+	Version string `json:"version"`
+
+	// image is a container image location that contains the update. This value
+	// is always populated.
+	// +kubebuilder:validation:Required
+	// +required
+	Image string `json:"image"`
+
+	// verified indicates whether the provided update was properly verified
+	// before it was installed. If this is false the cluster may not be trusted.
+	// Verified does not cover upgradeable checks that depend on the cluster
+	// state at the time when the update target was accepted.
+	// +kubebuilder:validation:Required
+	// +required
+	Verified bool `json:"verified"`
+
+	// acceptedRisks records risks which were accepted to initiate the update.
+	// For example, it may mention an Upgradeable=False or missing signature
+	// that was overridden via desiredUpdate.force, or an update that was
+	// initiated despite not being in the availableUpdates set of recommended
+	// update targets.
+	// +optional
+	AcceptedRisks string `json:"acceptedRisks,omitempty"`
+}
+
+// ClusterID is a string RFC4122 UUID.
+type ClusterID string
+
+// ClusterVersionCapability enumerates optional, core cluster components.
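Since history is ordered newest-first with Completed/Partial states as described above, a common consumer task is finding the last fully-applied version; a sketch (the helper name is illustrative):

	// Sketch only. Assumes: import configv1 "github.com/openshift/api/config/v1"
	// lastCompletedVersion returns the most recent fully-applied version, if any.
	func lastCompletedVersion(status configv1.ClusterVersionStatus) (string, bool) {
		for _, h := range status.History { // newest first
			if h.State == configv1.CompletedUpdate {
				return h.Version, true
			}
		}
		return "", false
	}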
+// +kubebuilder:validation:Enum=openshift-samples;baremetal;marketplace +type ClusterVersionCapability string + +const ( + // ClusterVersionCapabilityOpenShiftSamples manages the sample + // image streams and templates stored in the openshift + // namespace, and any registry credentials, stored as a secret, + // needed for the image streams to import the images they + // reference. + ClusterVersionCapabilityOpenShiftSamples ClusterVersionCapability = "openshift-samples" + + // ClusterVersionCapabilityBaremetal manages the cluster + // baremetal operator which is responsible for running the metal3 + // deployment. + ClusterVersionCapabilityBaremetal ClusterVersionCapability = "baremetal" + + // ClusterVersionCapabilityMarketplace manages the Marketplace operator which + // supplies Operator Lifecycle Manager (OLM) users with default catalogs of + // "optional" operators. + ClusterVersionCapabilityMarketplace ClusterVersionCapability = "marketplace" +) + +// KnownClusterVersionCapabilities includes all known optional, core cluster components. +var KnownClusterVersionCapabilities = []ClusterVersionCapability{ + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityOpenShiftSamples, +} + +// ClusterVersionCapabilitySet defines sets of cluster version capabilities. +// +kubebuilder:validation:Enum=None;v4.11;vCurrent +type ClusterVersionCapabilitySet string + +const ( + // ClusterVersionCapabilitySetNone is an empty set enabling + // no optional capabilities. + ClusterVersionCapabilitySetNone ClusterVersionCapabilitySet = "None" + + // ClusterVersionCapabilitySet4_11 is the recommended set of + // optional capabilities to enable for the 4.11 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed. + ClusterVersionCapabilitySet4_11 ClusterVersionCapabilitySet = "v4.11" + + // ClusterVersionCapabilitySetCurrent is the recommended set + // of optional capabilities to enable for the cluster's + // current version of OpenShift. + ClusterVersionCapabilitySetCurrent ClusterVersionCapabilitySet = "vCurrent" +) + +// ClusterVersionCapabilitySets defines sets of cluster version capabilities. +var ClusterVersionCapabilitySets = map[ClusterVersionCapabilitySet][]ClusterVersionCapability{ + ClusterVersionCapabilitySetNone: {}, + ClusterVersionCapabilitySet4_11: { + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityMarketplace, + }, + ClusterVersionCapabilitySetCurrent: { + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityMarketplace, + }, +} + +// ClusterVersionCapabilitiesSpec selects the managed set of +// optional, core cluster components. +// +k8s:deepcopy-gen=true +type ClusterVersionCapabilitiesSpec struct { + // baselineCapabilitySet selects an initial set of + // optional capabilities to enable, which can be extended via + // additionalEnabledCapabilities. If unset, the cluster will + // choose a default, and the default may change over time. + // The current default is vCurrent. + // +optional + BaselineCapabilitySet ClusterVersionCapabilitySet `json:"baselineCapabilitySet,omitempty"` + + // additionalEnabledCapabilities extends the set of managed + // capabilities beyond the baseline defined in + // baselineCapabilitySet. The default is an empty set. 
+	// +listType=atomic
+	// +optional
+	AdditionalEnabledCapabilities []ClusterVersionCapability `json:"additionalEnabledCapabilities,omitempty"`
+}
+
+// ClusterVersionCapabilitiesStatus describes the state of optional,
+// core cluster components.
+// +k8s:deepcopy-gen=true
+type ClusterVersionCapabilitiesStatus struct {
+	// enabledCapabilities lists all the capabilities that are currently managed.
+	// +listType=atomic
+	// +optional
+	EnabledCapabilities []ClusterVersionCapability `json:"enabledCapabilities,omitempty"`
+
+	// knownCapabilities lists all the capabilities known to the current cluster.
+	// +listType=atomic
+	// +optional
+	KnownCapabilities []ClusterVersionCapability `json:"knownCapabilities,omitempty"`
+}
+
+// ComponentOverride allows overriding the cluster version operator's behavior
+// for a component.
+// +k8s:deepcopy-gen=true
+type ComponentOverride struct {
+	// kind identifies which object to override.
+	// +kubebuilder:validation:Required
+	// +required
+	Kind string `json:"kind"`
+	// group identifies the API group that the kind is in.
+	// +kubebuilder:validation:Required
+	// +required
+	Group string `json:"group"`
+
+	// namespace is the component's namespace. If the resource is cluster
+	// scoped, the namespace should be empty.
+	// +kubebuilder:validation:Required
+	// +required
+	Namespace string `json:"namespace"`
+	// name is the component's name.
+	// +kubebuilder:validation:Required
+	// +required
+	Name string `json:"name"`
+
+	// unmanaged controls if the cluster version operator should stop managing the
+	// resources in this cluster.
+	// Default: false
+	// +kubebuilder:validation:Required
+	// +required
+	Unmanaged bool `json:"unmanaged"`
+}
+
+// URL is a thin wrapper around string that ensures the string is a valid URL.
+type URL string
+
+// Update represents an administrator update request.
+// +k8s:deepcopy-gen=true
+type Update struct {
+	// version is a semantic version identifying the update version. When this
+	// field is part of spec, version is optional if image is specified.
+	//
+	// +optional
+	Version string `json:"version"`
+
+	// image is a container image location that contains the update. When this
+	// field is part of spec, image is optional if version is specified and the
+	// availableUpdates field contains a matching version.
+	//
+	// +optional
+	Image string `json:"image"`
+
+	// force allows an administrator to update to an image that has failed
+	// verification or upgradeable checks. This option should only
+	// be used when the authenticity of the provided image has been verified out
+	// of band because the provided image will run with full administrative access
+	// to the cluster. Do not use this flag with images that come from unknown
+	// or potentially malicious sources.
+	//
+	// +optional
+	Force bool `json:"force"`
+}
+
+// Release represents an OpenShift release image and associated metadata.
+// +k8s:deepcopy-gen=true
+type Release struct {
+	// version is a semantic version identifying the update version. When this
+	// field is part of spec, version is optional if image is specified.
+	// +required
+	Version string `json:"version"`
+
+	// image is a container image location that contains the update. When this
+	// field is part of spec, image is optional if version is specified and the
+	// availableUpdates field contains a matching version.
+	// +required
+	Image string `json:"image"`
+
+	// url contains information about this release.
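The override mechanism defined above pairs with ClusterVersionSpec.overrides; a sketch of taking one object out of cluster-version-operator management (target names are illustrative):

	// Sketch only. Assumes: import configv1 "github.com/openshift/api/config/v1"
	override := configv1.ComponentOverride{
		Kind:      "Deployment",
		Group:     "apps",
		Namespace: "openshift-console", // illustrative target
		Name:      "console",
		Unmanaged: true, // the cluster-version operator stops reconciling it
	}
	_ = override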
This URL is set by + // the 'url' metadata property on a release or the metadata returned by + // the update API and should be displayed as a link in user + // interfaces. The URL field may not be set for test or nightly + // releases. + // +optional + URL URL `json:"url,omitempty"` + + // channels is the set of Cincinnati channels to which the release + // currently belongs. + // +optional + Channels []string `json:"channels,omitempty"` +} + +// RetrievedUpdates reports whether available updates have been retrieved from +// the upstream update server. The condition is Unknown before retrieval, False +// if the updates could not be retrieved or recently failed, or True if the +// availableUpdates field is accurate and recent. +const RetrievedUpdates ClusterStatusConditionType = "RetrievedUpdates" + +// ConditionalUpdate represents an update which is recommended to some +// clusters on the version the current cluster is reconciling, but which +// may not be recommended for the current cluster. +type ConditionalUpdate struct { + // release is the target of the update. + // +kubebuilder:validation:Required + // +required + Release Release `json:"release"` + + // risks represents the range of issues associated with + // updating to the target release. The cluster-version + // operator will evaluate all entries, and only recommend the + // update if there is at least one entry and all entries + // recommend the update. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +required + Risks []ConditionalUpdateRisk `json:"risks" patchStrategy:"merge" patchMergeKey:"name"` + + // conditions represents the observations of the conditional update's + // current status. Known types are: + // * Evaluating, for whether the cluster-version operator will attempt to evaluate any risks[].matchingRules. + // * Recommended, for whether the update is recommended for the current cluster. + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +// ConditionalUpdateRisk represents a reason and cluster-state +// for not recommending a conditional update. +// +k8s:deepcopy-gen=true +type ConditionalUpdateRisk struct { + // url contains information about this risk. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Format=uri + // +kubebuilder:validation:MinLength=1 + // +required + URL string `json:"url"` + + // name is the CamelCase reason for not recommending a + // conditional update, in the event that matchingRules match the + // cluster state. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + + // message provides additional information about the risk of + // updating, in the event that matchingRules match the cluster + // state. This is only to be consumed by humans. It may + // contain Line Feed characters (U+000A), which should be + // rendered as new lines. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +required + Message string `json:"message"` + + // matchingRules is a slice of conditions for deciding which + // clusters match the risk and which do not. The slice is + // ordered by decreasing precedence. 
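For the matching machinery being described here, a sketch of a risk gated by a PromQL rule; the query and names are invented, and the ClusterCondition and PromQLClusterCondition types are defined just below:

	// Sketch only. Assumes: import configv1 "github.com/openshift/api/config/v1"
	risk := configv1.ConditionalUpdateRisk{
		URL:     "https://example.com/kb/some-known-issue", // illustrative
		Name:    "ExampleInfraIssue",
		Message: "Clusters on this infrastructure may hit a known issue.",
		MatchingRules: []configv1.ClusterCondition{
			{
				Type: "PromQL",
				PromQL: &configv1.PromQLClusterCondition{
					// Should evaluate to 1 (match) or 0 (no match).
					PromQL: `group(cluster_infrastructure_provider{type="ExampleCloud"})`,
				},
			},
		},
	}
	_ = risk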
+	// The cluster-version operator will walk the slice in order, and stop
+	// after the first it can successfully evaluate. If no condition can be
+	// successfully evaluated, the update will not be recommended.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:MinItems=1
+	// +listType=atomic
+	// +required
+	MatchingRules []ClusterCondition `json:"matchingRules"`
+}
+
+// ClusterCondition is a union of typed cluster conditions. The 'type'
+// property determines which of the type-specific properties are relevant.
+// When evaluated on a cluster, the condition may match, not match, or
+// fail to evaluate.
+// +k8s:deepcopy-gen=true
+type ClusterCondition struct {
+	// type represents the cluster-condition type. This defines
+	// the members and semantics of any additional properties.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Enum={"Always","PromQL"}
+	// +required
+	Type string `json:"type"`
+
+	// promQL represents a cluster condition based on PromQL.
+	// +optional
+	PromQL *PromQLClusterCondition `json:"promql,omitempty"`
+}
+
+// PromQLClusterCondition represents a cluster condition based on PromQL.
+type PromQLClusterCondition struct {
+	// PromQL is a PromQL query classifying clusters. This query
+	// should return a 1 in the match case and a 0 in the
+	// does-not-match case. Queries which return no time
+	// series, or which return values besides 0 or 1, are
+	// evaluation failures.
+	// +kubebuilder:validation:Required
+	// +required
+	PromQL string `json:"promql"`
+}
+
+// ClusterVersionList is a list of ClusterVersion resources.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +openshift:compatibility-gen:level=1
+type ClusterVersionList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	Items []ClusterVersion `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go
new file mode 100644
index 000000000..e1a128827
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_console.go
@@ -0,0 +1,69 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Console holds cluster-wide configuration for the web console, including the
+// logout URL, and reports the public URL of the console. The canonical name is
+// `cluster`.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Console struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// spec holds user settable values for configuration
+	// +kubebuilder:validation:Required
+	// +required
+	Spec ConsoleSpec `json:"spec"`
+	// status holds observed values from the cluster. They may not be overridden.
+	// +optional
+	Status ConsoleStatus `json:"status"`
+}
+
+// ConsoleSpec is the specification of the desired behavior of the Console.
+type ConsoleSpec struct {
+	// +optional
+	Authentication ConsoleAuthentication `json:"authentication"`
+}
+
+// ConsoleStatus defines the observed status of the Console.
+type ConsoleStatus struct {
+	// The URL for the console.
This will be derived from the host for the route that + // is created for the console. + ConsoleURL string `json:"consoleURL"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ConsoleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Console `json:"items"` +} + +// ConsoleAuthentication defines a list of optional configuration for console authentication. +type ConsoleAuthentication struct { + // An optional, absolute URL to redirect web browsers to after logging out of + // the console. If not specified, it will redirect to the default login page. + // This is required when using an identity provider that supports single + // sign-on (SSO) such as: + // - OpenID (Keycloak, Azure) + // - RequestHeader (GSSAPI, SSPI, SAML) + // - OAuth (GitHub, GitLab, Google) + // Logging out of the console will destroy the user's token. The logoutRedirect + // provides the user the option to perform single logout (SLO) through the identity + // provider to destroy their single sign-on session. + // +optional + // +kubebuilder:validation:Pattern=`^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$` + LogoutRedirect string `json:"logoutRedirect,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go new file mode 100644 index 000000000..c223f828e --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_dns.go @@ -0,0 +1,92 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DNS holds cluster-wide information about DNS. The canonical name is `cluster` +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type DNS struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec DNSSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status DNSStatus `json:"status"` +} + +type DNSSpec struct { + // baseDomain is the base domain of the cluster. All managed DNS records will + // be sub-domains of this base. + // + // For example, given the base domain `openshift.example.com`, an API server + // DNS record may be created for `cluster-api.openshift.example.com`. + // + // Once set, this field cannot be changed. + BaseDomain string `json:"baseDomain"` + // publicZone is the location where all the DNS records that are publicly accessible to + // the internet exist. + // + // If this field is nil, no public records should be created. + // + // Once set, this field cannot be changed. + // + // +optional + PublicZone *DNSZone `json:"publicZone,omitempty"` + // privateZone is the location where all the DNS records that are only available internally + // to the cluster exist. + // + // If this field is nil, no private records should be created. + // + // Once set, this field cannot be changed. 
+ //
+ // +optional
+ PrivateZone *DNSZone `json:"privateZone,omitempty"`
+}
+
+// DNSZone is used to define a DNS hosted zone.
+// A zone can be identified by an ID or tags.
+type DNSZone struct {
+ // id is the identifier that can be used to find the DNS hosted zone.
+ //
+ // on AWS zone can be fetched using `ID` as id in [1]
+ // on Azure zone can be fetched using `ID` as a pre-determined name in [2],
+ // on GCP zone can be fetched using `ID` as a pre-determined name in [3].
+ //
+ // [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options
+ // [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show
+ // [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get
+ // +optional
+ ID string `json:"id,omitempty"`
+
+ // tags can be used to query the DNS hosted zone.
+ //
+ // on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters,
+ //
+ // [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options
+ // +optional
+ Tags map[string]string `json:"tags,omitempty"`
+}
+
+type DNSStatus struct {
+ // dnsSuffix (service-ca amongst others)
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type DNSList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []DNS `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go
new file mode 100644
index 000000000..7c34a1ef1
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_feature.go
@@ -0,0 +1,219 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// FeatureGate holds cluster-wide information about feature gates. The canonical name is `cluster`
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type FeatureGate struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec FeatureGateSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status FeatureGateStatus `json:"status"`
+}
+
+type FeatureSet string
+
+var (
+ // Default feature set that allows upgrades.
+ Default FeatureSet = ""
+
+ // TechPreviewNoUpgrade turns on tech preview features that are not part of the normal supported platform. Turning
+ // this feature set on CANNOT BE UNDONE and PREVENTS UPGRADES.
+ TechPreviewNoUpgrade FeatureSet = "TechPreviewNoUpgrade"
+
+ // CustomNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES.
+ // Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations
+ // your cluster may fail in an unrecoverable way.
+ CustomNoUpgrade FeatureSet = "CustomNoUpgrade"
+
+ // LatencySensitive enables the TopologyManager feature gate. Upgrades are enabled with this feature set.
+ LatencySensitive FeatureSet = "LatencySensitive"
+
+ // IPv6DualStackNoUpgrade enables dual-stack. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES.
+ IPv6DualStackNoUpgrade FeatureSet = "IPv6DualStackNoUpgrade"
+)
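// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the vendored diff): selecting the
// CustomNoUpgrade feature set with the FeatureGateSpec / FeatureGateSelection /
// CustomFeatureGates types defined below. The gate names are examples taken
// from elsewhere in this file; remember that CustomNoUpgrade cannot be undone
// and prevents upgrades. Assumes this file's metav1 import.
func exampleCustomFeatureGate() FeatureGate {
	return FeatureGate{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, // canonical name
		Spec: FeatureGateSpec{
			FeatureGateSelection: FeatureGateSelection{
				FeatureSet: CustomNoUpgrade,
				CustomNoUpgrade: &CustomFeatureGates{
					Enabled:  []string{"NodeSwap"},
					Disabled: []string{"CSIMigrationAWS"},
				},
			},
		},
	}
}
// ---------------------------------------------------------------------------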
+
+type FeatureGateSpec struct {
+ FeatureGateSelection `json:",inline"`
+}
+
+// +union
+type FeatureGateSelection struct {
+ // featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting.
+ // Turning on or off features may cause irreversible changes in your cluster which cannot be undone.
+ // +unionDiscriminator
+ // +optional
+ FeatureSet FeatureSet `json:"featureSet,omitempty"`
+
+ // customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES.
+ // Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations
+ // your cluster may fail in an unrecoverable way. featureSet must equal "CustomNoUpgrade" to use this field.
+ // +optional
+ // +nullable
+ CustomNoUpgrade *CustomFeatureGates `json:"customNoUpgrade,omitempty"`
+}
+
+type CustomFeatureGates struct {
+ // enabled is a list of all feature gates that you want to force on
+ // +optional
+ Enabled []string `json:"enabled,omitempty"`
+ // disabled is a list of all feature gates that you want to force off
+ // +optional
+ Disabled []string `json:"disabled,omitempty"`
+}
+
+type FeatureGateStatus struct {
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type FeatureGateList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []FeatureGate `json:"items"`
+}
+
+type FeatureGateEnabledDisabled struct {
+ Enabled []string
+ Disabled []string
+}
+
+// FeatureSets contains a map of FeatureSet names to the features they enable
+// and disable.
+//
+// NOTE: The caller needs to make sure to check that the key exists in the map,
+// using golang's two-value ("comma ok") map lookup. A possible scenario is an upgrade where new
+// FeatureSets are added and a controller has not been upgraded with a newer
+// version of this file. In this upgrade scenario the map could return nil.
+//
+// example:
+// if featureSet, ok := FeatureSets["SomeNewFeature"]; ok { }
+//
+// If you put an item in either of these lists, put your area and name on it so we can find owners.
+var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{
+ Default: defaultFeatures,
+ CustomNoUpgrade: {
+ Enabled: []string{},
+ Disabled: []string{},
+ },
+ TechPreviewNoUpgrade: newDefaultFeatures().
+ with("CSIMigrationAWS"). // sig-storage, jsafrane, Kubernetes feature gate
+ with("CSIMigrationGCE"). // sig-storage, fbertina, Kubernetes feature gate
+ with("CSIMigrationAzureFile"). // sig-storage, fbertina, Kubernetes feature gate
+ with("CSIMigrationvSphere"). // sig-storage, fbertina, Kubernetes feature gate
+ with("ExternalCloudProvider"). // sig-cloud-provider, jspeed, OCP specific
+ with("CSIDriverSharedResource"). // sig-build, adkaplan, OCP specific
+ with("BuildCSIVolumes"). // sig-build, adkaplan, OCP specific
+ with("NodeSwap"). // sig-node, ehashman, Kubernetes feature gate
+ with("MachineAPIProviderOpenStack").
// openstack, egarcia (#forum-openstack), OCP specific + toFeatures(), + LatencySensitive: newDefaultFeatures(). + with( + "TopologyManager", // sig-pod, sjenning + ). + toFeatures(), + IPv6DualStackNoUpgrade: newDefaultFeatures(). + with( + "IPv6DualStack", // sig-network, danwinship + ). + toFeatures(), +} + +var defaultFeatures = &FeatureGateEnabledDisabled{ + Enabled: []string{ + "APIPriorityAndFairness", // sig-apimachinery, deads2k + "RotateKubeletServerCertificate", // sig-pod, sjenning + "DownwardAPIHugePages", // sig-node, rphillips + "PodSecurity", // sig-auth, s-urbaniak + }, + Disabled: []string{ + "CSIMigrationAWS", // sig-storage, jsafrane + "CSIMigrationGCE", // sig-storage, jsafrane + "CSIMigrationAzureFile", // sig-storage, jsafrane + "CSIMigrationvSphere", // sig-storage, jsafrane + }, +} + +type featureSetBuilder struct { + forceOn []string + forceOff []string +} + +func newDefaultFeatures() *featureSetBuilder { + return &featureSetBuilder{} +} + +func (f *featureSetBuilder) with(forceOn ...string) *featureSetBuilder { + f.forceOn = append(f.forceOn, forceOn...) + return f +} + +func (f *featureSetBuilder) without(forceOff ...string) *featureSetBuilder { + f.forceOff = append(f.forceOff, forceOff...) + return f +} + +func (f *featureSetBuilder) isForcedOff(needle string) bool { + for _, forcedOff := range f.forceOff { + if needle == forcedOff { + return true + } + } + return false +} + +func (f *featureSetBuilder) isForcedOn(needle string) bool { + for _, forceOn := range f.forceOn { + if needle == forceOn { + return true + } + } + return false +} + +func (f *featureSetBuilder) toFeatures() *FeatureGateEnabledDisabled { + finalOn := []string{} + finalOff := []string{} + + // only add the default enabled features if they haven't been explicitly set off + for _, defaultOn := range defaultFeatures.Enabled { + if !f.isForcedOff(defaultOn) { + finalOn = append(finalOn, defaultOn) + } + } + for _, currOn := range f.forceOn { + if f.isForcedOff(currOn) { + panic("coding error, you can't have features both on and off") + } + finalOn = append(finalOn, currOn) + } + + // only add the default disabled features if they haven't been explicitly set on + for _, defaultOff := range defaultFeatures.Disabled { + if !f.isForcedOn(defaultOff) { + finalOff = append(finalOff, defaultOff) + } + } + for _, currOff := range f.forceOff { + finalOff = append(finalOff, currOff) + } + + return &FeatureGateEnabledDisabled{ + Enabled: finalOn, + Disabled: finalOff, + } +} diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go new file mode 100644 index 000000000..08a31072d --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_image.go @@ -0,0 +1,128 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Image governs policies related to imagestream imports and runtime configuration +// for external registries. It allows cluster admins to configure which registries +// OpenShift is allowed to import images from, extra CA trust bundles for external +// registries, and policies to block or allow registry hostnames. +// When exposing OpenShift's image registry to the public, this also lets cluster +// admins specify the external hostname. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +type Image struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec ImageSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ImageStatus `json:"status"` +} + +type ImageSpec struct { + // allowedRegistriesForImport limits the container image registries that normal users may import + // images from. Set this list to the registries that you trust to contain valid Docker + // images and that you want applications to be able to import from. Users with + // permission to create Images or ImageStreamMappings via the API are not affected by + // this policy - typically only administrators or system integrations will have those + // permissions. + // +optional + AllowedRegistriesForImport []RegistryLocation `json:"allowedRegistriesForImport,omitempty"` + + // externalRegistryHostnames provides the hostnames for the default external image + // registry. The external hostname should be set only when the image registry + // is exposed externally. The first value is used in 'publicDockerImageRepository' + // field in ImageStreams. The value must be in "hostname[:port]" format. + // +optional + ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"` + + // additionalTrustedCA is a reference to a ConfigMap containing additional CAs that + // should be trusted during imagestream import, pod image pull, build image pull, and + // imageregistry pullthrough. + // The namespace for this config map is openshift-config. + // +optional + AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"` + + // registrySources contains configuration that determines how the container runtime + // should treat individual registries when accessing images for builds+pods. (e.g. + // whether or not to allow insecure access). It does not contain configuration for the + // internal cluster registry. + // +optional + RegistrySources RegistrySources `json:"registrySources"` +} + +type ImageStatus struct { + + // internalRegistryHostname sets the hostname for the default internal image + // registry. The value must be in "hostname[:port]" format. + // This value is set by the image registry operator which controls the internal registry + // hostname. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY + // environment variable but this setting overrides the environment variable. + // +optional + InternalRegistryHostname string `json:"internalRegistryHostname,omitempty"` + + // externalRegistryHostnames provides the hostnames for the default external image + // registry. The external hostname should be set only when the image registry + // is exposed externally. The first value is used in 'publicDockerImageRepository' + // field in ImageStreams. The value must be in "hostname[:port]" format. + // +optional + ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1
+type ImageList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Image `json:"items"`
+}
+
+// RegistryLocation contains a location of the registry specified by the registry domain
+// name. The domain name might include wildcards, like '*' or '??'.
+type RegistryLocation struct {
+ // domainName specifies a domain name for the registry
+ // In case the registry uses a port other than the standard 80 or 443, the port should be included
+ // in the domain name as well.
+ DomainName string `json:"domainName"`
+ // insecure indicates whether the registry is secure (https) or insecure (http)
+ // By default (if not specified) the registry is assumed to be secure.
+ // +optional
+ Insecure bool `json:"insecure,omitempty"`
+}
+
+// RegistrySources holds cluster-wide information about how to handle the registries config.
+type RegistrySources struct {
+ // insecureRegistries are registries which do not have valid TLS certificates or only support HTTP connections.
+ // +optional
+ InsecureRegistries []string `json:"insecureRegistries,omitempty"`
+ // blockedRegistries cannot be used for image pull and push actions. All other registries are permitted.
+ //
+ // Only one of BlockedRegistries or AllowedRegistries may be set.
+ // +optional
+ BlockedRegistries []string `json:"blockedRegistries,omitempty"`
+ // allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied.
+ //
+ // Only one of BlockedRegistries or AllowedRegistries may be set.
+ // +optional
+ AllowedRegistries []string `json:"allowedRegistries,omitempty"`
+ // containerRuntimeSearchRegistries are registries that will be searched when pulling images that do not have fully qualified
+ // domains in their pull specs. Registries will be searched in the order provided in the list.
+ // Note: this search list only works with the container runtime, i.e. CRI-O. It will NOT work with builds or imagestream imports.
+ // +optional
+ // +kubebuilder:validation:MinItems=1
+ // +kubebuilder:validation:Format=hostname
+ // +listType=set
+ ContainerRuntimeSearchRegistries []string `json:"containerRuntimeSearchRegistries,omitempty"`
+}
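// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the vendored diff): an ImageSpec
// that trusts a lab registry for imports, allows insecure access to it, and
// blocks a public registry. All registry names here are hypothetical; note
// that only one of blockedRegistries or allowedRegistries may be set.
func exampleImageSpec() ImageSpec {
	return ImageSpec{
		AllowedRegistriesForImport: []RegistryLocation{
			{DomainName: "registry.lab.local:5000", Insecure: true},
		},
		RegistrySources: RegistrySources{
			InsecureRegistries: []string{"registry.lab.local:5000"},
			BlockedRegistries:  []string{"untrusted.example.com"},
		},
	}
}
// ---------------------------------------------------------------------------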
diff --git a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go
new file mode 100644
index 000000000..8ccad9c53
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go
@@ -0,0 +1,89 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageContentPolicy holds cluster-wide information about how to handle registry mirror rules.
+// When multiple policies are defined, the outcome of the behavior is defined on each field.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageContentPolicy struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ImageContentPolicySpec `json:"spec"`
+}
+
+// ImageContentPolicySpec is the specification of the ImageContentPolicy CRD.
+type ImageContentPolicySpec struct {
+ // repositoryDigestMirrors allows images referenced by image digests in pods to be
+ // pulled from alternative mirrored repository locations. The image pull specification
+ // provided to the pod will be compared to the source locations described in RepositoryDigestMirrors
+ // and the image may be pulled down from any of the mirrors in the list instead of the
+ // specified repository allowing administrators to choose a potentially faster mirror.
+ // To pull images from mirrors by tags, set "allowMirrorByTags".
+ //
+ // Each “source” repository is treated independently; configurations for different “source”
+ // repositories don’t interact.
+ //
+ // If the "mirrors" is not specified, the image will continue to be pulled from the specified
+ // repository in the pull spec.
+ //
+ // When multiple policies are defined for the same “source” repository, the sets of defined
+ // mirrors will be merged together, preserving the relative order of the mirrors, if possible.
+ // For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the
+ // mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict
+ // (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.
+ // +optional
+ // +listType=map
+ // +listMapKey=source
+ RepositoryDigestMirrors []RepositoryDigestMirrors `json:"repositoryDigestMirrors"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageContentPolicyList lists the items in the ImageContentPolicy CRD.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageContentPolicyList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []ImageContentPolicy `json:"items"`
+}
+
+// RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config.
+type RepositoryDigestMirrors struct {
+ // source is the repository that users refer to, e.g. in image pull specifications.
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern=`^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$`
+ Source string `json:"source"`
+ // allowMirrorByTags if true, the mirrors can be used to pull the images that are referenced by their tags. Default is false; the mirrors only work when pulling the images that are referenced by their digests.
+ // Pulling images by tag can potentially yield different images, depending on which endpoint
+ // we pull from. Forcing digest-pulls for mirrors avoids that issue.
+ // +optional
+ AllowMirrorByTags bool `json:"allowMirrorByTags,omitempty"`
+ // mirrors is zero or more repositories that may also contain the same images.
+ // If the "mirrors" is not specified, the image will continue to be pulled from the specified
+ // repository in the pull spec. No mirror will be configured.
+ // The order of mirrors in this list is treated as the user's desired priority, while source
+ // is by default considered lower priority than all mirrors.
Other cluster configuration, + // including (but not limited to) other repositoryDigestMirrors objects, + // may impact the exact order mirrors are contacted in, or some mirrors may be contacted + // in parallel, so this should be considered a preference rather than a guarantee of ordering. + // +optional + // +listType=set + Mirrors []Mirror `json:"mirrors,omitempty"` +} + +// +kubebuilder:validation:Pattern=`^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$` +type Mirror string diff --git a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go new file mode 100644 index 000000000..3ac506943 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go @@ -0,0 +1,126 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageDigestMirrorSet holds cluster-wide information about how to handle registry mirror rules on using digest pull specification. +// When multiple policies are defined, the outcome of the behavior is defined on each field. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageDigestMirrorSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec ImageDigestMirrorSetSpec `json:"spec"` +} + +// ImageDigestMirrorSetSpec is the specification of the ImageDigestMirrorSet CRD. +type ImageDigestMirrorSetSpec struct { + // imageDigestMirrors allows images referenced by image digests in pods to be + // pulled from alternative mirrored repository locations. The image pull specification + // provided to the pod will be compared to the source locations described in imageDigestMirrors + // and the image may be pulled down from any of the mirrors in the list instead of the + // specified repository allowing administrators to choose a potentially faster mirror. + // To use mirrors to pull images using tag specification, users should configure + // a list of mirrors using "ImageTagMirrorSet" CRD. + // + // If the image pull specification matches the repository of "source" in multiple imagedigestmirrorset objects, + // only the objects which define the most specific namespace match will be used. + // For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as + // the "source", only the objects using quay.io/libpod/busybox are going to apply + // for pull specification quay.io/libpod/busybox. + // Each “source” repository is treated independently; configurations for different “source” + // repositories don’t interact. + // + // If the "mirrors" is not specified, the image will continue to be pulled from the specified + // repository in the pull spec. + // + // When multiple policies are defined for the same “source” repository, the sets of defined + // mirrors will be merged together, preserving the relative order of the mirrors, if possible. + // For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the + // mirrors will be used in the order `a, b, c, d, e`. 
If the orders of mirror entries conflict
+ // (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.
+ // Users who want a specific order of mirrors should configure them into one list of mirrors using the expected order.
+ // +optional
+ // +listType=atomic
+ ImageDigestMirrors []ImageDigestMirrors `json:"imageDigestMirrors"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageDigestMirrorSetList lists the items in the ImageDigestMirrorSet CRD.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageDigestMirrorSetList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []ImageDigestMirrorSet `json:"items"`
+}
+
+// +kubebuilder:validation:Pattern=`^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$`
+type ImageMirror string
+
+// MirrorSourcePolicy defines the fallback policy if the image pull from the mirrors fails.
+// +kubebuilder:validation:Enum=NeverContactSource;AllowContactingSource
+type MirrorSourcePolicy string
+
+const (
+ // NeverContactSource prevents image pull from the specified repository in the pull spec if the image pull from the mirror list fails.
+ NeverContactSource MirrorSourcePolicy = "NeverContactSource"
+
+ // AllowContactingSource allows falling back to the specified repository in the pull spec if the image pull from the mirror list fails.
+ AllowContactingSource MirrorSourcePolicy = "AllowContactingSource"
+)
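// ---------------------------------------------------------------------------
// Editor's illustrative sketches (not part of the vendored diff) for two rules
// documented above. mostSpecificSource picks the winning "source" for a pull
// spec (quay.io/libpod/busybox beats quay.io/libpod for the pull spec
// quay.io/libpod/busybox); it assumes `import "strings"`. mergeMirrors shows
// the order-preserving merge of two policies' mirror lists (a,b,c + c,d,e
// yields a,b,c,d,e); conflicting orders resolve to first-seen, which matches
// the documented "unspecified" outcome. Both are simplifications of what the
// container runtime actually does.
func mostSpecificSource(pullSpec string, sources []string) string {
	best := ""
	for _, s := range sources {
		if (pullSpec == s || strings.HasPrefix(pullSpec, s+"/")) && len(s) > len(best) {
			best = s
		}
	}
	return best
}

func mergeMirrors(a, b []ImageMirror) []ImageMirror {
	seen := make(map[ImageMirror]bool, len(a)+len(b))
	merged := make([]ImageMirror, 0, len(a)+len(b))
	for _, m := range append(append([]ImageMirror{}, a...), b...) {
		if !seen[m] {
			seen[m] = true
			merged = append(merged, m)
		}
	}
	return merged
}
// ---------------------------------------------------------------------------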
+
+// ImageDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config.
+type ImageDigestMirrors struct {
+ // source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname,
+ // e.g. docker.io, quay.io, or registry.redhat.io, will match the image pull specification of the corresponding registry.
+ // "source" uses one of the following formats:
+ // host[:port]
+ // host[:port]/namespace[/namespace…]
+ // host[:port]/namespace[/namespace…]/repo
+ // [*.]host
+ // for more information about the format, see the document about the location field:
+ // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern=`^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$`
+ Source string `json:"source"`
+ // mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified.
+ // Images can be pulled from these mirrors only if they are referenced by their digests.
+ // The mirrored location is obtained by replacing the part of the input reference that
+ // matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference,
+ // a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo
+ // repository to be used. (See the illustrative rewrite sketch after this type.)
+ // The order of mirrors in this list is treated as the user's desired priority, while source
+ // is by default considered lower priority than all mirrors.
+ // If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be
+ // pulled from the repository in the pull spec unless explicitly prohibited by "mirrorSourcePolicy".
+ // Other cluster configuration, including (but not limited to) other imageDigestMirrors objects,
+ // may impact the exact order mirrors are contacted in, or some mirrors may be contacted
+ // in parallel, so this should be considered a preference rather than a guarantee of ordering.
+ // "mirrors" uses one of the following formats:
+ // host[:port]
+ // host[:port]/namespace[/namespace…]
+ // host[:port]/namespace[/namespace…]/repo
+ // for more information about the format, see the document about the location field:
+ // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table
+ // +optional
+ // +listType=set
+ Mirrors []ImageMirror `json:"mirrors,omitempty"`
+ // mirrorSourcePolicy defines the fallback policy if the image pull from the mirrors fails.
+ // If unset, the image will continue to be pulled from the repository in the pull spec.
+ // mirrorSourcePolicy is valid configuration only when one or more mirrors are in the mirror list.
+ // +optional
+ MirrorSourcePolicy MirrorSourcePolicy `json:"mirrorSourcePolicy,omitempty"`
+}
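// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the vendored diff): rewriting a
// pull spec against a single (source, mirror) pair as described above, e.g.
// rewriteToMirror("registry.redhat.io/product/repo", "*.redhat.io",
// "mirror.local/redhat") yields "mirror.local/redhat/product/repo". It
// handles only the plain-prefix and leading "*." wildcard cases; the
// authoritative logic lives in the container runtime's registries.conf
// handling. Assumes `import "strings"`.
func rewriteToMirror(pullSpec, source, mirror string) (string, bool) {
	if strings.HasPrefix(source, "*.") {
		host := pullSpec
		if i := strings.IndexByte(pullSpec, '/'); i >= 0 {
			host = pullSpec[:i]
		}
		if strings.HasSuffix(host, strings.TrimPrefix(source, "*")) {
			return mirror + strings.TrimPrefix(pullSpec, host), true
		}
		return "", false
	}
	if pullSpec == source || strings.HasPrefix(pullSpec, source+"/") {
		return mirror + strings.TrimPrefix(pullSpec, source), true
	}
	return "", false
}
// ---------------------------------------------------------------------------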
diff --git a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go
new file mode 100644
index 000000000..5443705d2
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go
@@ -0,0 +1,113 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageTagMirrorSet holds cluster-wide information about how to handle registry mirror rules on using tag pull specification.
+// When multiple policies are defined, the outcome of the behavior is defined on each field.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageTagMirrorSet struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ImageTagMirrorSetSpec `json:"spec"`
+}
+
+// ImageTagMirrorSetSpec is the specification of the ImageTagMirrorSet CRD.
+type ImageTagMirrorSetSpec struct {
+ // imageTagMirrors allows images referenced by image tags in pods to be
+ // pulled from alternative mirrored repository locations. The image pull specification
+ // provided to the pod will be compared to the source locations described in imageTagMirrors
+ // and the image may be pulled down from any of the mirrors in the list instead of the
+ // specified repository allowing administrators to choose a potentially faster mirror.
+ // To use mirrors to pull images using digest specification only, users should configure
+ // a list of mirrors using "ImageDigestMirrorSet" CRD.
+ //
+ // If the image pull specification matches the repository of "source" in multiple imagetagmirrorset objects,
+ // only the objects which define the most specific namespace match will be used.
+ // For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as
+ // the "source", only the objects using quay.io/libpod/busybox are going to apply
+ // for pull specification quay.io/libpod/busybox.
+ // Each “source” repository is treated independently; configurations for different “source”
+ // repositories don’t interact.
+ //
+ // If the "mirrors" is not specified, the image will continue to be pulled from the specified
+ // repository in the pull spec.
+ //
+ // When multiple policies are defined for the same “source” repository, the sets of defined
+ // mirrors will be merged together, preserving the relative order of the mirrors, if possible.
+ // For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the
+ // mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict
+ // (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.
+ // Users who want a deterministic order of mirrors should configure them into one list of mirrors using the expected order.
+ // +optional
+ // +listType=atomic
+ ImageTagMirrors []ImageTagMirrors `json:"imageTagMirrors"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageTagMirrorSetList lists the items in the ImageTagMirrorSet CRD.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageTagMirrorSetList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []ImageTagMirrorSet `json:"items"`
+}
+
+// ImageTagMirrors holds cluster-wide information about how to handle mirrors in the registries config.
+type ImageTagMirrors struct {
+ // source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname,
+ // e.g. docker.io, quay.io, or registry.redhat.io, will match the image pull specification of the corresponding registry.
+ // "source" uses one of the following formats:
+ // host[:port]
+ // host[:port]/namespace[/namespace…]
+ // host[:port]/namespace[/namespace…]/repo
+ // [*.]host
+ // for more information about the format, see the document about the location field:
+ // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern=`^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$`
+ Source string `json:"source"`
+ // mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified.
+ // Images can be pulled from these mirrors only if they are referenced by their tags.
+ // The mirrored location is obtained by replacing the part of the input reference that
+ // matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference,
+ // a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo
+ // repository to be used.
+ // Pulling images by tag can potentially yield different images, depending on which endpoint we pull from.
+ // Configuring a list of mirrors using "ImageDigestMirrorSet" CRD and forcing digest-pulls for mirrors avoids that issue.
+ // The order of mirrors in this list is treated as the user's desired priority, while source
+ // is by default considered lower priority than all mirrors.
+ // If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be
+ // pulled from the repository in the pull spec unless explicitly prohibited by "mirrorSourcePolicy".
+ // Other cluster configuration, including (but not limited to) other imageTagMirrors objects,
+ // may impact the exact order mirrors are contacted in, or some mirrors may be contacted
+ // in parallel, so this should be considered a preference rather than a guarantee of ordering.
+ // "mirrors" uses one of the following formats:
+ // host[:port]
+ // host[:port]/namespace[/namespace…]
+ // host[:port]/namespace[/namespace…]/repo
+ // for more information about the format, see the document about the location field:
+ // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table
+ // +optional
+ // +listType=set
+ Mirrors []ImageMirror `json:"mirrors,omitempty"`
+ // mirrorSourcePolicy defines the fallback policy if the image pull from the mirrors fails.
+ // If unset, the image will continue to be pulled from the repository in the pull spec.
+ // mirrorSourcePolicy is valid configuration only when one or more mirrors are in the mirror list.
+ // +optional
+ MirrorSourcePolicy MirrorSourcePolicy `json:"mirrorSourcePolicy,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
new file mode 100644
index 000000000..5cfa463f0
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
@@ -0,0 +1,781 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:subresource:status
+
+// Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Infrastructure struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec InfrastructureSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status InfrastructureStatus `json:"status"`
+}
+
+// InfrastructureSpec contains settings that apply to the cluster infrastructure.
+type InfrastructureSpec struct {
+ // cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file.
+ // This configuration file is used to configure the Kubernetes cloud provider integration
+ // when using the built-in cloud provider integration or the external cloud controller manager.
+ // The namespace for this config map is openshift-config.
+ //
+ // cloudConfig should only be consumed by the kube_cloud_config controller.
+ // The controller is responsible for using the user configuration in the spec
+ // for various platforms and combining that with the user provided ConfigMap in this field
+ // to create a stitched kube cloud config.
+ // The controller generates a ConfigMap `kube-cloud-config` in the `openshift-config-managed`
+ // namespace with the kube cloud config stored under the `cloud.conf` key.
+ // All the clients are expected to use the generated ConfigMap only.
+ // (See the illustrative read sketch after this field.)
+ //
+ // +optional
+ CloudConfig ConfigMapFileReference `json:"cloudConfig"`
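// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the vendored diff): reading the
// stitched kube cloud config published by the kube_cloud_config controller,
// per the cloudConfig comment above. Assumes a k8s.io/client-go
// kubernetes.Interface plus the standard "context" and metav1 imports; the
// ConfigMap name, namespace, and key come from the comment above.
func readStitchedCloudConf(ctx context.Context, client kubernetes.Interface) (string, error) {
	cm, err := client.CoreV1().ConfigMaps("openshift-config-managed").Get(ctx, "kube-cloud-config", metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	// The stitched configuration is stored under the `cloud.conf` key.
	return cm.Data["cloud.conf"], nil
}
// ---------------------------------------------------------------------------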
+
+ // platformSpec holds desired information specific to the underlying
+ // infrastructure provider.
+ PlatformSpec PlatformSpec `json:"platformSpec,omitempty"`
+}
+
+// InfrastructureStatus describes the infrastructure the cluster is leveraging.
+type InfrastructureStatus struct {
+ // infrastructureName uniquely identifies a cluster with a human friendly name.
+ // Once set it should not be changed. Must be of max length 27 and must have only
+ // alphanumeric or hyphen characters.
+ InfrastructureName string `json:"infrastructureName"`
+
+ // platform is the underlying infrastructure provider for the cluster.
+ //
+ // Deprecated: Use platformStatus.type instead.
+ Platform PlatformType `json:"platform,omitempty"`
+
+ // platformStatus holds status information specific to the underlying
+ // infrastructure provider.
+ // +optional
+ PlatformStatus *PlatformStatus `json:"platformStatus,omitempty"`
+
+ // etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering
+ // etcd servers and clients.
+ // For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery
+ // deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release.
+ EtcdDiscoveryDomain string `json:"etcdDiscoveryDomain"`
+
+ // apiServerURL is a valid URI with scheme 'https', address and
+ // optionally a port (defaulting to 443). apiServerURL can be used by components like the web console
+ // to tell users where to find the Kubernetes API.
+ APIServerURL string `json:"apiServerURL"`
+
+ // apiServerInternalURL is a valid URI with scheme 'https',
+ // address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components
+ // like kubelets, to contact the Kubernetes API server using the
+ // infrastructure provider rather than Kubernetes networking.
+ APIServerInternalURL string `json:"apiServerInternalURI"`
+
+ // controlPlaneTopology expresses the expectations for operands that normally run on control nodes.
+ // The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster.
+ // The 'SingleReplica' mode will be used in single-node deployments
+ // and the operators should not configure the operand for highly-available operation.
+ // The 'External' mode indicates that the control plane is hosted externally to the cluster and that
+ // its components are not visible within the cluster.
+ // +kubebuilder:default=HighlyAvailable
+ // +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica;External
+ ControlPlaneTopology TopologyMode `json:"controlPlaneTopology"`
+
+ // infrastructureTopology expresses the expectations for infrastructure services that do not run on control
+ // plane nodes, usually indicated by a node selector for a `role` value
+ // other than `master`.
+ // The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster.
+ // The 'SingleReplica' mode will be used in single-node deployments + // and the operators should not configure the operand for highly-available operation + // NOTE: External topology mode is not applicable for this field. + // +kubebuilder:default=HighlyAvailable + // +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica + InfrastructureTopology TopologyMode `json:"infrastructureTopology"` +} + +// TopologyMode defines the topology mode of the control/infra nodes. +// NOTE: Enum validation is specified in each field that uses this type, +// given that External value is not applicable to the InfrastructureTopology +// field. +type TopologyMode string + +const ( + // "HighlyAvailable" is for operators to configure high-availability as much as possible. + HighlyAvailableTopologyMode TopologyMode = "HighlyAvailable" + + // "SingleReplica" is for operators to avoid spending resources for high-availability purpose. + SingleReplicaTopologyMode TopologyMode = "SingleReplica" + + // "External" indicates that the component is running externally to the cluster. When specified + // as the control plane topology, operators should avoid scheduling workloads to masters or assume + // that any of the control plane components such as kubernetes API server or etcd are visible within + // the cluster. + ExternalTopologyMode TopologyMode = "External" +) + +// PlatformType is a specific supported infrastructure provider. +// +kubebuilder:validation:Enum="";AWS;Azure;BareMetal;GCP;Libvirt;OpenStack;None;VSphere;oVirt;IBMCloud;KubeVirt;EquinixMetal;PowerVS;AlibabaCloud;Nutanix +type PlatformType string + +const ( + // AWSPlatformType represents Amazon Web Services infrastructure. + AWSPlatformType PlatformType = "AWS" + + // AzurePlatformType represents Microsoft Azure infrastructure. + AzurePlatformType PlatformType = "Azure" + + // BareMetalPlatformType represents managed bare metal infrastructure. + BareMetalPlatformType PlatformType = "BareMetal" + + // GCPPlatformType represents Google Cloud Platform infrastructure. + GCPPlatformType PlatformType = "GCP" + + // LibvirtPlatformType represents libvirt infrastructure. + LibvirtPlatformType PlatformType = "Libvirt" + + // OpenStackPlatformType represents OpenStack infrastructure. + OpenStackPlatformType PlatformType = "OpenStack" + + // NonePlatformType means there is no infrastructure provider. + NonePlatformType PlatformType = "None" + + // VSpherePlatformType represents VMWare vSphere infrastructure. + VSpherePlatformType PlatformType = "VSphere" + + // OvirtPlatformType represents oVirt/RHV infrastructure. + OvirtPlatformType PlatformType = "oVirt" + + // IBMCloudPlatformType represents IBM Cloud infrastructure. + IBMCloudPlatformType PlatformType = "IBMCloud" + + // KubevirtPlatformType represents KubeVirt/Openshift Virtualization infrastructure. + KubevirtPlatformType PlatformType = "KubeVirt" + + // EquinixMetalPlatformType represents Equinix Metal infrastructure. + EquinixMetalPlatformType PlatformType = "EquinixMetal" + + // PowerVSPlatformType represents IBM Power Systems Virtual Servers infrastructure. + PowerVSPlatformType PlatformType = "PowerVS" + + // AlibabaCloudPlatformType represents Alibaba Cloud infrastructure. + AlibabaCloudPlatformType PlatformType = "AlibabaCloud" + + // NutanixPlatformType represents Nutanix infrastructure. 
+ NutanixPlatformType PlatformType = "Nutanix" +) + +// IBMCloudProviderType is a specific supported IBM Cloud provider cluster type +type IBMCloudProviderType string + +const ( + // Classic means that the IBM Cloud cluster is using classic infrastructure + IBMCloudProviderTypeClassic IBMCloudProviderType = "Classic" + + // VPC means that the IBM Cloud cluster is using VPC infrastructure + IBMCloudProviderTypeVPC IBMCloudProviderType = "VPC" + + // IBMCloudProviderTypeUPI means that the IBM Cloud cluster is using user provided infrastructure. + // This is utilized in IBM Cloud Satellite environments. + IBMCloudProviderTypeUPI IBMCloudProviderType = "UPI" +) + +// PlatformSpec holds the desired state specific to the underlying infrastructure provider +// of the current cluster. Since these are used at spec-level for the underlying cluster, it +// is supposed that only one of the spec structs is set. +type PlatformSpec struct { + // type is the underlying infrastructure provider for the cluster. This + // value controls whether infrastructure automation such as service load + // balancers, dynamic volume provisioning, machine creation and deletion, and + // other integrations are enabled. If None, no infrastructure automation is + // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", + // "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", + // "AlibabaCloud", "Nutanix" and "None". Individual components may not support all platforms, + // and must handle unrecognized platforms as None if they do not support that platform. + // + // +unionDiscriminator + Type PlatformType `json:"type"` + + // AWS contains settings specific to the Amazon Web Services infrastructure provider. + // +optional + AWS *AWSPlatformSpec `json:"aws,omitempty"` + + // Azure contains settings specific to the Azure infrastructure provider. + // +optional + Azure *AzurePlatformSpec `json:"azure,omitempty"` + + // GCP contains settings specific to the Google Cloud Platform infrastructure provider. + // +optional + GCP *GCPPlatformSpec `json:"gcp,omitempty"` + + // BareMetal contains settings specific to the BareMetal platform. + // +optional + BareMetal *BareMetalPlatformSpec `json:"baremetal,omitempty"` + + // OpenStack contains settings specific to the OpenStack infrastructure provider. + // +optional + OpenStack *OpenStackPlatformSpec `json:"openstack,omitempty"` + + // Ovirt contains settings specific to the oVirt infrastructure provider. + // +optional + Ovirt *OvirtPlatformSpec `json:"ovirt,omitempty"` + + // VSphere contains settings specific to the VSphere infrastructure provider. + // +optional + VSphere *VSpherePlatformSpec `json:"vsphere,omitempty"` + + // IBMCloud contains settings specific to the IBMCloud infrastructure provider. + // +optional + IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"` + + // Kubevirt contains settings specific to the kubevirt infrastructure provider. + // +optional + Kubevirt *KubevirtPlatformSpec `json:"kubevirt,omitempty"` + + // EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. + // +optional + EquinixMetal *EquinixMetalPlatformSpec `json:"equinixMetal,omitempty"` + + // PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. + // +optional + PowerVS *PowerVSPlatformSpec `json:"powervs,omitempty"` + + // AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. 
+ // +optional
+ AlibabaCloud *AlibabaCloudPlatformSpec `json:"alibabaCloud,omitempty"`
+
+ // Nutanix contains settings specific to the Nutanix infrastructure provider.
+ // +optional
+ Nutanix *NutanixPlatformSpec `json:"nutanix,omitempty"`
+}
+
+// PlatformStatus holds the current status specific to the underlying infrastructure provider
+// of the current cluster. Since these are used at status-level for the underlying cluster, it
+// is supposed that only one of the status structs is set.
+type PlatformStatus struct {
+ // type is the underlying infrastructure provider for the cluster. This
+ // value controls whether infrastructure automation such as service load
+ // balancers, dynamic volume provisioning, machine creation and deletion, and
+ // other integrations are enabled. If None, no infrastructure automation is
+ // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt",
+ // "OpenStack", "VSphere", "oVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", "Nutanix" and "None".
+ // Individual components may not support all platforms, and must handle
+ // unrecognized platforms as None if they do not support that platform.
+ // (See the illustrative consumer sketch after this type.)
+ //
+ // This value will be synced to `status.platform` and `status.platformStatus.type`.
+ // Currently this value cannot be changed once set.
+ Type PlatformType `json:"type"`
+
+ // AWS contains settings specific to the Amazon Web Services infrastructure provider.
+ // +optional
+ AWS *AWSPlatformStatus `json:"aws,omitempty"`
+
+ // Azure contains settings specific to the Azure infrastructure provider.
+ // +optional
+ Azure *AzurePlatformStatus `json:"azure,omitempty"`
+
+ // GCP contains settings specific to the Google Cloud Platform infrastructure provider.
+ // +optional
+ GCP *GCPPlatformStatus `json:"gcp,omitempty"`
+
+ // BareMetal contains settings specific to the BareMetal platform.
+ // +optional
+ BareMetal *BareMetalPlatformStatus `json:"baremetal,omitempty"`
+
+ // OpenStack contains settings specific to the OpenStack infrastructure provider.
+ // +optional
+ OpenStack *OpenStackPlatformStatus `json:"openstack,omitempty"`
+
+ // Ovirt contains settings specific to the oVirt infrastructure provider.
+ // +optional
+ Ovirt *OvirtPlatformStatus `json:"ovirt,omitempty"`
+
+ // VSphere contains settings specific to the VSphere infrastructure provider.
+ // +optional
+ VSphere *VSpherePlatformStatus `json:"vsphere,omitempty"`
+
+ // IBMCloud contains settings specific to the IBMCloud infrastructure provider.
+ // +optional
+ IBMCloud *IBMCloudPlatformStatus `json:"ibmcloud,omitempty"`
+
+ // Kubevirt contains settings specific to the kubevirt infrastructure provider.
+ // +optional
+ Kubevirt *KubevirtPlatformStatus `json:"kubevirt,omitempty"`
+
+ // EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.
+ // +optional
+ EquinixMetal *EquinixMetalPlatformStatus `json:"equinixMetal,omitempty"`
+
+ // PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider.
+ // +optional
+ PowerVS *PowerVSPlatformStatus `json:"powervs,omitempty"`
+
+ // AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.
+ // +optional
+ AlibabaCloud *AlibabaCloudPlatformStatus `json:"alibabaCloud,omitempty"`
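// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the vendored diff): consuming
// InfrastructureStatus. regionFor treats unrecognized platforms as None, as
// the PlatformStatus comment above requires of individual components, and
// replicasFor shows a topology-aware operand replica count per the
// controlPlaneTopology/infrastructureTopology contract. The concrete replica
// counts are hypothetical operator policy, not mandated by the API.
func regionFor(ps *PlatformStatus) string {
	if ps == nil {
		return ""
	}
	switch ps.Type {
	case AWSPlatformType:
		if ps.AWS != nil {
			return ps.AWS.Region
		}
	case GCPPlatformType:
		if ps.GCP != nil {
			return ps.GCP.Region
		}
	default:
		// Unrecognized (or regionless) platforms are handled as None.
	}
	return ""
}

func replicasFor(t TopologyMode) int32 {
	if t == SingleReplicaTopologyMode {
		return 1
	}
	// HighlyAvailable; operands facing an External control plane typically
	// skip self-hosted scheduling concerns entirely rather than scaling here.
	return 2
}
// ---------------------------------------------------------------------------
+
+ // Nutanix contains settings specific to the Nutanix infrastructure provider.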
+ // +optional
+ Nutanix *NutanixPlatformStatus `json:"nutanix,omitempty"`
+}
+
+// AWSServiceEndpoint stores the configuration of a custom url to
+// override existing defaults of AWS Services.
+type AWSServiceEndpoint struct {
+ // name is the name of the AWS service.
+ // The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html
+ // This must be provided and cannot be empty.
+ //
+ // +kubebuilder:validation:Pattern=`^[a-z0-9-]+$`
+ Name string `json:"name"`
+
+ // url is a fully qualified URI with scheme https, that overrides the default generated
+ // endpoint for a client.
+ // This must be provided and cannot be empty.
+ //
+ // +kubebuilder:validation:Pattern=`^https://`
+ URL string `json:"url"`
+}
+
+// AWSPlatformSpec holds the desired state of the Amazon Web Services infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type AWSPlatformSpec struct {
+ // serviceEndpoints list contains custom endpoints which will override default
+ // service endpoint of AWS Services.
+ // There must be only one ServiceEndpoint for a service.
+ // +optional
+ ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"`
+}
+
+// AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.
+type AWSPlatformStatus struct {
+ // region holds the default AWS region for new AWS resources created by the cluster.
+ Region string `json:"region"`
+
+ // ServiceEndpoints list contains custom endpoints which will override default
+ // service endpoint of AWS Services.
+ // There must be only one ServiceEndpoint for a service.
+ // +optional
+ ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"`
+
+ // resourceTags is a list of additional tags to apply to AWS resources created for the cluster.
+ // See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources.
+ // AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags
+ // available for the user.
+ // +kubebuilder:validation:MaxItems=25
+ // +optional
+ ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"`
+}
+
+// AWSResourceTag is a tag to apply to AWS resources created for the cluster.
+type AWSResourceTag struct {
+ // key is the key of the tag
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=128
+ // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$`
+ // +required
+ Key string `json:"key"`
+ // value is the value of the tag.
+ // Some AWS services do not support empty values. Since tags are added to resources in many services, the
+ // length of the tag value must meet the requirements of all services.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=256
+ // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$`
+ // +required
+ Value string `json:"value"`
+}
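// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the vendored diff): an
// AWSPlatformStatus with a custom service endpoint and user tags, within the
// documented limit of 25 user tags. All names, URLs, and tag values here are
// hypothetical.
func exampleAWSPlatformStatus() *AWSPlatformStatus {
	return &AWSPlatformStatus{
		Region: "us-east-1",
		ServiceEndpoints: []AWSServiceEndpoint{
			{Name: "ec2", URL: "https://vpce-example.ec2.us-east-1.vpce.amazonaws.com"},
		},
		ResourceTags: []AWSResourceTag{
			{Key: "team", Value: "platform"},
			{Key: "cost-center", Value: "cc-1234"},
		},
	}
}
// ---------------------------------------------------------------------------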
+ ResourceGroupName string `json:"resourceGroupName"` + + // networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. + // If empty, the value is the same as ResourceGroupName. + // +optional + NetworkResourceGroupName string `json:"networkResourceGroupName,omitempty"` + + // cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK + // with the appropriate Azure API endpoints. + // If empty, the value is equal to `AzurePublicCloud`. + // +optional + CloudName AzureCloudEnvironment `json:"cloudName,omitempty"` + + // armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack. + // +optional + ARMEndpoint string `json:"armEndpoint,omitempty"` +} + +// AzureCloudEnvironment is the name of the Azure cloud environment +// +kubebuilder:validation:Enum="";AzurePublicCloud;AzureUSGovernmentCloud;AzureChinaCloud;AzureGermanCloud;AzureStackCloud +type AzureCloudEnvironment string + +const ( + // AzurePublicCloud is the general-purpose, public Azure cloud environment. + AzurePublicCloud AzureCloudEnvironment = "AzurePublicCloud" + + // AzureUSGovernmentCloud is the Azure cloud environment for the US government. + AzureUSGovernmentCloud AzureCloudEnvironment = "AzureUSGovernmentCloud" + + // AzureChinaCloud is the Azure cloud environment used in China. + AzureChinaCloud AzureCloudEnvironment = "AzureChinaCloud" + + // AzureGermanCloud is the Azure cloud environment used in Germany. + AzureGermanCloud AzureCloudEnvironment = "AzureGermanCloud" + + // AzureStackCloud is the Azure cloud environment used at the edge and on premises. + AzureStackCloud AzureCloudEnvironment = "AzureStackCloud" +) + +// GCPPlatformSpec holds the desired state of the Google Cloud Platform infrastructure provider. +// This only includes fields that can be modified in the cluster. +type GCPPlatformSpec struct{} + +// GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider. +type GCPPlatformStatus struct { + // projectID is the Project ID for new GCP resources created for the cluster. + ProjectID string `json:"projectID"` + + // region holds the region for new GCP resources created for the cluster. + Region string `json:"region"` +} + +// BareMetalPlatformSpec holds the desired state of the BareMetal infrastructure provider. +// This only includes fields that can be modified in the cluster. +type BareMetalPlatformSpec struct{} + +// BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider. +// For more information about the network architecture used with the BareMetal platform type, see: +// https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md +type BareMetalPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
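+
+ // Illustrative sketch for AzurePlatformStatus above (not part of the
+ // upstream API): the documented defaulting rule for cloudName, where an
+ // empty value means AzurePublicCloud. The helper name is hypothetical.
+ //
+ //	func effectiveCloudName(s *AzurePlatformStatus) AzureCloudEnvironment {
+ //		if s == nil || s.CloudName == "" {
+ //			return AzurePublicCloud
+ //		}
+ //		return s.CloudName
+ //	}
+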
+ IngressIP string `json:"ingressIP,omitempty"` + + // nodeDNSIP is the IP address for the internal DNS used by the + // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` + // provides name resolution for the nodes themselves. There is no DNS-as-a-service for + // BareMetal deployments. In order to minimize necessary changes to the + // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames + // to the nodes in the cluster. + NodeDNSIP string `json:"nodeDNSIP,omitempty"` +} + +// OpenStackPlatformSpec holds the desired state of the OpenStack infrastructure provider. +// This only includes fields that can be modified in the cluster. +type OpenStackPlatformSpec struct{} + +// OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider. +type OpenStackPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // cloudName is the name of the desired OpenStack cloud in the + // client configuration file (`clouds.yaml`). + CloudName string `json:"cloudName,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + IngressIP string `json:"ingressIP,omitempty"` + + // nodeDNSIP is the IP address for the internal DNS used by the + // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` + // provides name resolution for the nodes themselves. There is no DNS-as-a-service for + // OpenStack deployments. In order to minimize necessary changes to the + // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames + // to the nodes in the cluster. + NodeDNSIP string `json:"nodeDNSIP,omitempty"` +} + +// OvirtPlatformSpec holds the desired state of the oVirt infrastructure provider. +// This only includes fields that can be modified in the cluster. +type OvirtPlatformSpec struct{} + +// OvirtPlatformStatus holds the current status of the oVirt infrastructure provider. +type OvirtPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + IngressIP string `json:"ingressIP,omitempty"` + + // deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release. + NodeDNSIP string `json:"nodeDNSIP,omitempty"` +} + +// VSpherePlatformSpec holds the desired state of the vSphere infrastructure provider. +// This only includes fields that can be modified in the cluster. 
+type VSpherePlatformSpec struct{} + +// VSpherePlatformStatus holds the current status of the vSphere infrastructure provider. +type VSpherePlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + IngressIP string `json:"ingressIP,omitempty"` + + // nodeDNSIP is the IP address for the internal DNS used by the + // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` + // provides name resolution for the nodes themselves. There is no DNS-as-a-service for + // vSphere deployments. In order to minimize necessary changes to the + // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames + // to the nodes in the cluster. + NodeDNSIP string `json:"nodeDNSIP,omitempty"` +} + +// IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. +// This only includes fields that can be modified in the cluster. +type IBMCloudPlatformSpec struct{} + +// IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider. +type IBMCloudPlatformStatus struct { + // Location is where the cluster has been deployed + Location string `json:"location,omitempty"` + + // ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. + ResourceGroupName string `json:"resourceGroupName,omitempty"` + + // ProviderType indicates the type of cluster that was created + ProviderType IBMCloudProviderType `json:"providerType,omitempty"` + + // CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + // the DNS zone for the cluster's base domain + CISInstanceCRN string `json:"cisInstanceCRN,omitempty"` +} + +// KubevirtPlatformSpec holds the desired state of the kubevirt infrastructure provider. +// This only includes fields that can be modified in the cluster. +type KubevirtPlatformSpec struct{} + +// KubevirtPlatformStatus holds the current status of the kubevirt infrastructure provider. +type KubevirtPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + IngressIP string `json:"ingressIP,omitempty"` +} + +// EquinixMetalPlatformSpec holds the desired state of the Equinix Metal infrastructure provider. +// This only includes fields that can be modified in the cluster. +type EquinixMetalPlatformSpec struct{} + +// EquinixMetalPlatformStatus holds the current status of the Equinix Metal infrastructure provider.
+type EquinixMetalPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + IngressIP string `json:"ingressIP,omitempty"` +} + +// PowerVSServiceEndpoint stores the configuration of a custom URL to +// override existing defaults of PowerVS services. +type PowerVSServiceEndpoint struct { + // name is the name of the Power VS service. + // Some of the services are: + // IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api + // ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller + // Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^[a-z0-9-]+$` + Name string `json:"name"` + + // url is a fully qualified URI with scheme https, that overrides the default generated + // endpoint for a client. + // This must be provided and cannot be empty. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Format=uri + // +kubebuilder:validation:Pattern=`^https://` + URL string `json:"url"` +} + +// PowerVSPlatformSpec holds the desired state of the IBM Power Systems Virtual Servers infrastructure provider. +// This only includes fields that can be modified in the cluster. +type PowerVSPlatformSpec struct { + // serviceEndpoints is a list of custom endpoints which will override the default + // service endpoints of a Power VS service. + // +listType=map + // +listMapKey=name + // +optional + ServiceEndpoints []PowerVSServiceEndpoint `json:"serviceEndpoints,omitempty"` +} + +// PowerVSPlatformStatus holds the current status of the IBM Power Systems Virtual Servers infrastructure provider. +type PowerVSPlatformStatus struct { + // region holds the default Power VS region for new Power VS resources created by the cluster. + Region string `json:"region"` + + // zone holds the default zone for the new Power VS resources created by the cluster. + // Note: Currently only single-zone OCP clusters are supported + Zone string `json:"zone"` + + // serviceEndpoints is a list of custom endpoints which will override the default + // service endpoints of a Power VS service. + // +optional + ServiceEndpoints []PowerVSServiceEndpoint `json:"serviceEndpoints,omitempty"` + + // CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + // the DNS zone for the cluster's base domain + CISInstanceCRN string `json:"cisInstanceCRN,omitempty"` +} + +// AlibabaCloudPlatformSpec holds the desired state of the Alibaba Cloud infrastructure provider. +// This only includes fields that can be modified in the cluster. +type AlibabaCloudPlatformSpec struct{} + +// AlibabaCloudPlatformStatus holds the current status of the Alibaba Cloud infrastructure provider. +type AlibabaCloudPlatformStatus struct { + // region specifies the region for Alibaba Cloud resources created for the cluster.
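+
+ // Illustrative sketch for PowerVSPlatformSpec above (not part of the
+ // upstream API): overriding the IAM endpoint. The URL is hypothetical; per
+ // the validation markers it must be an https URI and the name must match
+ // ^[a-z0-9-]+$.
+ //
+ //	spec := PowerVSPlatformSpec{
+ //		ServiceEndpoints: []PowerVSServiceEndpoint{
+ //			{Name: "iam", URL: "https://iam.test.cloud.ibm.com"},
+ //		},
+ //	}
+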
+ // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^[0-9A-Za-z-]+$` + // +required + Region string `json:"region"` + // resourceGroupID is the ID of the resource group for the cluster. + // +kubebuilder:validation:Pattern=`^(rg-[0-9A-Za-z]+)?$` + // +optional + ResourceGroupID string `json:"resourceGroupID,omitempty"` + // resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster. + // +kubebuilder:validation:MaxItems=20 + // +listType=map + // +listMapKey=key + // +optional + ResourceTags []AlibabaCloudResourceTag `json:"resourceTags,omitempty"` +} + +// AlibabaCloudResourceTag is a tag to apply to Alibaba Cloud resources created for the cluster. +type AlibabaCloudResourceTag struct { + // key is the key of the tag. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +required + Key string `json:"key"` + // value is the value of the tag. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +required + Value string `json:"value"` +} + +// NutanixPlatformSpec holds the desired state of the Nutanix infrastructure provider. +// This only includes fields that can be modified in the cluster. +type NutanixPlatformSpec struct { + // prismCentral holds the endpoint address and port to access the Nutanix Prism Central. + // When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. + // Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the + // proxy spec.noProxy list. + // +kubebuilder:validation:Required + PrismCentral NutanixPrismEndpoint `json:"prismCentral"` + + // prismElements holds one or more endpoint address and port data to access the Nutanix + // Prism Elements (clusters) of the Nutanix Prism Central. Currently we only support one + // Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) + // used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) + // spread over multiple Prism Elements (clusters) of the Prism Central. + // +kubebuilder:validation:Required + // +listType=map + // +listMapKey=name + PrismElements []NutanixPrismElementEndpoint `json:"prismElements"` +} + +// NutanixPrismEndpoint holds the endpoint address and port to access the Nutanix Prism Central or Element (cluster) +type NutanixPrismEndpoint struct { + // address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster) + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength=256 + Address string `json:"address"` + + // port is the port number to access the Nutanix Prism Central or Element (cluster) + // +kubebuilder:validation:Required + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + Port int32 `json:"port"` +} + +// NutanixPrismElementEndpoint holds the name and endpoint data for a Prism Element (cluster) +type NutanixPrismElementEndpoint struct { + // name is the name of the Prism Element (cluster). This value will correspond with + // the cluster field configured on other resources (e.g. Machines, PVCs, etc.). + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength=256 + Name string `json:"name"` + + // endpoint holds the endpoint address and port data of the Prism Element (cluster).
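+
+ // Illustrative sketch for NutanixPlatformSpec above (not part of the
+ // upstream API): one Prism Central plus the single currently supported
+ // Prism Element. The addresses, names, and port are hypothetical example
+ // values.
+ //
+ //	spec := NutanixPlatformSpec{
+ //		PrismCentral: NutanixPrismEndpoint{Address: "pc.example.com", Port: 9440},
+ //		PrismElements: []NutanixPrismElementEndpoint{
+ //			{Name: "pe-1", Endpoint: NutanixPrismEndpoint{Address: "pe-1.example.com", Port: 9440}},
+ //		},
+ //	}
+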
+ // When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. + // Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the + // proxy spec.noProxy list. + // +kubebuilder:validation:Required + Endpoint NutanixPrismEndpoint `json:"endpoint"` +} + +// NutanixPlatformStatus holds the current status of the Nutanix infrastructure provider. +type NutanixPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + IngressIP string `json:"ingressIP,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InfrastructureList is a list of Infrastructure resources. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type InfrastructureList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Infrastructure `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go new file mode 100644 index 000000000..c7fec9c03 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go @@ -0,0 +1,252 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Ingress holds cluster-wide information about ingress, including the default ingress domain +// used for routes. The canonical name is `cluster`. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Ingress struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec IngressSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status IngressStatus `json:"status"` +} + +type IngressSpec struct { + // domain is used to generate a default host name for a route when the + // route's host name is empty. The generated host name will follow this + // pattern: "<route-name>.<route-namespace>.<domain>" (see the sketch below). + // + // It is also used as the default wildcard domain suffix for ingress. The + // default ingresscontroller domain will follow this pattern: "*.<domain>". + // + // Once set, changing domain is not currently supported. + Domain string `json:"domain"` + + // appsDomain is an optional domain to use instead of the one specified + // in the domain field when a Route is created without specifying an explicit + // host. If appsDomain is nonempty, this value is used to generate default + // host values for Route. Unlike domain, appsDomain may be modified after + // installation.
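+
+ // Illustrative sketch for IngressSpec.domain above (not part of the
+ // upstream API): how a default route host is derived from the pattern
+ // "<route-name>.<route-namespace>.<domain>". The function name is
+ // hypothetical.
+ //
+ //	func defaultRouteHost(routeName, routeNamespace, domain string) string {
+ //		return fmt.Sprintf("%s.%s.%s", routeName, routeNamespace, domain)
+ //	}
+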
+ // This assumes a new ingresscontroller has been set up with a wildcard + // certificate. + // +optional + AppsDomain string `json:"appsDomain,omitempty"` + + // componentRoutes is an optional list of routes that are managed by OpenShift components + // that a cluster-admin is able to configure the hostname and serving certificate for. + // The namespace and name of each route in this list should match an existing entry in the + // status.componentRoutes list. + // + // To determine the set of configurable Routes, look at namespace and name of entries in the + // .status.componentRoutes list, where participating operators write the status of + // configurable routes. + // +optional + ComponentRoutes []ComponentRouteSpec `json:"componentRoutes,omitempty"` + + // requiredHSTSPolicies specifies HSTS policies that are required to be set on newly created or updated routes + // matching the domainPattern/s and namespaceSelector/s that are specified in the policy. + // Each requiredHSTSPolicy must have at least a domainPattern and a maxAge to validate a route's HSTS Policy + // annotation, and affect route admission. + // + // A candidate route is checked for HSTS Policies if it has the HSTS Policy route annotation: + // "haproxy.router.openshift.io/hsts_header" + // E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains + // + // - For each candidate route, if it matches a requiredHSTSPolicy domainPattern and optional namespaceSelector, + // then the maxAge, preloadPolicy, and includeSubdomainsPolicy must be valid to be admitted. Otherwise, the route + // is rejected. + // - The first match, by domainPattern and optional namespaceSelector, in the ordering of the RequiredHSTSPolicies + // determines the route's admission status. + // - If the candidate route doesn't match any requiredHSTSPolicy domainPattern and optional namespaceSelector, + // then it may use any HSTS Policy annotation. + // + // The HSTS policy configuration may be changed after routes have already been created. An update to a previously + // admitted route may then fail if the updated route does not conform to the updated HSTS policy configuration. + // However, changing the HSTS policy configuration will not cause a route that is already admitted to stop working. + // + // Note that if there are no RequiredHSTSPolicies, any HSTS Policy annotation on the route is valid. + // +optional + RequiredHSTSPolicies []RequiredHSTSPolicy `json:"requiredHSTSPolicies,omitempty"` +} + +// ConsumingUser is an alias for string which we add validation to. Currently only service accounts are supported. +// +kubebuilder:validation:Pattern="^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$" +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=512 +type ConsumingUser string + +// Hostname is an alias for hostname string validation. +// +// The left operand of the | is the original kubebuilder hostname validation format, which is incorrect because it +// allows upper case letters, disallows hyphen or number in the TLD, and allows labels to start/end in non-alphanumeric +// characters. See https://bugzilla.redhat.com/show_bug.cgi?id=2039256.
+// ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$ +// +// The right operand of the | is a new pattern that mimics the current API route admission validation on hostname, +// except that it allows hostnames longer than the maximum length: +// ^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ +// +// Both operand patterns are made available so that modifications on ingress spec can still happen after an invalid hostname +// was saved via validation by the incorrect left operand of the | operator. +// +// +kubebuilder:validation:Pattern=`^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$` +type Hostname string + +type IngressStatus struct { + // componentRoutes is where participating operators place the current route status for routes whose + // hostnames and serving certificates can be customized by the cluster-admin. + // +optional + ComponentRoutes []ComponentRouteStatus `json:"componentRoutes,omitempty"` + + // defaultPlacement is set at installation time to control which + // nodes will host the ingress router pods by default. The options are + // control-plane nodes or worker nodes. + // + // This field works by dictating how the Cluster Ingress Operator will + // consider unset replicas and nodePlacement fields in IngressController + // resources when creating the corresponding Deployments. + // + // See the documentation for the IngressController replicas and nodePlacement + // fields for more information. + // + // When omitted, the default value is Workers + // + // +kubebuilder:validation:Enum:="ControlPlane";"Workers";"" + // +optional + DefaultPlacement DefaultPlacement `json:"defaultPlacement"` +} + +// ComponentRouteSpec allows for configuration of a route's hostname and serving certificate. +type ComponentRouteSpec struct { + // namespace is the namespace of the route to customize. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of status.componentRoutes if the route is to be customized. + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Required + // +required + Namespace string `json:"namespace"` + + // name is the logical name of the route to customize. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of status.componentRoutes if the route is to be customized. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` + + // hostname is the hostname that should be used by the route. + // +kubebuilder:validation:Required + // +required + Hostname Hostname `json:"hostname"` + + // servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. + // The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. 
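+
+ // Illustrative sketch for ComponentRouteSpec above (not part of the
+ // upstream API): customizing the console route's hostname and serving
+ // certificate. All values are hypothetical; SecretNameReference is assumed
+ // to be the name-only secret reference defined elsewhere in this package.
+ //
+ //	route := ComponentRouteSpec{
+ //		Namespace: "openshift-console",
+ //		Name:      "console",
+ //		Hostname:  "console.apps.example.com",
+ //		ServingCertKeyPairSecret: SecretNameReference{Name: "console-tls"},
+ //	}
+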
+ // If the custom hostname uses the default routing suffix of the cluster, + // the Secret specification for a serving certificate will not be needed. + // +optional + ServingCertKeyPairSecret SecretNameReference `json:"servingCertKeyPairSecret"` +} + +// ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate. +type ComponentRouteStatus struct { + // namespace is the namespace of the route to customize. It must be a real namespace. Using an actual namespace + // ensures that no two components will conflict and the same component can be installed multiple times. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of spec.componentRoutes if the route is to be customized. + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Required + // +required + Namespace string `json:"namespace"` + + // name is the logical name of the route to customize. It does not have to be the actual name of a route resource + // but it cannot be renamed. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of spec.componentRoutes if the route is to be customized. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` + + // defaultHostname is the hostname of this route prior to customization. + // +kubebuilder:validation:Required + // +required + DefaultHostname Hostname `json:"defaultHostname"` + + // consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret. + // +kubebuilder:validation:MaxItems=5 + // +optional + ConsumingUsers []ConsumingUser `json:"consumingUsers,omitempty"` + + // currentHostnames is the list of current names used by the route. Typically, this list should consist of a single + // hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list. + // +kubebuilder:validation:MinItems=1 + // +optional + CurrentHostnames []Hostname `json:"currentHostnames,omitempty"` + + // conditions are used to communicate the state of the componentRoutes entry. + // + // Supported conditions include Available, Degraded and Progressing. + // + // If Available is true, the content served by the route can be accessed by users. This includes cases + // where a default may continue to serve content while the customized route specified by the cluster-admin + // is being configured. + // + // If Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry. + // The currentHostnames field may or may not be in effect. + // + // If Progressing is true, that means the component is taking some action related to the componentRoutes entry. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied. + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:Required + // +required + RelatedObjects []ObjectReference `json:"relatedObjects"` +} + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +openshift:compatibility-gen:level=1 +type IngressList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Ingress `json:"items"` +} + +// DefaultPlacement defines the default placement of ingress router pods. +type DefaultPlacement string + +const ( + // "Workers" is for having router pods placed on worker nodes by default. + DefaultPlacementWorkers DefaultPlacement = "Workers" + + // "ControlPlane" is for having router pods placed on control-plane nodes by default. + DefaultPlacementControlPlane DefaultPlacement = "ControlPlane" +) diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go new file mode 100644 index 000000000..59392a96d --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_network.go @@ -0,0 +1,177 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. +// Please view network.spec for an explanation on what applies when configuring this resource. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Network struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration. + // As a general rule, this SHOULD NOT be read directly. Instead, you should + // consume the NetworkStatus, as it indicates the currently deployed configuration. + // Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. + // +kubebuilder:validation:Required + // +required + Spec NetworkSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status NetworkStatus `json:"status"` +} + +// NetworkSpec is the desired network configuration. +// As a general rule, this SHOULD NOT be read directly. Instead, you should +// consume the NetworkStatus, as it indicates the currently deployed configuration. +// Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. +type NetworkSpec struct { + // IP address pool to use for pod IPs. + // This field is immutable after installation. + ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"` + + // IP address pool for services. + // Currently, we only support a single entry here. + // This field is immutable after installation. + ServiceNetwork []string `json:"serviceNetwork"` + + // NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). + // This should match a value that the cluster-network-operator understands, + // or else no networking will be installed. + // Currently supported values are: + // - OpenShiftSDN + // This field is immutable after installation. + NetworkType string `json:"networkType"` + + // externalIP defines configuration for controllers that + // affect Service.ExternalIP. If nil, then ExternalIP is + // not allowed to be set. 
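+
+ // Illustrative sketch for NetworkSpec above (not part of the upstream API):
+ // an install-time network configuration using commonly cited default CIDRs;
+ // the concrete values are examples, not requirements.
+ //
+ //	spec := NetworkSpec{
+ //		ClusterNetwork: []ClusterNetworkEntry{{CIDR: "10.128.0.0/14", HostPrefix: 23}},
+ //		ServiceNetwork: []string{"172.30.0.0/16"},
+ //		NetworkType:    "OpenShiftSDN",
+ //	}
+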
+ // +optional + ExternalIP *ExternalIPConfig `json:"externalIP,omitempty"` + + // The port range allowed for Services of type NodePort. + // If not specified, the default of 30000-32767 will be used. + // Such Services without a NodePort specified will have one + // automatically allocated from this range. + // This parameter can be updated after the cluster is + // installed. + // +kubebuilder:validation:Pattern=`^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$` + ServiceNodePortRange string `json:"serviceNodePortRange,omitempty"` +} + +// NetworkStatus is the current network configuration. +type NetworkStatus struct { + // IP address pool to use for pod IPs. + ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"` + + // IP address pool for services. + // Currently, we only support a single entry here. + ServiceNetwork []string `json:"serviceNetwork,omitempty"` + + // NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). + NetworkType string `json:"networkType,omitempty"` + + // ClusterNetworkMTU is the MTU for inter-pod networking. + ClusterNetworkMTU int `json:"clusterNetworkMTU,omitempty"` + + // Migration contains the cluster network migration configuration. + Migration *NetworkMigration `json:"migration,omitempty"` +} + +// ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs +// are allocated. +type ClusterNetworkEntry struct { + // The complete block for pod IPs. + CIDR string `json:"cidr"` + + // The size (prefix) of block to allocate to each node. If this + // field is not used by the plugin, it can be left unset. + // +kubebuilder:validation:Minimum=0 + // +optional + HostPrefix uint32 `json:"hostPrefix,omitempty"` +} + +// ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field +// of a Service resource. +type ExternalIPConfig struct { + // policy is a set of restrictions applied to the ExternalIP field. + // If nil or empty, then ExternalIP is not allowed to be set. + // +optional + Policy *ExternalIPPolicy `json:"policy,omitempty"` + + // autoAssignCIDRs is a list of CIDRs from which to automatically assign + // Service.ExternalIP. These are assigned when the service is of type + // LoadBalancer. In general, this is only useful for bare-metal clusters. + // In OpenShift 3.x, this was misleadingly called "IngressIPs". + // Automatically assigned External IPs are not affected by any + // ExternalIPPolicy rules. + // Currently, only one entry may be provided. + // +optional + AutoAssignCIDRs []string `json:"autoAssignCIDRs,omitempty"` +} + +// ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP +// field in a Service. If the zero struct is supplied, then none are permitted. +// The policy controller always allows automatically assigned external IPs. +type ExternalIPPolicy struct { + // allowedCIDRs is the list of allowed CIDRs. + AllowedCIDRs []string `json:"allowedCIDRs,omitempty"` + + // rejectedCIDRs is the list of disallowed CIDRs. These take precedence + // over allowedCIDRs. + // +optional + RejectedCIDRs []string `json:"rejectedCIDRs,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1 +type NetworkList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Network `json:"items"` +} + +// NetworkMigration represents the cluster network migration configuration. +type NetworkMigration struct { + // NetworkType is the target plugin that is to be deployed. + // Currently supported values are: OpenShiftSDN, OVNKubernetes + // +kubebuilder:validation:Enum={"OpenShiftSDN","OVNKubernetes"} + // +optional + NetworkType string `json:"networkType,omitempty"` + + // MTU contains the MTU migration configuration. + // +optional + MTU *MTUMigration `json:"mtu,omitempty"` +} + +// MTUMigration contains information about MTU migration. +type MTUMigration struct { + // Network contains MTU migration configuration for the default network. + // +optional + Network *MTUMigrationValues `json:"network,omitempty"` + + // Machine contains MTU migration configuration for the machine's uplink. + // +optional + Machine *MTUMigrationValues `json:"machine,omitempty"` +} + +// MTUMigrationValues contains the values for an MTU migration. +type MTUMigrationValues struct { + // To is the MTU to migrate to. + // +kubebuilder:validation:Minimum=0 + To *uint32 `json:"to"` + + // From is the MTU to migrate from. + // +kubebuilder:validation:Minimum=0 + // +optional + From *uint32 `json:"from,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_node.go b/vendor/github.com/openshift/api/config/v1/types_node.go new file mode 100644 index 000000000..d927bd894 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_node.go @@ -0,0 +1,149 @@ +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Node holds cluster-wide information about node specific features. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +kubebuilder:resource:path=nodes,scope=Cluster +// +kubebuilder:subresource:status +type Node struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec NodeSpec `json:"spec"` + + // status holds observed values.
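+
+ // Illustrative sketch for the NetworkMigration and MTUMigration types in
+ // types_network.go above (not part of the upstream API): describing an
+ // in-progress MTU change on the default network. The MTU values are
+ // hypothetical.
+ //
+ //	to, from := uint32(1400), uint32(1500)
+ //	migration := NetworkMigration{
+ //		NetworkType: "OVNKubernetes",
+ //		MTU:         &MTUMigration{Network: &MTUMigrationValues{To: &to, From: &from}},
+ //	}
+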
+ // +optional + Status NodeStatus `json:"status"` +} + +type NodeSpec struct { + // CgroupMode determines the cgroups version on the node + // +optional + CgroupMode CgroupMode `json:"cgroupMode,omitempty"` + + // WorkerLatencyProfile determines how fast the kubelet updates + // its status and the corresponding reaction of the cluster + // +optional + WorkerLatencyProfile WorkerLatencyProfileType `json:"workerLatencyProfile,omitempty"` +} + +type NodeStatus struct { + // WorkerLatencyProfileStatus provides the current status of WorkerLatencyProfile + // +optional + WorkerLatencyProfileStatus WorkerLatencyProfileStatus `json:"workerLatencyProfileStatus,omitempty"` +} + +// +kubebuilder:validation:Enum=v1;v2;"" +type CgroupMode string + +const ( + CgroupModeEmpty CgroupMode = "" // Empty string indicates to honor user set value on the system that should not be overridden by OpenShift + CgroupModeV1 CgroupMode = "v1" + CgroupModeV2 CgroupMode = "v2" + CgroupModeDefault CgroupMode = CgroupModeV1 +) + +// +kubebuilder:validation:Enum=Default;MediumUpdateAverageReaction;LowUpdateSlowReaction +type WorkerLatencyProfileType string + +const ( + // Medium Kubelet Update Frequency (heart-beat) and Average Reaction Time to unresponsive Node + MediumUpdateAverageReaction WorkerLatencyProfileType = "MediumUpdateAverageReaction" + + // Low Kubelet Update Frequency (heart-beat) and Slow Reaction Time to unresponsive Node + LowUpdateSlowReaction WorkerLatencyProfileType = "LowUpdateSlowReaction" + + // Default values of the relevant Kubelet, Kube Controller Manager and Kube API Server parameters + DefaultUpdateDefaultReaction WorkerLatencyProfileType = "Default" +) + +const ( + // DefaultNodeStatusUpdateFrequency refers to the "--node-status-update-frequency" of the kubelet in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type + DefaultNodeStatusUpdateFrequency = 10 * time.Second + // DefaultNodeMonitorGracePeriod refers to the "--node-monitor-grace-period" of the Kube Controller Manager in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type + DefaultNodeMonitorGracePeriod = 40 * time.Second + // DefaultNotReadyTolerationSeconds refers to the "--default-not-ready-toleration-seconds" of the Kube API Server in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type + DefaultNotReadyTolerationSeconds = 300 + // DefaultUnreachableTolerationSeconds refers to the "--default-unreachable-toleration-seconds" of the Kube API Server in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type + DefaultUnreachableTolerationSeconds = 300 + + // MediumNodeStatusUpdateFrequency refers to the "--node-status-update-frequency" of the kubelet in case of MediumUpdateAverageReaction WorkerLatencyProfile type + MediumNodeStatusUpdateFrequency = 20 * time.Second + // MediumNodeMonitorGracePeriod refers to the "--node-monitor-grace-period" of the Kube Controller Manager in case of MediumUpdateAverageReaction WorkerLatencyProfile type + MediumNodeMonitorGracePeriod = 2 * time.Minute + // MediumNotReadyTolerationSeconds refers to the "--default-not-ready-toleration-seconds" of the Kube API Server in case of MediumUpdateAverageReaction WorkerLatencyProfile type + MediumNotReadyTolerationSeconds = 60 + // MediumUnreachableTolerationSeconds refers to the "--default-unreachable-toleration-seconds" of the Kube API Server in case of MediumUpdateAverageReaction WorkerLatencyProfile type + MediumUnreachableTolerationSeconds = 60 + + // LowNodeStatusUpdateFrequency refers to the "--node-status-update-frequency" of
the kubelet in case of LowUpdateSlowReaction WorkerLatencyProfile type + LowNodeStatusUpdateFrequency = 1 * time.Minute + // LowNodeMonitorGracePeriod refers to the "--node-monitor-grace-period" of the Kube Controller Manager in case of LowUpdateSlowReaction WorkerLatencyProfile type + LowNodeMonitorGracePeriod = 5 * time.Minute + // LowNotReadyTolerationSeconds refers to the "--default-not-ready-toleration-seconds" of the Kube API Server in case of LowUpdateSlowReaction WorkerLatencyProfile type + LowNotReadyTolerationSeconds = 60 + // LowUnreachableTolerationSeconds refers to the "--default-unreachable-toleration-seconds" of the Kube API Server in case of LowUpdateSlowReaction WorkerLatencyProfile type + LowUnreachableTolerationSeconds = 60 +) + +// WorkerLatencyProfileStatus provides status information about the WorkerLatencyProfile rollout +type WorkerLatencyProfileStatus struct { + // conditions describes the state of the WorkerLatencyProfile and related components + // (Kubelet or Controller Manager or Kube API Server) whether progressing, degraded or complete + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} + +// Different condition types to be used by KubeletOperator, Kube Controller +// Manager Operator and Kube API Server Operator +const ( + // Progressing indicates that the updates to the component (Kubelet or Controller + // Manager or Kube API Server) are actively rolling out, propagating changes to the + // respective arguments. + KubeletProgressing = "KubeletProgressing" + KubeControllerManagerProgressing = "KubeControllerManagerProgressing" + KubeAPIServerProgressing = "KubeAPIServerProgressing" + + // Complete indicates that the component (Kubelet or Controller Manager or Kube API Server) + // has successfully updated the respective arguments. + KubeletComplete = "KubeletComplete" + KubeControllerManagerComplete = "KubeControllerManagerComplete" + KubeAPIServerComplete = "KubeAPIServerComplete" + + // Degraded indicates that the component (Kubelet or Controller Manager or Kube API Server) + // does not reach the state 'Complete' over a period of time + // resulting in either a lower quality or absence of service. + // If the component enters this state, "Default" WorkerLatencyProfileType + // rollout will be initiated to restore the respective default arguments of all + // components. + KubeletDegraded = "KubeletDegraded" + KubeControllerManagerDegraded = "KubeControllerManagerDegraded" + KubeAPIServerDegraded = "KubeAPIServerDegraded" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
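+
+ // Illustrative sketch for the WorkerLatencyProfile constants above (not
+ // part of the upstream API): resolving the kubelet
+ // --node-status-update-frequency for a given profile. The helper name is
+ // hypothetical.
+ //
+ //	func nodeStatusUpdateFrequency(p WorkerLatencyProfileType) time.Duration {
+ //		switch p {
+ //		case MediumUpdateAverageReaction:
+ //			return MediumNodeStatusUpdateFrequency
+ //		case LowUpdateSlowReaction:
+ //			return LowNodeStatusUpdateFrequency
+ //		default:
+ //			return DefaultNodeStatusUpdateFrequency
+ //		}
+ //	}
+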
+// +openshift:compatibility-gen:level=1 +type NodeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Node `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_oauth.go b/vendor/github.com/openshift/api/config/v1/types_oauth.go new file mode 100644 index 000000000..02fbbf9d4 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_oauth.go @@ -0,0 +1,585 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// OAuth Server and Identity Provider Config + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. +// It is used to configure the integrated OAuth server. +// This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OAuth struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec OAuthSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status OAuthStatus `json:"status"` +} + +// OAuthSpec contains desired cluster auth configuration +type OAuthSpec struct { + // identityProviders is an ordered list of ways for a user to identify themselves. + // When this list is empty, no identities are provisioned for users. + // +optional + // +listType=atomic + IdentityProviders []IdentityProvider `json:"identityProviders,omitempty"` + + // tokenConfig contains options for authorization and access tokens + TokenConfig TokenConfig `json:"tokenConfig"` + + // templates allow you to customize pages like the login page. + // +optional + Templates OAuthTemplates `json:"templates"` +} + +// OAuthStatus shows current known state of OAuth server in the cluster +type OAuthStatus struct { + // TODO Fill in with status of identityProviders and templates (and maybe tokenConfig) +} + +// TokenConfig holds the necessary configuration options for authorization and access tokens +type TokenConfig struct { + // accessTokenMaxAgeSeconds defines the maximum age of access tokens + AccessTokenMaxAgeSeconds int32 `json:"accessTokenMaxAgeSeconds,omitempty"` + + // accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect. + // +optional + AccessTokenInactivityTimeoutSeconds int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"` + + // accessTokenInactivityTimeout defines the token inactivity timeout + // for tokens granted by any client. + // The value represents the maximum amount of time that can occur between + // consecutive uses of the token. Tokens become invalid if they are not + // used within this temporal window. The user will need to acquire a new + // token to regain access once a token times out. Takes a valid time + // duration string such as "5m", "1.5h" or "2h45m". The minimum allowed + // value for duration is 300s (5 minutes). If the timeout is configured + // per client, then that value takes precedence. If the timeout value is + // not specified and the client does not override the value, then tokens + // are valid for their full lifetime.
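+
+ // Illustrative sketch for TokenConfig above (not part of the upstream
+ // API): a 24h access token lifetime with a 10m inactivity timeout, which
+ // satisfies the documented 300s minimum. The values are hypothetical.
+ //
+ //	cfg := TokenConfig{
+ //		AccessTokenMaxAgeSeconds:     86400,
+ //		AccessTokenInactivityTimeout: &metav1.Duration{Duration: 10 * time.Minute},
+ //	}
+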
+ // + // WARNING: existing tokens' timeout will not be affected (lowered) by changing this value + // +optional + AccessTokenInactivityTimeout *metav1.Duration `json:"accessTokenInactivityTimeout,omitempty"` +} + +const ( + // LoginTemplateKey is the key of the login template in a secret + LoginTemplateKey = "login.html" + + // ProviderSelectionTemplateKey is the key for the provider selection template in a secret + ProviderSelectionTemplateKey = "providers.html" + + // ErrorsTemplateKey is the key for the errors template in a secret + ErrorsTemplateKey = "errors.html" + + // BindPasswordKey is the key for the LDAP bind password in a secret + BindPasswordKey = "bindPassword" + + // ClientSecretKey is the key for the oauth client secret data in a secret + ClientSecretKey = "clientSecret" + + // HTPasswdDataKey is the key for the htpasswd file data in a secret + HTPasswdDataKey = "htpasswd" +) + +// OAuthTemplates allow for customization of pages like the login page +type OAuthTemplates struct { + // login is the name of a secret that specifies a go template to use to render the login page. + // The key "login.html" is used to locate the template data. + // If specified and the secret or expected key is not found, the default login page is used. + // If the specified template is not valid, the default login page is used. + // If unspecified, the default login page is used. + // The namespace for this secret is openshift-config. + // +optional + Login SecretNameReference `json:"login"` + + // providerSelection is the name of a secret that specifies a go template to use to render + // the provider selection page. + // The key "providers.html" is used to locate the template data. + // If specified and the secret or expected key is not found, the default provider selection page is used. + // If the specified template is not valid, the default provider selection page is used. + // If unspecified, the default provider selection page is used. + // The namespace for this secret is openshift-config. + // +optional + ProviderSelection SecretNameReference `json:"providerSelection"` + + // error is the name of a secret that specifies a go template to use to render error pages + // during the authentication or grant flow. + // The key "errors.html" is used to locate the template data. + // If specified and the secret or expected key is not found, the default error page is used. + // If the specified template is not valid, the default error page is used. + // If unspecified, the default error page is used. + // The namespace for this secret is openshift-config. + // +optional + Error SecretNameReference `json:"error"` +} + +// IdentityProvider provides identities for users authenticating using credentials +type IdentityProvider struct { + // name is used to qualify the identities returned by this provider. + // - It MUST be unique and not shared by any other identity provider used + // - It MUST be a valid path segment: name cannot equal "." or ".." 
or contain "/" or "%" or ":" + // Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName + Name string `json:"name"` + + // mappingMethod determines how identities from this provider are mapped to users + // Defaults to "claim" + // +optional + MappingMethod MappingMethodType `json:"mappingMethod,omitempty"` + + IdentityProviderConfig `json:",inline"` +} + +// MappingMethodType specifies how new identities should be mapped to users when they log in +type MappingMethodType string + +const ( + // MappingMethodClaim provisions a user with the identity’s preferred user name. Fails if a user + // with that user name is already mapped to another identity. + // Default. + MappingMethodClaim MappingMethodType = "claim" + + // MappingMethodLookup looks up existing users already mapped to an identity but does not + // automatically provision users or identities. Requires identities and users be set up + // manually or using an external process. + MappingMethodLookup MappingMethodType = "lookup" + + // MappingMethodAdd provisions a user with the identity’s preferred user name. If a user with + // that user name already exists, the identity is mapped to the existing user, adding to any + // existing identity mappings for the user. + MappingMethodAdd MappingMethodType = "add" +) + +type IdentityProviderType string + +const ( + // IdentityProviderTypeBasicAuth provides identities for users authenticating with HTTP Basic Auth + IdentityProviderTypeBasicAuth IdentityProviderType = "BasicAuth" + + // IdentityProviderTypeGitHub provides identities for users authenticating using GitHub credentials + IdentityProviderTypeGitHub IdentityProviderType = "GitHub" + + // IdentityProviderTypeGitLab provides identities for users authenticating using GitLab credentials + IdentityProviderTypeGitLab IdentityProviderType = "GitLab" + + // IdentityProviderTypeGoogle provides identities for users authenticating using Google credentials + IdentityProviderTypeGoogle IdentityProviderType = "Google" + + // IdentityProviderTypeHTPasswd provides identities from an HTPasswd file + IdentityProviderTypeHTPasswd IdentityProviderType = "HTPasswd" + + // IdentityProviderTypeKeystone provides identitities for users authenticating using keystone password credentials + IdentityProviderTypeKeystone IdentityProviderType = "Keystone" + + // IdentityProviderTypeLDAP provides identities for users authenticating using LDAP credentials + IdentityProviderTypeLDAP IdentityProviderType = "LDAP" + + // IdentityProviderTypeOpenID provides identities for users authenticating using OpenID credentials + IdentityProviderTypeOpenID IdentityProviderType = "OpenID" + + // IdentityProviderTypeRequestHeader provides identities for users authenticating using request header credentials + IdentityProviderTypeRequestHeader IdentityProviderType = "RequestHeader" +) + +// IdentityProviderConfig contains configuration for using a specific identity provider +type IdentityProviderConfig struct { + // type identifies the identity provider type for this entry. + Type IdentityProviderType `json:"type"` + + // Provider-specific configuration + // The json tag MUST match the `Type` specified above, case-insensitively + // e.g. 
For `Type: "LDAP"`, the `ldap` configuration should be provided + + // basicAuth contains configuration options for the BasicAuth IdP + // +optional + BasicAuth *BasicAuthIdentityProvider `json:"basicAuth,omitempty"` + + // github enables user authentication using GitHub credentials + // +optional + GitHub *GitHubIdentityProvider `json:"github,omitempty"` + + // gitlab enables user authentication using GitLab credentials + // +optional + GitLab *GitLabIdentityProvider `json:"gitlab,omitempty"` + + // google enables user authentication using Google credentials + // +optional + Google *GoogleIdentityProvider `json:"google,omitempty"` + + // htpasswd enables user authentication using an HTPasswd file to validate credentials + // +optional + HTPasswd *HTPasswdIdentityProvider `json:"htpasswd,omitempty"` + + // keystone enables user authentication using keystone password credentials + // +optional + Keystone *KeystoneIdentityProvider `json:"keystone,omitempty"` + + // ldap enables user authentication using LDAP credentials + // +optional + LDAP *LDAPIdentityProvider `json:"ldap,omitempty"` + + // openID enables user authentication using OpenID credentials + // +optional + OpenID *OpenIDIdentityProvider `json:"openID,omitempty"` + + // requestHeader enables user authentication using request header credentials + // +optional + RequestHeader *RequestHeaderIdentityProvider `json:"requestHeader,omitempty"` +} + +// BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials +type BasicAuthIdentityProvider struct { + // OAuthRemoteConnectionInfo contains information about how to connect to the external basic auth server + OAuthRemoteConnectionInfo `json:",inline"` +} + +// OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection +type OAuthRemoteConnectionInfo struct { + // url is the remote URL to connect to + URL string `json:"url"` + + // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. + // It is used as a trust anchor to validate the TLS certificate presented by the remote server. + // The key "ca.crt" is used to locate the data. + // If specified and the config map or expected key is not found, the identity provider is not honored. + // If the specified ca data is not valid, the identity provider is not honored. + // If empty, the default system roots are used. + // The namespace for this config map is openshift-config. + // +optional + CA ConfigMapNameReference `json:"ca"` + + // tlsClientCert is an optional reference to a secret by name that contains the + // PEM-encoded TLS client certificate to present when connecting to the server. + // The key "tls.crt" is used to locate the data. + // If specified and the secret or expected key is not found, the identity provider is not honored. + // If the specified certificate data is not valid, the identity provider is not honored. + // The namespace for this secret is openshift-config. + // +optional + TLSClientCert SecretNameReference `json:"tlsClientCert"` + + // tlsClientKey is an optional reference to a secret by name that contains the + // PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. + // The key "tls.key" is used to locate the data. + // If specified and the secret or expected key is not found, the identity provider is not honored. + // If the specified certificate data is not valid, the identity provider is not honored. + // The namespace for this secret is openshift-config. 
+ // +optional
+ TLSClientKey SecretNameReference `json:"tlsClientKey"`
+}
+
+// HTPasswdIdentityProvider provides identities for users authenticating using htpasswd credentials
+type HTPasswdIdentityProvider struct {
+ // fileData is a required reference to a secret by name containing the data to use as the htpasswd file.
+ // The key "htpasswd" is used to locate the data.
+ // If the secret or expected key is not found, the identity provider is not honored.
+ // If the specified htpasswd data is not valid, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ FileData SecretNameReference `json:"fileData"`
+}
+
+// LDAPIdentityProvider provides identities for users authenticating using LDAP credentials
+type LDAPIdentityProvider struct {
+ // url is an RFC 2255 URL which specifies the LDAP search parameters to use.
+ // The syntax of the URL is:
+ // ldap://host:port/basedn?attribute?scope?filter
+ URL string `json:"url"`
+
+ // bindDN is an optional DN to bind with during the search phase.
+ // +optional
+ BindDN string `json:"bindDN"`
+
+ // bindPassword is an optional reference to a secret by name
+ // containing a password to bind with during the search phase.
+ // The key "bindPassword" is used to locate the data.
+ // If specified and the secret or expected key is not found, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ // +optional
+ BindPassword SecretNameReference `json:"bindPassword"`
+
+ // insecure, if true, indicates the connection should not use TLS
+ // WARNING: Should not be set to `true` with the URL scheme "ldaps://" as "ldaps://" URLs always
+ // attempt to connect using TLS, even when `insecure` is set to `true`
+ // When `true`, "ldap://" URLs connect insecurely. When `false`, "ldap://" URLs are upgraded to
+ // a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.
+ Insecure bool `json:"insecure"`
+
+ // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // The key "ca.crt" is used to locate the data.
+ // If specified and the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // If empty, the default system roots are used.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ CA ConfigMapNameReference `json:"ca"`
+
+ // attributes maps LDAP attributes to identities
+ Attributes LDAPAttributeMapping `json:"attributes"`
+}
+
+// LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields
+type LDAPAttributeMapping struct {
+ // id is the list of attributes whose values should be used as the user ID. Required.
+ // First non-empty attribute is used. At least one attribute is required. If none of the listed
+ // attributes have a value, authentication fails.
+ // LDAP standard identity attribute is "dn"
+ ID []string `json:"id"`
+
+ // preferredUsername is the list of attributes whose values should be used as the preferred username.
+ // LDAP standard login attribute is "uid"
+ // +optional
+ PreferredUsername []string `json:"preferredUsername,omitempty"`
+
+ // name is the list of attributes whose values should be used as the display name. Optional.
+ // If unspecified, no display name is set for the identity
+ // LDAP standard display name attribute is "cn"
+ // +optional
+ Name []string `json:"name,omitempty"`
+
+ // email is the list of attributes whose values should be used as the email address. Optional.
+ // If unspecified, no email is set for the identity
+ // +optional
+ Email []string `json:"email,omitempty"`
+}
+
+// KeystoneIdentityProvider provides identities for users authenticating using keystone password credentials
+type KeystoneIdentityProvider struct {
+ // OAuthRemoteConnectionInfo contains information about how to connect to the keystone server
+ OAuthRemoteConnectionInfo `json:",inline"`
+
+ // domainName is required for keystone v3
+ DomainName string `json:"domainName"`
+
+ // TODO if we ever add support for 3.11 to 4.0 upgrades, add this configuration
+ // useUsernameIdentity indicates that users should be authenticated by username, not keystone ID
+ // DEPRECATED - only use this option for legacy systems to ensure backwards compatibility
+ // +optional
+ // UseUsernameIdentity bool `json:"useUsernameIdentity"`
+}
+
+// RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials
+type RequestHeaderIdentityProvider struct {
+ // loginURL is a URL to redirect unauthenticated /authorize requests to
+ // Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here
+ // ${url} is replaced with the current URL, escaped to be safe in a query parameter
+ // https://www.example.com/sso-login?then=${url}
+ // ${query} is replaced with the current query string
+ // https://www.example.com/auth-proxy/oauth/authorize?${query}
+ // Required when login is set to true.
+ LoginURL string `json:"loginURL"`
+
+ // challengeURL is a URL to redirect unauthenticated /authorize requests to
+ // Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be
+ // redirected here.
+ // ${url} is replaced with the current URL, escaped to be safe in a query parameter
+ // https://www.example.com/sso-login?then=${url}
+ // ${query} is replaced with the current query string
+ // https://www.example.com/auth-proxy/oauth/authorize?${query}
+ // Required when challenge is set to true.
+ ChallengeURL string `json:"challengeURL"`
+
+ // ca is a required reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // Specifically, it allows verification of incoming requests to prevent header spoofing.
+ // The key "ca.crt" is used to locate the data.
+ // If the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // The namespace for this config map is openshift-config.
+ ClientCA ConfigMapNameReference `json:"ca"`
+
+ // clientCommonNames is an optional list of common names to require a match from. If empty, any
+ // client certificate validated against the clientCA bundle is considered authoritative.
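+ // For illustration only (hypothetical values), a requestHeader provider that
+ // trusts a single front-proxy certificate might look like (yaml):
+ //
+ //   requestHeader:
+ //     loginURL: https://auth.example.com/login?then=${url}
+ //     challengeURL: https://auth.example.com/challenge?${query}
+ //     ca:
+ //       name: request-header-ca
+ //     clientCommonNames:
+ //     - auth-proxy.example.com
+ //     headers:
+ //     - X-Remote-User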
+ // +optional
+ ClientCommonNames []string `json:"clientCommonNames,omitempty"`
+
+ // headers is the set of headers to check for identity information
+ Headers []string `json:"headers"`
+
+ // preferredUsernameHeaders is the set of headers to check for the preferred username
+ PreferredUsernameHeaders []string `json:"preferredUsernameHeaders"`
+
+ // nameHeaders is the set of headers to check for the display name
+ NameHeaders []string `json:"nameHeaders"`
+
+ // emailHeaders is the set of headers to check for the email address
+ EmailHeaders []string `json:"emailHeaders"`
+}
+
+// GitHubIdentityProvider provides identities for users authenticating using GitHub credentials
+type GitHubIdentityProvider struct {
+ // clientID is the oauth client ID
+ ClientID string `json:"clientID"`
+
+ // clientSecret is a required reference to the secret by name containing the oauth client secret.
+ // The key "clientSecret" is used to locate the data.
+ // If the secret or expected key is not found, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ ClientSecret SecretNameReference `json:"clientSecret"`
+
+ // organizations optionally restricts which organizations are allowed to log in
+ // +optional
+ Organizations []string `json:"organizations,omitempty"`
+
+ // teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.
+ // +optional
+ Teams []string `json:"teams,omitempty"`
+
+ // hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of
+ // GitHub Enterprise.
+ // It must match the GitHub Enterprise settings value configured at /setup/settings#hostname.
+ // +optional
+ Hostname string `json:"hostname"`
+
+ // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // The key "ca.crt" is used to locate the data.
+ // If specified and the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // If empty, the default system roots are used.
+ // This can only be configured when hostname is set to a non-empty value.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ CA ConfigMapNameReference `json:"ca"`
+}
+
+// GitLabIdentityProvider provides identities for users authenticating using GitLab credentials
+type GitLabIdentityProvider struct {
+ // clientID is the oauth client ID
+ ClientID string `json:"clientID"`
+
+ // clientSecret is a required reference to the secret by name containing the oauth client secret.
+ // The key "clientSecret" is used to locate the data.
+ // If the secret or expected key is not found, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ ClientSecret SecretNameReference `json:"clientSecret"`
+
+ // url is the oauth server base URL
+ URL string `json:"url"`
+
+ // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // The key "ca.crt" is used to locate the data.
+ // If specified and the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // If empty, the default system roots are used.
+ // The namespace for this config map is openshift-config. + // +optional + CA ConfigMapNameReference `json:"ca"` +} + +// GoogleIdentityProvider provides identities for users authenticating using Google credentials +type GoogleIdentityProvider struct { + // clientID is the oauth client ID + ClientID string `json:"clientID"` + + // clientSecret is a required reference to the secret by name containing the oauth client secret. + // The key "clientSecret" is used to locate the data. + // If the secret or expected key is not found, the identity provider is not honored. + // The namespace for this secret is openshift-config. + ClientSecret SecretNameReference `json:"clientSecret"` + + // hostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to + // +optional + HostedDomain string `json:"hostedDomain"` +} + +// OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials +type OpenIDIdentityProvider struct { + // clientID is the oauth client ID + ClientID string `json:"clientID"` + + // clientSecret is a required reference to the secret by name containing the oauth client secret. + // The key "clientSecret" is used to locate the data. + // If the secret or expected key is not found, the identity provider is not honored. + // The namespace for this secret is openshift-config. + ClientSecret SecretNameReference `json:"clientSecret"` + + // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. + // It is used as a trust anchor to validate the TLS certificate presented by the remote server. + // The key "ca.crt" is used to locate the data. + // If specified and the config map or expected key is not found, the identity provider is not honored. + // If the specified ca data is not valid, the identity provider is not honored. + // If empty, the default system roots are used. + // The namespace for this config map is openshift-config. + // +optional + CA ConfigMapNameReference `json:"ca"` + + // extraScopes are any scopes to request in addition to the standard "openid" scope. + // +optional + ExtraScopes []string `json:"extraScopes,omitempty"` + + // extraAuthorizeParameters are any custom parameters to add to the authorize request. + // +optional + ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters,omitempty"` + + // issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. + // It must use the https scheme with no query or fragment component. + Issuer string `json:"issuer"` + + // claims mappings + Claims OpenIDClaims `json:"claims"` +} + +// UserIDClaim is the claim used to provide a stable identifier for OIDC identities. +// Per http://openid.net/specs/openid-connect-core-1_0.html#ClaimStability +// "The sub (subject) and iss (issuer) Claims, used together, are the only Claims that an RP can +// rely upon as a stable identifier for the End-User, since the sub Claim MUST be locally unique +// and never reassigned within the Issuer for a particular End-User, as described in Section 2. +// Therefore, the only guaranteed unique identifier for a given End-User is the combination of the +// iss Claim and the sub Claim." 
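+// For illustration, the stable identifier is therefore the (iss, sub) pair
+// taken from the provider's tokens, e.g. (hypothetical values):
+//
+//   iss: https://oidc.example.com
+//   sub: 248289761001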
+const UserIDClaim = "sub"
+
+// OpenIDClaim represents a claim retrieved from an OpenID provider's tokens or userInfo
+// responses
+// +kubebuilder:validation:MinLength=1
+type OpenIDClaim string
+
+// OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider
+type OpenIDClaims struct {
+ // preferredUsername is the list of claims whose values should be used as the preferred username.
+ // If unspecified, the preferred username is determined from the value of the sub claim
+ // +listType=atomic
+ // +optional
+ PreferredUsername []string `json:"preferredUsername,omitempty"`
+
+ // name is the list of claims whose values should be used as the display name. Optional.
+ // If unspecified, no display name is set for the identity
+ // +listType=atomic
+ // +optional
+ Name []string `json:"name,omitempty"`
+
+ // email is the list of claims whose values should be used as the email address. Optional.
+ // If unspecified, no email is set for the identity
+ // +listType=atomic
+ // +optional
+ Email []string `json:"email,omitempty"`
+
+ // groups is the list of claims whose values should be used to synchronize groups
+ // from the OIDC provider to OpenShift for the user.
+ // If multiple claims are specified, the first one with a non-empty value is used.
+ // +listType=atomic
+ // +optional
+ Groups []OpenIDClaim `json:"groups,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type OAuthList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []OAuth `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
new file mode 100644
index 000000000..67a029529
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
@@ -0,0 +1,85 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// OperatorHubSpec defines the desired state of OperatorHub
+type OperatorHubSpec struct {
+ // disableAllDefaultSources allows you to disable all the default hub
+ // sources. If this is true, a specific entry in sources can be used to
+ // enable a default source. If this is false, a specific entry in
+ // sources can be used to disable or enable a default source.
+ // +optional
+ DisableAllDefaultSources bool `json:"disableAllDefaultSources,omitempty"`
+ // sources is the list of default hub sources and their configuration.
+ // If the list is empty, it implies that the default hub sources are
+ // enabled on the cluster unless disableAllDefaultSources is true.
+ // If disableAllDefaultSources is true and sources is not empty,
+ // the configuration present in sources will take precedence. The list of
+ // default hub sources and their current state will always be reflected in
+ // the status block.
+ // +optional
+ Sources []HubSource `json:"sources,omitempty"`
+}
+
+// OperatorHubStatus defines the observed state of OperatorHub. The current
+// state of the default hub sources will always be reflected here.
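+// For illustration only (hypothetical source name), the status below would
+// reflect the result of applying a spec such as (yaml):
+//
+//   spec:
+//     disableAllDefaultSources: true
+//     sources:
+//     - name: community-operators
+//       disabled: false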
+type OperatorHubStatus struct { + // sources encapsulates the result of applying the configuration for each + // hub source + Sources []HubSourceStatus `json:"sources,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OperatorHub is the Schema for the operatorhubs API. It can be used to change +// the state of the default hub sources for OperatorHub on the cluster from +// enabled to disabled and vice versa. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:subresource:status +// +genclient +// +genclient:nonNamespaced +// +openshift:compatibility-gen:level=1 +type OperatorHub struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec OperatorHubSpec `json:"spec"` + Status OperatorHubStatus `json:"status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OperatorHubList contains a list of OperatorHub +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OperatorHubList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []OperatorHub `json:"items"` +} + +// HubSource is used to specify the hub source and its configuration +type HubSource struct { + // name is the name of one of the default hub sources + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:Required + Name string `json:"name"` + // disabled is used to disable a default hub source on cluster + // +kubebuilder:Required + Disabled bool `json:"disabled"` +} + +// HubSourceStatus is used to reflect the current state of applying the +// configuration to a default source +type HubSourceStatus struct { + HubSource `json:",omitempty"` + // status indicates success or failure in applying the configuration + Status string `json:"status,omitempty"` + // message provides more information regarding failures + Message string `json:"message,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_project.go b/vendor/github.com/openshift/api/config/v1/types_project.go new file mode 100644 index 000000000..add6abf66 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_project.go @@ -0,0 +1,59 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Project holds cluster-wide information about Project. The canonical name is `cluster` +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Project struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec ProjectSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ProjectStatus `json:"status"` +} + +// TemplateReference references a template in a specific namespace. +// The namespace must be specified at the point of use. 
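+// For illustration only (hypothetical template name), a Project spec
+// referencing a request template in the openshift-config namespace might
+// look like (yaml):
+//
+//   spec:
+//     projectRequestTemplate:
+//       name: project-request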
+type TemplateReference struct { + // name is the metadata.name of the referenced project request template + Name string `json:"name"` +} + +// ProjectSpec holds the project creation configuration. +type ProjectSpec struct { + // projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint + // +optional + ProjectRequestMessage string `json:"projectRequestMessage"` + + // projectRequestTemplate is the template to use for creating projects in response to projectrequest. + // This must point to a template in 'openshift-config' namespace. It is optional. + // If it is not specified, a default template is used. + // + // +optional + ProjectRequestTemplate TemplateReference `json:"projectRequestTemplate"` +} + +type ProjectStatus struct { +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ProjectList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Project `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go new file mode 100644 index 000000000..01ee4690d --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go @@ -0,0 +1,99 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster` +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Proxy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec holds user-settable values for the proxy configuration + // +kubebuilder:validation:Required + // +required + Spec ProxySpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ProxyStatus `json:"status"` +} + +// ProxySpec contains cluster proxy creation configuration. +type ProxySpec struct { + // httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var. + // +optional + HTTPProxy string `json:"httpProxy,omitempty"` + + // httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var. + // +optional + HTTPSProxy string `json:"httpsProxy,omitempty"` + + // noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. + // Empty means unset and will not result in an env var. + // +optional + NoProxy string `json:"noProxy,omitempty"` + + // readinessEndpoints is a list of endpoints used to verify readiness of the proxy. + // +optional + ReadinessEndpoints []string `json:"readinessEndpoints,omitempty"` + + // trustedCA is a reference to a ConfigMap containing a CA certificate bundle. + // The trustedCA field should only be consumed by a proxy validator. 
The + // validator is responsible for reading the certificate bundle from the required + // key "ca-bundle.crt", merging it with the system default trust bundle, + // and writing the merged trust bundle to a ConfigMap named "trusted-ca-bundle" + // in the "openshift-config-managed" namespace. Clients that expect to make + // proxy connections must use the trusted-ca-bundle for all HTTPS requests to + // the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as + // well. + // + // The namespace for the ConfigMap referenced by trustedCA is + // "openshift-config". Here is an example ConfigMap (in yaml): + // + // apiVersion: v1 + // kind: ConfigMap + // metadata: + // name: user-ca-bundle + // namespace: openshift-config + // data: + // ca-bundle.crt: | + // -----BEGIN CERTIFICATE----- + // Custom CA certificate bundle. + // -----END CERTIFICATE----- + // + // +optional + TrustedCA ConfigMapNameReference `json:"trustedCA,omitempty"` +} + +// ProxyStatus shows current known state of the cluster proxy. +type ProxyStatus struct { + // httpProxy is the URL of the proxy for HTTP requests. + // +optional + HTTPProxy string `json:"httpProxy,omitempty"` + + // httpsProxy is the URL of the proxy for HTTPS requests. + // +optional + HTTPSProxy string `json:"httpsProxy,omitempty"` + + // noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. + // +optional + NoProxy string `json:"noProxy,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ProxyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Proxy `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go new file mode 100644 index 000000000..a69d2a35c --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_scheduling.go @@ -0,0 +1,105 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Scheduler holds cluster-wide config information to run the Kubernetes Scheduler +// and influence its placement decisions. The canonical name for this config is `cluster`. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Scheduler struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec SchedulerSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status SchedulerStatus `json:"status"` +} + +type SchedulerSpec struct { + // DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release. + // policy is a reference to a ConfigMap containing scheduler policy which has + // user specified predicates and priorities. If this ConfigMap is not available + // scheduler will default to use DefaultAlgorithmProvider. + // The namespace for this configmap is openshift-config. 
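+ // For illustration only (hypothetical configmap name), a reference to such
+ // a policy configmap might look like (yaml):
+ //
+ //   policy:
+ //     name: scheduler-policy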
+ // +optional
+ Policy ConfigMapNameReference `json:"policy,omitempty"`
+ // profile sets which scheduling profile should be set in order to configure scheduling
+ // decisions for new pods.
+ //
+ // Valid values are "LowNodeUtilization", "HighNodeUtilization", "NoScoring"
+ // Defaults to "LowNodeUtilization"
+ // +optional
+ Profile SchedulerProfile `json:"profile,omitempty"`
+ // defaultNodeSelector helps set the cluster-wide default node selector to
+ // restrict pod placement to specific nodes. This is applied to the pods
+ // created in all namespaces and creates an intersection with any existing
+ // nodeSelectors already set on a pod, additionally constraining that pod's selector.
+ // For example,
+ // defaultNodeSelector: "type=user-node,region=east" would set nodeSelector
+ // field in pod spec to "type=user-node,region=east" to all pods created
+ // in all namespaces. Namespaces having project-wide node selectors won't be
+ // impacted even if this field is set. This adds an annotation section to
+ // the namespace.
+ // For example, if a new namespace is created with
+ // node-selector='type=user-node,region=east',
+ // the annotation openshift.io/node-selector: type=user-node,region=east
+ // gets added to the project. When the openshift.io/node-selector annotation
+ // is set on the project the value is used in preference to the value we are setting
+ // for defaultNodeSelector field.
+ // For instance,
+ // openshift.io/node-selector: "type=user-node,region=west" means
+ // that the default of "type=user-node,region=east" set in defaultNodeSelector
+ // would not be applied.
+ // +optional
+ DefaultNodeSelector string `json:"defaultNodeSelector,omitempty"`
+ // MastersSchedulable allows master nodes to be schedulable. When this flag is
+ // turned on, all the master nodes in the cluster will be made schedulable,
+ // so that workload pods can run on them. The default value for this field is false,
+ // meaning none of the master nodes are schedulable.
+ // Important Note: Once the workload pods start running on the master nodes,
+ // extreme care must be taken to ensure that cluster-critical control plane components
+ // are not impacted.
+ // Please turn on this field after doing due diligence.
+ // +optional
+ MastersSchedulable bool `json:"mastersSchedulable"`
+}
+
+// +kubebuilder:validation:Enum="";LowNodeUtilization;HighNodeUtilization;NoScoring
+type SchedulerProfile string
+
+var (
+ // LowNodeUtilization is the default, and defines a scheduling profile which prefers to
+ // spread pods evenly among nodes targeting low resource consumption on each node.
+ LowNodeUtilization SchedulerProfile = "LowNodeUtilization"
+
+ // HighNodeUtilization defines a scheduling profile which packs as many pods as possible onto
+ // as few nodes as possible targeting a small node count but high resource usage on each node.
+ HighNodeUtilization SchedulerProfile = "HighNodeUtilization"
+
+ // NoScoring defines a scheduling profile which tries to provide lower-latency scheduling
+ // at the expense of potentially less optimal pod placement decisions.
+ NoScoring SchedulerProfile = "NoScoring"
+)
+
+type SchedulerStatus struct {
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
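+// For illustration only (hypothetical values), a Scheduler spec combining
+// the fields defined in SchedulerSpec above might look like (yaml):
+//
+//   spec:
+//     profile: HighNodeUtilization
+//     defaultNodeSelector: type=user-node,region=east
+//     mastersSchedulable: false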
+// +openshift:compatibility-gen:level=1 +type SchedulerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Scheduler `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go new file mode 100644 index 000000000..9dbacb996 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go @@ -0,0 +1,262 @@ +package v1 + +// TLSSecurityProfile defines the schema for a TLS security profile. This object +// is used by operators to apply TLS security settings to operands. +// +union +type TLSSecurityProfile struct { + // type is one of Old, Intermediate, Modern or Custom. Custom provides + // the ability to specify individual TLS security profile parameters. + // Old, Intermediate and Modern are TLS security profiles based on: + // + // https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations + // + // The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers + // are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be + // reduced. + // + // Note that the Modern profile is currently not supported because it is not + // yet well adopted by common software libraries. + // + // +unionDiscriminator + // +optional + Type TLSProfileType `json:"type"` + // old is a TLS security profile based on: + // + // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility + // + // and looks like this (yaml): + // + // ciphers: + // - TLS_AES_128_GCM_SHA256 + // - TLS_AES_256_GCM_SHA384 + // - TLS_CHACHA20_POLY1305_SHA256 + // - ECDHE-ECDSA-AES128-GCM-SHA256 + // - ECDHE-RSA-AES128-GCM-SHA256 + // - ECDHE-ECDSA-AES256-GCM-SHA384 + // - ECDHE-RSA-AES256-GCM-SHA384 + // - ECDHE-ECDSA-CHACHA20-POLY1305 + // - ECDHE-RSA-CHACHA20-POLY1305 + // - DHE-RSA-AES128-GCM-SHA256 + // - DHE-RSA-AES256-GCM-SHA384 + // - DHE-RSA-CHACHA20-POLY1305 + // - ECDHE-ECDSA-AES128-SHA256 + // - ECDHE-RSA-AES128-SHA256 + // - ECDHE-ECDSA-AES128-SHA + // - ECDHE-RSA-AES128-SHA + // - ECDHE-ECDSA-AES256-SHA384 + // - ECDHE-RSA-AES256-SHA384 + // - ECDHE-ECDSA-AES256-SHA + // - ECDHE-RSA-AES256-SHA + // - DHE-RSA-AES128-SHA256 + // - DHE-RSA-AES256-SHA256 + // - AES128-GCM-SHA256 + // - AES256-GCM-SHA384 + // - AES128-SHA256 + // - AES256-SHA256 + // - AES128-SHA + // - AES256-SHA + // - DES-CBC3-SHA + // minTLSVersion: TLSv1.0 + // + // +optional + // +nullable + Old *OldTLSProfile `json:"old,omitempty"` + // intermediate is a TLS security profile based on: + // + // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 + // + // and looks like this (yaml): + // + // ciphers: + // - TLS_AES_128_GCM_SHA256 + // - TLS_AES_256_GCM_SHA384 + // - TLS_CHACHA20_POLY1305_SHA256 + // - ECDHE-ECDSA-AES128-GCM-SHA256 + // - ECDHE-RSA-AES128-GCM-SHA256 + // - ECDHE-ECDSA-AES256-GCM-SHA384 + // - ECDHE-RSA-AES256-GCM-SHA384 + // - ECDHE-ECDSA-CHACHA20-POLY1305 + // - ECDHE-RSA-CHACHA20-POLY1305 + // - DHE-RSA-AES128-GCM-SHA256 + // - DHE-RSA-AES256-GCM-SHA384 + // minTLSVersion: TLSv1.2 + // + // +optional + // +nullable + Intermediate *IntermediateTLSProfile `json:"intermediate,omitempty"` + // modern is a TLS security profile based on: + // + // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + // + // and looks like this (yaml): + // + // ciphers: + // - 
TLS_AES_128_GCM_SHA256 + // - TLS_AES_256_GCM_SHA384 + // - TLS_CHACHA20_POLY1305_SHA256 + // minTLSVersion: TLSv1.3 + // + // NOTE: Currently unsupported. + // + // +optional + // +nullable + Modern *ModernTLSProfile `json:"modern,omitempty"` + // custom is a user-defined TLS security profile. Be extremely careful using a custom + // profile as invalid configurations can be catastrophic. An example custom profile + // looks like this: + // + // ciphers: + // - ECDHE-ECDSA-CHACHA20-POLY1305 + // - ECDHE-RSA-CHACHA20-POLY1305 + // - ECDHE-RSA-AES128-GCM-SHA256 + // - ECDHE-ECDSA-AES128-GCM-SHA256 + // minTLSVersion: TLSv1.1 + // + // +optional + // +nullable + Custom *CustomTLSProfile `json:"custom,omitempty"` +} + +// OldTLSProfile is a TLS security profile based on: +// https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility +type OldTLSProfile struct{} + +// IntermediateTLSProfile is a TLS security profile based on: +// https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29 +type IntermediateTLSProfile struct{} + +// ModernTLSProfile is a TLS security profile based on: +// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility +type ModernTLSProfile struct{} + +// CustomTLSProfile is a user-defined TLS security profile. Be extremely careful +// using a custom TLS profile as invalid configurations can be catastrophic. +type CustomTLSProfile struct { + TLSProfileSpec `json:",inline"` +} + +// TLSProfileType defines a TLS security profile type. +// +kubebuilder:validation:Enum=Old;Intermediate;Modern;Custom +type TLSProfileType string + +const ( + // Old is a TLS security profile based on: + // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility + TLSProfileOldType TLSProfileType = "Old" + // Intermediate is a TLS security profile based on: + // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29 + TLSProfileIntermediateType TLSProfileType = "Intermediate" + // Modern is a TLS security profile based on: + // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + TLSProfileModernType TLSProfileType = "Modern" + // Custom is a TLS security profile that allows for user-defined parameters. + TLSProfileCustomType TLSProfileType = "Custom" +) + +// TLSProfileSpec is the desired behavior of a TLSSecurityProfile. +type TLSProfileSpec struct { + // ciphers is used to specify the cipher algorithms that are negotiated + // during the TLS handshake. Operators may remove entries their operands + // do not support. For example, to use DES-CBC3-SHA (yaml): + // + // ciphers: + // - DES-CBC3-SHA + // + Ciphers []string `json:"ciphers"` + // minTLSVersion is used to specify the minimal version of the TLS protocol + // that is negotiated during the TLS handshake. For example, to use TLS + // versions 1.1, 1.2 and 1.3 (yaml): + // + // minTLSVersion: TLSv1.1 + // + // NOTE: currently the highest minTLSVersion allowed is VersionTLS12 + // + MinTLSVersion TLSProtocolVersion `json:"minTLSVersion"` +} + +// TLSProtocolVersion is a way to specify the protocol version used for TLS connections. 
+// Protocol versions are based on the following most common TLS configurations:
+//
+// https://ssl-config.mozilla.org/
+//
+// Note that SSLv3.0 is not a supported protocol version due to well known
+// vulnerabilities such as POODLE: https://en.wikipedia.org/wiki/POODLE
+// +kubebuilder:validation:Enum=VersionTLS10;VersionTLS11;VersionTLS12;VersionTLS13
+type TLSProtocolVersion string
+
+const (
+ // VersionTLS10 is version 1.0 of the TLS security protocol.
+ VersionTLS10 TLSProtocolVersion = "VersionTLS10"
+ // VersionTLS11 is version 1.1 of the TLS security protocol.
+ VersionTLS11 TLSProtocolVersion = "VersionTLS11"
+ // VersionTLS12 is version 1.2 of the TLS security protocol.
+ VersionTLS12 TLSProtocolVersion = "VersionTLS12"
+ // VersionTLS13 is version 1.3 of the TLS security protocol.
+ VersionTLS13 TLSProtocolVersion = "VersionTLS13"
+)
+
+// TLSProfiles contains a map of TLSProfileType names to TLSProfileSpec.
+//
+// NOTE: The caller needs to make sure to check that these constants are valid for their binary. Not all
+// entries map to values for all binaries. In the case of ties, the kube-apiserver wins. Do not fail,
+// just be sure to whitelist only and everything will be ok.
+var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{
+ TLSProfileOldType: {
+ Ciphers: []string{
+ "TLS_AES_128_GCM_SHA256",
+ "TLS_AES_256_GCM_SHA384",
+ "TLS_CHACHA20_POLY1305_SHA256",
+ "ECDHE-ECDSA-AES128-GCM-SHA256",
+ "ECDHE-RSA-AES128-GCM-SHA256",
+ "ECDHE-ECDSA-AES256-GCM-SHA384",
+ "ECDHE-RSA-AES256-GCM-SHA384",
+ "ECDHE-ECDSA-CHACHA20-POLY1305",
+ "ECDHE-RSA-CHACHA20-POLY1305",
+ "DHE-RSA-AES128-GCM-SHA256",
+ "DHE-RSA-AES256-GCM-SHA384",
+ "DHE-RSA-CHACHA20-POLY1305",
+ "ECDHE-ECDSA-AES128-SHA256",
+ "ECDHE-RSA-AES128-SHA256",
+ "ECDHE-ECDSA-AES128-SHA",
+ "ECDHE-RSA-AES128-SHA",
+ "ECDHE-ECDSA-AES256-SHA384",
+ "ECDHE-RSA-AES256-SHA384",
+ "ECDHE-ECDSA-AES256-SHA",
+ "ECDHE-RSA-AES256-SHA",
+ "DHE-RSA-AES128-SHA256",
+ "DHE-RSA-AES256-SHA256",
+ "AES128-GCM-SHA256",
+ "AES256-GCM-SHA384",
+ "AES128-SHA256",
+ "AES256-SHA256",
+ "AES128-SHA",
+ "AES256-SHA",
+ "DES-CBC3-SHA",
+ },
+ MinTLSVersion: VersionTLS10,
+ },
+ TLSProfileIntermediateType: {
+ Ciphers: []string{
+ "TLS_AES_128_GCM_SHA256",
+ "TLS_AES_256_GCM_SHA384",
+ "TLS_CHACHA20_POLY1305_SHA256",
+ "ECDHE-ECDSA-AES128-GCM-SHA256",
+ "ECDHE-RSA-AES128-GCM-SHA256",
+ "ECDHE-ECDSA-AES256-GCM-SHA384",
+ "ECDHE-RSA-AES256-GCM-SHA384",
+ "ECDHE-ECDSA-CHACHA20-POLY1305",
+ "ECDHE-RSA-CHACHA20-POLY1305",
+ "DHE-RSA-AES128-GCM-SHA256",
+ "DHE-RSA-AES256-GCM-SHA384",
+ },
+ MinTLSVersion: VersionTLS12,
+ },
+ TLSProfileModernType: {
+ Ciphers: []string{
+ "TLS_AES_128_GCM_SHA256",
+ "TLS_AES_256_GCM_SHA384",
+ "TLS_CHACHA20_POLY1305_SHA256",
+ },
+ MinTLSVersion: VersionTLS13,
+ },
+}
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..d5f73e01f
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
@@ -0,0 +1,4825 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
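+// For illustration, callers generally obtain an independent copy via the
+// DeepCopy wrapper below rather than calling DeepCopyInto directly, e.g.:
+//
+//   copied := original.DeepCopy() // *APIServer that can be mutated safely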
+func (in *APIServer) DeepCopyInto(out *APIServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServer. +func (in *APIServer) DeepCopy() *APIServer { + if in == nil { + return nil + } + out := new(APIServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerEncryption) DeepCopyInto(out *APIServerEncryption) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerEncryption. +func (in *APIServerEncryption) DeepCopy() *APIServerEncryption { + if in == nil { + return nil + } + out := new(APIServerEncryption) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerList) DeepCopyInto(out *APIServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]APIServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerList. +func (in *APIServerList) DeepCopy() *APIServerList { + if in == nil { + return nil + } + out := new(APIServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerNamedServingCert) DeepCopyInto(out *APIServerNamedServingCert) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.ServingCertificate = in.ServingCertificate + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerNamedServingCert. +func (in *APIServerNamedServingCert) DeepCopy() *APIServerNamedServingCert { + if in == nil { + return nil + } + out := new(APIServerNamedServingCert) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerServingCerts) DeepCopyInto(out *APIServerServingCerts) { + *out = *in + if in.NamedCertificates != nil { + in, out := &in.NamedCertificates, &out.NamedCertificates + *out = make([]APIServerNamedServingCert, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerServingCerts. 
+func (in *APIServerServingCerts) DeepCopy() *APIServerServingCerts { + if in == nil { + return nil + } + out := new(APIServerServingCerts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerSpec) DeepCopyInto(out *APIServerSpec) { + *out = *in + in.ServingCerts.DeepCopyInto(&out.ServingCerts) + out.ClientCA = in.ClientCA + if in.AdditionalCORSAllowedOrigins != nil { + in, out := &in.AdditionalCORSAllowedOrigins, &out.AdditionalCORSAllowedOrigins + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.Encryption = in.Encryption + if in.TLSSecurityProfile != nil { + in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile + *out = new(TLSSecurityProfile) + (*in).DeepCopyInto(*out) + } + in.Audit.DeepCopyInto(&out.Audit) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerSpec. +func (in *APIServerSpec) DeepCopy() *APIServerSpec { + if in == nil { + return nil + } + out := new(APIServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerStatus) DeepCopyInto(out *APIServerStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerStatus. +func (in *APIServerStatus) DeepCopy() *APIServerStatus { + if in == nil { + return nil + } + out := new(APIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSPlatformSpec) DeepCopyInto(out *AWSPlatformSpec) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]AWSServiceEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformSpec. +func (in *AWSPlatformSpec) DeepCopy() *AWSPlatformSpec { + if in == nil { + return nil + } + out := new(AWSPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSPlatformStatus) DeepCopyInto(out *AWSPlatformStatus) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]AWSServiceEndpoint, len(*in)) + copy(*out, *in) + } + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make([]AWSResourceTag, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformStatus. +func (in *AWSPlatformStatus) DeepCopy() *AWSPlatformStatus { + if in == nil { + return nil + } + out := new(AWSPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSResourceTag) DeepCopyInto(out *AWSResourceTag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceTag. 
+func (in *AWSResourceTag) DeepCopy() *AWSResourceTag { + if in == nil { + return nil + } + out := new(AWSResourceTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSServiceEndpoint) DeepCopyInto(out *AWSServiceEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSServiceEndpoint. +func (in *AWSServiceEndpoint) DeepCopy() *AWSServiceEndpoint { + if in == nil { + return nil + } + out := new(AWSServiceEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionConfig) DeepCopyInto(out *AdmissionConfig) { + *out = *in + if in.PluginConfig != nil { + in, out := &in.PluginConfig, &out.PluginConfig + *out = make(map[string]AdmissionPluginConfig, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.EnabledAdmissionPlugins != nil { + in, out := &in.EnabledAdmissionPlugins, &out.EnabledAdmissionPlugins + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DisabledAdmissionPlugins != nil { + in, out := &in.DisabledAdmissionPlugins, &out.DisabledAdmissionPlugins + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfig. +func (in *AdmissionConfig) DeepCopy() *AdmissionConfig { + if in == nil { + return nil + } + out := new(AdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionPluginConfig) DeepCopyInto(out *AdmissionPluginConfig) { + *out = *in + in.Configuration.DeepCopyInto(&out.Configuration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfig. +func (in *AdmissionPluginConfig) DeepCopy() *AdmissionPluginConfig { + if in == nil { + return nil + } + out := new(AdmissionPluginConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaCloudPlatformSpec) DeepCopyInto(out *AlibabaCloudPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudPlatformSpec. +func (in *AlibabaCloudPlatformSpec) DeepCopy() *AlibabaCloudPlatformSpec { + if in == nil { + return nil + } + out := new(AlibabaCloudPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaCloudPlatformStatus) DeepCopyInto(out *AlibabaCloudPlatformStatus) { + *out = *in + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make([]AlibabaCloudResourceTag, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudPlatformStatus. 
+func (in *AlibabaCloudPlatformStatus) DeepCopy() *AlibabaCloudPlatformStatus { + if in == nil { + return nil + } + out := new(AlibabaCloudPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaCloudResourceTag) DeepCopyInto(out *AlibabaCloudResourceTag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudResourceTag. +func (in *AlibabaCloudResourceTag) DeepCopy() *AlibabaCloudResourceTag { + if in == nil { + return nil + } + out := new(AlibabaCloudResourceTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Audit) DeepCopyInto(out *Audit) { + *out = *in + if in.CustomRules != nil { + in, out := &in.CustomRules, &out.CustomRules + *out = make([]AuditCustomRule, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Audit. +func (in *Audit) DeepCopy() *Audit { + if in == nil { + return nil + } + out := new(Audit) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditConfig) DeepCopyInto(out *AuditConfig) { + *out = *in + in.PolicyConfiguration.DeepCopyInto(&out.PolicyConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditConfig. +func (in *AuditConfig) DeepCopy() *AuditConfig { + if in == nil { + return nil + } + out := new(AuditConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditCustomRule) DeepCopyInto(out *AuditCustomRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditCustomRule. +func (in *AuditCustomRule) DeepCopy() *AuditCustomRule { + if in == nil { + return nil + } + out := new(AuditCustomRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Authentication) DeepCopyInto(out *Authentication) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication. +func (in *Authentication) DeepCopy() *Authentication { + if in == nil { + return nil + } + out := new(Authentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Authentication) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthenticationList) DeepCopyInto(out *AuthenticationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Authentication, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationList. +func (in *AuthenticationList) DeepCopy() *AuthenticationList { + if in == nil { + return nil + } + out := new(AuthenticationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthenticationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) { + *out = *in + out.OAuthMetadata = in.OAuthMetadata + if in.WebhookTokenAuthenticators != nil { + in, out := &in.WebhookTokenAuthenticators, &out.WebhookTokenAuthenticators + *out = make([]DeprecatedWebhookTokenAuthenticator, len(*in)) + copy(*out, *in) + } + if in.WebhookTokenAuthenticator != nil { + in, out := &in.WebhookTokenAuthenticator, &out.WebhookTokenAuthenticator + *out = new(WebhookTokenAuthenticator) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec. +func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec { + if in == nil { + return nil + } + out := new(AuthenticationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationStatus) DeepCopyInto(out *AuthenticationStatus) { + *out = *in + out.IntegratedOAuthMetadata = in.IntegratedOAuthMetadata + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationStatus. +func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus { + if in == nil { + return nil + } + out := new(AuthenticationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzurePlatformSpec) DeepCopyInto(out *AzurePlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformSpec. +func (in *AzurePlatformSpec) DeepCopy() *AzurePlatformSpec { + if in == nil { + return nil + } + out := new(AzurePlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzurePlatformStatus) DeepCopyInto(out *AzurePlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformStatus. +func (in *AzurePlatformStatus) DeepCopy() *AzurePlatformStatus { + if in == nil { + return nil + } + out := new(AzurePlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BareMetalPlatformSpec) DeepCopyInto(out *BareMetalPlatformSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformSpec.
+func (in *BareMetalPlatformSpec) DeepCopy() *BareMetalPlatformSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(BareMetalPlatformSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BareMetalPlatformStatus) DeepCopyInto(out *BareMetalPlatformStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformStatus.
+func (in *BareMetalPlatformStatus) DeepCopy() *BareMetalPlatformStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(BareMetalPlatformStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BasicAuthIdentityProvider) DeepCopyInto(out *BasicAuthIdentityProvider) {
+	*out = *in
+	out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthIdentityProvider.
+func (in *BasicAuthIdentityProvider) DeepCopy() *BasicAuthIdentityProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(BasicAuthIdentityProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Build) DeepCopyInto(out *Build) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Build.
+func (in *Build) DeepCopy() *Build {
+	if in == nil {
+		return nil
+	}
+	out := new(Build)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Build) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildDefaults) DeepCopyInto(out *BuildDefaults) {
+	*out = *in
+	if in.DefaultProxy != nil {
+		in, out := &in.DefaultProxy, &out.DefaultProxy
+		*out = new(ProxySpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.GitProxy != nil {
+		in, out := &in.GitProxy, &out.GitProxy
+		*out = new(ProxySpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Env != nil {
+		in, out := &in.Env, &out.Env
+		*out = make([]corev1.EnvVar, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ImageLabels != nil {
+		in, out := &in.ImageLabels, &out.ImageLabels
+		*out = make([]ImageLabel, len(*in))
+		copy(*out, *in)
+	}
+	in.Resources.DeepCopyInto(&out.Resources)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildDefaults.
+func (in *BuildDefaults) DeepCopy() *BuildDefaults {
+	if in == nil {
+		return nil
+	}
+	out := new(BuildDefaults)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildList) DeepCopyInto(out *BuildList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Build, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildList.
+func (in *BuildList) DeepCopy() *BuildList {
+	if in == nil {
+		return nil
+	}
+	out := new(BuildList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BuildList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildOverrides) DeepCopyInto(out *BuildOverrides) {
+	*out = *in
+	if in.ImageLabels != nil {
+		in, out := &in.ImageLabels, &out.ImageLabels
+		*out = make([]ImageLabel, len(*in))
+		copy(*out, *in)
+	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]corev1.Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ForcePull != nil {
+		in, out := &in.ForcePull, &out.ForcePull
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOverrides.
+func (in *BuildOverrides) DeepCopy() *BuildOverrides {
+	if in == nil {
+		return nil
+	}
+	out := new(BuildOverrides)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildSpec) DeepCopyInto(out *BuildSpec) {
+	*out = *in
+	out.AdditionalTrustedCA = in.AdditionalTrustedCA
+	in.BuildDefaults.DeepCopyInto(&out.BuildDefaults)
+	in.BuildOverrides.DeepCopyInto(&out.BuildOverrides)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSpec.
+func (in *BuildSpec) DeepCopy() *BuildSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(BuildSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CertInfo) DeepCopyInto(out *CertInfo) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertInfo.
+func (in *CertInfo) DeepCopy() *CertInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(CertInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClientConnectionOverrides) DeepCopyInto(out *ClientConnectionOverrides) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionOverrides.
+func (in *ClientConnectionOverrides) DeepCopy() *ClientConnectionOverrides {
+	if in == nil {
+		return nil
+	}
+	out := new(ClientConnectionOverrides)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterCondition) DeepCopyInto(out *ClusterCondition) {
+	*out = *in
+	if in.PromQL != nil {
+		in, out := &in.PromQL, &out.PromQL
+		*out = new(PromQLClusterCondition)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCondition.
+func (in *ClusterCondition) DeepCopy() *ClusterCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry.
+func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterNetworkEntry)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterOperator) DeepCopyInto(out *ClusterOperator) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.Spec = in.Spec
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperator.
+func (in *ClusterOperator) DeepCopy() *ClusterOperator {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterOperator)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterOperator) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterOperatorList) DeepCopyInto(out *ClusterOperatorList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ClusterOperator, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorList.
+func (in *ClusterOperatorList) DeepCopy() *ClusterOperatorList {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterOperatorList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterOperatorList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterOperatorSpec) DeepCopyInto(out *ClusterOperatorSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorSpec.
+func (in *ClusterOperatorSpec) DeepCopy() *ClusterOperatorSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterOperatorSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterOperatorStatus) DeepCopyInto(out *ClusterOperatorStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]ClusterOperatorStatusCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Versions != nil {
+		in, out := &in.Versions, &out.Versions
+		*out = make([]OperandVersion, len(*in))
+		copy(*out, *in)
+	}
+	if in.RelatedObjects != nil {
+		in, out := &in.RelatedObjects, &out.RelatedObjects
+		*out = make([]ObjectReference, len(*in))
+		copy(*out, *in)
+	}
+	in.Extension.DeepCopyInto(&out.Extension)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatus.
+func (in *ClusterOperatorStatus) DeepCopy() *ClusterOperatorStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterOperatorStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterOperatorStatusCondition) DeepCopyInto(out *ClusterOperatorStatusCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatusCondition.
+func (in *ClusterOperatorStatusCondition) DeepCopy() *ClusterOperatorStatusCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterOperatorStatusCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterVersion) DeepCopyInto(out *ClusterVersion) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersion.
+func (in *ClusterVersion) DeepCopy() *ClusterVersion {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterVersion)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterVersion) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterVersionCapabilitiesSpec) DeepCopyInto(out *ClusterVersionCapabilitiesSpec) {
+	*out = *in
+	if in.AdditionalEnabledCapabilities != nil {
+		in, out := &in.AdditionalEnabledCapabilities, &out.AdditionalEnabledCapabilities
+		*out = make([]ClusterVersionCapability, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionCapabilitiesSpec.
+func (in *ClusterVersionCapabilitiesSpec) DeepCopy() *ClusterVersionCapabilitiesSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterVersionCapabilitiesSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterVersionCapabilitiesStatus) DeepCopyInto(out *ClusterVersionCapabilitiesStatus) {
+	*out = *in
+	if in.EnabledCapabilities != nil {
+		in, out := &in.EnabledCapabilities, &out.EnabledCapabilities
+		*out = make([]ClusterVersionCapability, len(*in))
+		copy(*out, *in)
+	}
+	if in.KnownCapabilities != nil {
+		in, out := &in.KnownCapabilities, &out.KnownCapabilities
+		*out = make([]ClusterVersionCapability, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionCapabilitiesStatus.
+func (in *ClusterVersionCapabilitiesStatus) DeepCopy() *ClusterVersionCapabilitiesStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterVersionCapabilitiesStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterVersionList) DeepCopyInto(out *ClusterVersionList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ClusterVersion, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionList.
+func (in *ClusterVersionList) DeepCopy() *ClusterVersionList {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterVersionList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterVersionList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterVersionSpec) DeepCopyInto(out *ClusterVersionSpec) {
+	*out = *in
+	if in.DesiredUpdate != nil {
+		in, out := &in.DesiredUpdate, &out.DesiredUpdate
+		*out = new(Update)
+		**out = **in
+	}
+	if in.Capabilities != nil {
+		in, out := &in.Capabilities, &out.Capabilities
+		*out = new(ClusterVersionCapabilitiesSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Overrides != nil {
+		in, out := &in.Overrides, &out.Overrides
+		*out = make([]ComponentOverride, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionSpec.
+func (in *ClusterVersionSpec) DeepCopy() *ClusterVersionSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterVersionSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterVersionStatus) DeepCopyInto(out *ClusterVersionStatus) {
+	*out = *in
+	in.Desired.DeepCopyInto(&out.Desired)
+	if in.History != nil {
+		in, out := &in.History, &out.History
+		*out = make([]UpdateHistory, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.Capabilities.DeepCopyInto(&out.Capabilities)
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]ClusterOperatorStatusCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AvailableUpdates != nil {
+		in, out := &in.AvailableUpdates, &out.AvailableUpdates
+		*out = make([]Release, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ConditionalUpdates != nil {
+		in, out := &in.ConditionalUpdates, &out.ConditionalUpdates
+		*out = make([]ConditionalUpdate, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionStatus.
+func (in *ClusterVersionStatus) DeepCopy() *ClusterVersionStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterVersionStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComponentOverride) DeepCopyInto(out *ComponentOverride) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentOverride.
+func (in *ComponentOverride) DeepCopy() *ComponentOverride {
+	if in == nil {
+		return nil
+	}
+	out := new(ComponentOverride)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComponentRouteSpec) DeepCopyInto(out *ComponentRouteSpec) {
+	*out = *in
+	out.ServingCertKeyPairSecret = in.ServingCertKeyPairSecret
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentRouteSpec.
+func (in *ComponentRouteSpec) DeepCopy() *ComponentRouteSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ComponentRouteSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComponentRouteStatus) DeepCopyInto(out *ComponentRouteStatus) {
+	*out = *in
+	if in.ConsumingUsers != nil {
+		in, out := &in.ConsumingUsers, &out.ConsumingUsers
+		*out = make([]ConsumingUser, len(*in))
+		copy(*out, *in)
+	}
+	if in.CurrentHostnames != nil {
+		in, out := &in.CurrentHostnames, &out.CurrentHostnames
+		*out = make([]Hostname, len(*in))
+		copy(*out, *in)
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]metav1.Condition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RelatedObjects != nil {
+		in, out := &in.RelatedObjects, &out.RelatedObjects
+		*out = make([]ObjectReference, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentRouteStatus.
+func (in *ComponentRouteStatus) DeepCopy() *ComponentRouteStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ComponentRouteStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionalUpdate) DeepCopyInto(out *ConditionalUpdate) {
+	*out = *in
+	in.Release.DeepCopyInto(&out.Release)
+	if in.Risks != nil {
+		in, out := &in.Risks, &out.Risks
+		*out = make([]ConditionalUpdateRisk, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]metav1.Condition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionalUpdate.
+func (in *ConditionalUpdate) DeepCopy() *ConditionalUpdate {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionalUpdate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionalUpdateRisk) DeepCopyInto(out *ConditionalUpdateRisk) {
+	*out = *in
+	if in.MatchingRules != nil {
+		in, out := &in.MatchingRules, &out.MatchingRules
+		*out = make([]ClusterCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionalUpdateRisk.
+func (in *ConditionalUpdateRisk) DeepCopy() *ConditionalUpdateRisk {
+	if in == nil {
+		return nil
+	}
+	out := new(ConditionalUpdateRisk)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapFileReference) DeepCopyInto(out *ConfigMapFileReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapFileReference.
+func (in *ConfigMapFileReference) DeepCopy() *ConfigMapFileReference {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapFileReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapNameReference) DeepCopyInto(out *ConfigMapNameReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNameReference.
+func (in *ConfigMapNameReference) DeepCopy() *ConfigMapNameReference {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapNameReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Console) DeepCopyInto(out *Console) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.Spec = in.Spec
+	out.Status = in.Status
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Console.
+func (in *Console) DeepCopy() *Console {
+	if in == nil {
+		return nil
+	}
+	out := new(Console)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Console) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConsoleAuthentication) DeepCopyInto(out *ConsoleAuthentication) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleAuthentication.
+func (in *ConsoleAuthentication) DeepCopy() *ConsoleAuthentication {
+	if in == nil {
+		return nil
+	}
+	out := new(ConsoleAuthentication)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConsoleList) DeepCopyInto(out *ConsoleList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Console, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleList.
+func (in *ConsoleList) DeepCopy() *ConsoleList {
+	if in == nil {
+		return nil
+	}
+	out := new(ConsoleList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ConsoleList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConsoleSpec) DeepCopyInto(out *ConsoleSpec) {
+	*out = *in
+	out.Authentication = in.Authentication
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSpec.
+func (in *ConsoleSpec) DeepCopy() *ConsoleSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ConsoleSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConsoleStatus) DeepCopyInto(out *ConsoleStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleStatus.
+func (in *ConsoleStatus) DeepCopy() *ConsoleStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ConsoleStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomFeatureGates) DeepCopyInto(out *CustomFeatureGates) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFeatureGates.
+func (in *CustomFeatureGates) DeepCopy() *CustomFeatureGates {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomFeatureGates)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomTLSProfile) DeepCopyInto(out *CustomTLSProfile) {
+	*out = *in
+	in.TLSProfileSpec.DeepCopyInto(&out.TLSProfileSpec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomTLSProfile.
+func (in *CustomTLSProfile) DeepCopy() *CustomTLSProfile {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomTLSProfile)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNS) DeepCopyInto(out *DNS) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	out.Status = in.Status
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS.
+func (in *DNS) DeepCopy() *DNS {
+	if in == nil {
+		return nil
+	}
+	out := new(DNS)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DNS) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSList) DeepCopyInto(out *DNSList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]DNS, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSList.
+func (in *DNSList) DeepCopy() *DNSList {
+	if in == nil {
+		return nil
+	}
+	out := new(DNSList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DNSList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSSpec) DeepCopyInto(out *DNSSpec) {
+	*out = *in
+	if in.PublicZone != nil {
+		in, out := &in.PublicZone, &out.PublicZone
+		*out = new(DNSZone)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PrivateZone != nil {
+		in, out := &in.PrivateZone, &out.PrivateZone
+		*out = new(DNSZone)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec.
+func (in *DNSSpec) DeepCopy() *DNSSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DNSSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSStatus) DeepCopyInto(out *DNSStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSStatus.
+func (in *DNSStatus) DeepCopy() *DNSStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(DNSStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSZone) DeepCopyInto(out *DNSZone) {
+	*out = *in
+	if in.Tags != nil {
+		in, out := &in.Tags, &out.Tags
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZone.
+func (in *DNSZone) DeepCopy() *DNSZone {
+	if in == nil {
+		return nil
+	}
+	out := new(DNSZone)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DelegatedAuthentication) DeepCopyInto(out *DelegatedAuthentication) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthentication.
+func (in *DelegatedAuthentication) DeepCopy() *DelegatedAuthentication {
+	if in == nil {
+		return nil
+	}
+	out := new(DelegatedAuthentication)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DelegatedAuthorization) DeepCopyInto(out *DelegatedAuthorization) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthorization.
+func (in *DelegatedAuthorization) DeepCopy() *DelegatedAuthorization {
+	if in == nil {
+		return nil
+	}
+	out := new(DelegatedAuthorization)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeprecatedWebhookTokenAuthenticator) DeepCopyInto(out *DeprecatedWebhookTokenAuthenticator) {
+	*out = *in
+	out.KubeConfig = in.KubeConfig
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeprecatedWebhookTokenAuthenticator.
+func (in *DeprecatedWebhookTokenAuthenticator) DeepCopy() *DeprecatedWebhookTokenAuthenticator {
+	if in == nil {
+		return nil
+	}
+	out := new(DeprecatedWebhookTokenAuthenticator)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EquinixMetalPlatformSpec) DeepCopyInto(out *EquinixMetalPlatformSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EquinixMetalPlatformSpec.
+func (in *EquinixMetalPlatformSpec) DeepCopy() *EquinixMetalPlatformSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(EquinixMetalPlatformSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EquinixMetalPlatformStatus) DeepCopyInto(out *EquinixMetalPlatformStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EquinixMetalPlatformStatus.
+func (in *EquinixMetalPlatformStatus) DeepCopy() *EquinixMetalPlatformStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(EquinixMetalPlatformStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdConnectionInfo) DeepCopyInto(out *EtcdConnectionInfo) {
+	*out = *in
+	if in.URLs != nil {
+		in, out := &in.URLs, &out.URLs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	out.CertInfo = in.CertInfo
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdConnectionInfo.
+func (in *EtcdConnectionInfo) DeepCopy() *EtcdConnectionInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(EtcdConnectionInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdStorageConfig) DeepCopyInto(out *EtcdStorageConfig) {
+	*out = *in
+	in.EtcdConnectionInfo.DeepCopyInto(&out.EtcdConnectionInfo)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStorageConfig.
+func (in *EtcdStorageConfig) DeepCopy() *EtcdStorageConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(EtcdStorageConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalIPConfig) DeepCopyInto(out *ExternalIPConfig) {
+	*out = *in
+	if in.Policy != nil {
+		in, out := &in.Policy, &out.Policy
+		*out = new(ExternalIPPolicy)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AutoAssignCIDRs != nil {
+		in, out := &in.AutoAssignCIDRs, &out.AutoAssignCIDRs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPConfig.
+func (in *ExternalIPConfig) DeepCopy() *ExternalIPConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ExternalIPConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalIPPolicy) DeepCopyInto(out *ExternalIPPolicy) {
+	*out = *in
+	if in.AllowedCIDRs != nil {
+		in, out := &in.AllowedCIDRs, &out.AllowedCIDRs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.RejectedCIDRs != nil {
+		in, out := &in.RejectedCIDRs, &out.RejectedCIDRs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPPolicy.
+func (in *ExternalIPPolicy) DeepCopy() *ExternalIPPolicy {
+	if in == nil {
+		return nil
+	}
+	out := new(ExternalIPPolicy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGate) DeepCopyInto(out *FeatureGate) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	out.Status = in.Status
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGate.
+func (in *FeatureGate) DeepCopy() *FeatureGate {
+	if in == nil {
+		return nil
+	}
+	out := new(FeatureGate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *FeatureGate) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGateEnabledDisabled) DeepCopyInto(out *FeatureGateEnabledDisabled) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Disabled != nil {
+		in, out := &in.Disabled, &out.Disabled
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateEnabledDisabled.
+func (in *FeatureGateEnabledDisabled) DeepCopy() *FeatureGateEnabledDisabled {
+	if in == nil {
+		return nil
+	}
+	out := new(FeatureGateEnabledDisabled)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGateList) DeepCopyInto(out *FeatureGateList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]FeatureGate, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateList.
+func (in *FeatureGateList) DeepCopy() *FeatureGateList {
+	if in == nil {
+		return nil
+	}
+	out := new(FeatureGateList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *FeatureGateList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGateSelection) DeepCopyInto(out *FeatureGateSelection) {
+	*out = *in
+	if in.CustomNoUpgrade != nil {
+		in, out := &in.CustomNoUpgrade, &out.CustomNoUpgrade
+		*out = new(CustomFeatureGates)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSelection.
+func (in *FeatureGateSelection) DeepCopy() *FeatureGateSelection {
+	if in == nil {
+		return nil
+	}
+	out := new(FeatureGateSelection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGateSpec) DeepCopyInto(out *FeatureGateSpec) {
+	*out = *in
+	in.FeatureGateSelection.DeepCopyInto(&out.FeatureGateSelection)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSpec.
+func (in *FeatureGateSpec) DeepCopy() *FeatureGateSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(FeatureGateSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGateStatus) DeepCopyInto(out *FeatureGateStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateStatus.
+func (in *FeatureGateStatus) DeepCopy() *FeatureGateStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(FeatureGateStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPPlatformSpec) DeepCopyInto(out *GCPPlatformSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPPlatformSpec.
+func (in *GCPPlatformSpec) DeepCopy() *GCPPlatformSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(GCPPlatformSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPPlatformStatus) DeepCopyInto(out *GCPPlatformStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPPlatformStatus.
+func (in *GCPPlatformStatus) DeepCopy() *GCPPlatformStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(GCPPlatformStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GenericAPIServerConfig) DeepCopyInto(out *GenericAPIServerConfig) {
+	*out = *in
+	in.ServingInfo.DeepCopyInto(&out.ServingInfo)
+	if in.CORSAllowedOrigins != nil {
+		in, out := &in.CORSAllowedOrigins, &out.CORSAllowedOrigins
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	in.AuditConfig.DeepCopyInto(&out.AuditConfig)
+	in.StorageConfig.DeepCopyInto(&out.StorageConfig)
+	in.AdmissionConfig.DeepCopyInto(&out.AdmissionConfig)
+	out.KubeClientConfig = in.KubeClientConfig
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericAPIServerConfig.
+func (in *GenericAPIServerConfig) DeepCopy() *GenericAPIServerConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(GenericAPIServerConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GenericControllerConfig) DeepCopyInto(out *GenericControllerConfig) {
+	*out = *in
+	in.ServingInfo.DeepCopyInto(&out.ServingInfo)
+	out.LeaderElection = in.LeaderElection
+	out.Authentication = in.Authentication
+	out.Authorization = in.Authorization
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericControllerConfig.
+func (in *GenericControllerConfig) DeepCopy() *GenericControllerConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(GenericControllerConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitHubIdentityProvider) DeepCopyInto(out *GitHubIdentityProvider) {
+	*out = *in
+	out.ClientSecret = in.ClientSecret
+	if in.Organizations != nil {
+		in, out := &in.Organizations, &out.Organizations
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Teams != nil {
+		in, out := &in.Teams, &out.Teams
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	out.CA = in.CA
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubIdentityProvider.
+func (in *GitHubIdentityProvider) DeepCopy() *GitHubIdentityProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(GitHubIdentityProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitLabIdentityProvider) DeepCopyInto(out *GitLabIdentityProvider) {
+	*out = *in
+	out.ClientSecret = in.ClientSecret
+	out.CA = in.CA
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabIdentityProvider.
+func (in *GitLabIdentityProvider) DeepCopy() *GitLabIdentityProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(GitLabIdentityProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GoogleIdentityProvider) DeepCopyInto(out *GoogleIdentityProvider) {
+	*out = *in
+	out.ClientSecret = in.ClientSecret
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleIdentityProvider.
+func (in *GoogleIdentityProvider) DeepCopy() *GoogleIdentityProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(GoogleIdentityProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTPasswdIdentityProvider) DeepCopyInto(out *HTPasswdIdentityProvider) {
+	*out = *in
+	out.FileData = in.FileData
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTPasswdIdentityProvider.
+func (in *HTPasswdIdentityProvider) DeepCopy() *HTPasswdIdentityProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(HTPasswdIdentityProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPServingInfo) DeepCopyInto(out *HTTPServingInfo) {
+	*out = *in
+	in.ServingInfo.DeepCopyInto(&out.ServingInfo)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPServingInfo.
+func (in *HTTPServingInfo) DeepCopy() *HTTPServingInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPServingInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HubSource) DeepCopyInto(out *HubSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSource.
+func (in *HubSource) DeepCopy() *HubSource {
+	if in == nil {
+		return nil
+	}
+	out := new(HubSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HubSourceStatus) DeepCopyInto(out *HubSourceStatus) {
+	*out = *in
+	out.HubSource = in.HubSource
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSourceStatus.
+func (in *HubSourceStatus) DeepCopy() *HubSourceStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(HubSourceStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMCloudPlatformSpec) DeepCopyInto(out *IBMCloudPlatformSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudPlatformSpec.
+func (in *IBMCloudPlatformSpec) DeepCopy() *IBMCloudPlatformSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(IBMCloudPlatformSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMCloudPlatformStatus) DeepCopyInto(out *IBMCloudPlatformStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudPlatformStatus.
+func (in *IBMCloudPlatformStatus) DeepCopy() *IBMCloudPlatformStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(IBMCloudPlatformStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IdentityProvider) DeepCopyInto(out *IdentityProvider) {
+	*out = *in
+	in.IdentityProviderConfig.DeepCopyInto(&out.IdentityProviderConfig)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProvider.
+func (in *IdentityProvider) DeepCopy() *IdentityProvider {
+	if in == nil {
+		return nil
+	}
+	out := new(IdentityProvider)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IdentityProviderConfig) DeepCopyInto(out *IdentityProviderConfig) {
+	*out = *in
+	if in.BasicAuth != nil {
+		in, out := &in.BasicAuth, &out.BasicAuth
+		*out = new(BasicAuthIdentityProvider)
+		**out = **in
+	}
+	if in.GitHub != nil {
+		in, out := &in.GitHub, &out.GitHub
+		*out = new(GitHubIdentityProvider)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.GitLab != nil {
+		in, out := &in.GitLab, &out.GitLab
+		*out = new(GitLabIdentityProvider)
+		**out = **in
+	}
+	if in.Google != nil {
+		in, out := &in.Google, &out.Google
+		*out = new(GoogleIdentityProvider)
+		**out = **in
+	}
+	if in.HTPasswd != nil {
+		in, out := &in.HTPasswd, &out.HTPasswd
+		*out = new(HTPasswdIdentityProvider)
+		**out = **in
+	}
+	if in.Keystone != nil {
+		in, out := &in.Keystone, &out.Keystone
+		*out = new(KeystoneIdentityProvider)
+		**out = **in
+	}
+	if in.LDAP != nil {
+		in, out := &in.LDAP, &out.LDAP
+		*out = new(LDAPIdentityProvider)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OpenID != nil {
+		in, out := &in.OpenID, &out.OpenID
+		*out = new(OpenIDIdentityProvider)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.RequestHeader != nil {
+		in, out := &in.RequestHeader, &out.RequestHeader
+		*out = new(RequestHeaderIdentityProvider)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderConfig.
+func (in *IdentityProviderConfig) DeepCopy() *IdentityProviderConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(IdentityProviderConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Image) DeepCopyInto(out *Image) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image.
+func (in *Image) DeepCopy() *Image {
+	if in == nil {
+		return nil
+	}
+	out := new(Image)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Image) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageContentPolicy) DeepCopyInto(out *ImageContentPolicy) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicy.
+func (in *ImageContentPolicy) DeepCopy() *ImageContentPolicy {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageContentPolicy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageContentPolicy) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageContentPolicyList) DeepCopyInto(out *ImageContentPolicyList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ImageContentPolicy, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicyList.
+func (in *ImageContentPolicyList) DeepCopy() *ImageContentPolicyList {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageContentPolicyList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageContentPolicyList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageContentPolicySpec) DeepCopyInto(out *ImageContentPolicySpec) {
+	*out = *in
+	if in.RepositoryDigestMirrors != nil {
+		in, out := &in.RepositoryDigestMirrors, &out.RepositoryDigestMirrors
+		*out = make([]RepositoryDigestMirrors, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicySpec.
+func (in *ImageContentPolicySpec) DeepCopy() *ImageContentPolicySpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageContentPolicySpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageDigestMirrorSet) DeepCopyInto(out *ImageDigestMirrorSet) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrorSet.
+func (in *ImageDigestMirrorSet) DeepCopy() *ImageDigestMirrorSet {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageDigestMirrorSet)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageDigestMirrorSet) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageDigestMirrorSetList) DeepCopyInto(out *ImageDigestMirrorSetList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ImageDigestMirrorSet, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrorSetList.
+func (in *ImageDigestMirrorSetList) DeepCopy() *ImageDigestMirrorSetList {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageDigestMirrorSetList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageDigestMirrorSetList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageDigestMirrorSetSpec) DeepCopyInto(out *ImageDigestMirrorSetSpec) {
+	*out = *in
+	if in.ImageDigestMirrors != nil {
+		in, out := &in.ImageDigestMirrors, &out.ImageDigestMirrors
+		*out = make([]ImageDigestMirrors, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrorSetSpec.
+func (in *ImageDigestMirrorSetSpec) DeepCopy() *ImageDigestMirrorSetSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageDigestMirrorSetSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageDigestMirrors) DeepCopyInto(out *ImageDigestMirrors) {
+	*out = *in
+	if in.Mirrors != nil {
+		in, out := &in.Mirrors, &out.Mirrors
+		*out = make([]ImageMirror, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrors.
+func (in *ImageDigestMirrors) DeepCopy() *ImageDigestMirrors {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageDigestMirrors)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageLabel) DeepCopyInto(out *ImageLabel) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLabel.
+func (in *ImageLabel) DeepCopy() *ImageLabel {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageLabel)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageList) DeepCopyInto(out *ImageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Image, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList. +func (in *ImageList) DeepCopy() *ImageList { + if in == nil { + return nil + } + out := new(ImageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { + *out = *in + if in.AllowedRegistriesForImport != nil { + in, out := &in.AllowedRegistriesForImport, &out.AllowedRegistriesForImport + *out = make([]RegistryLocation, len(*in)) + copy(*out, *in) + } + if in.ExternalRegistryHostnames != nil { + in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.AdditionalTrustedCA = in.AdditionalTrustedCA + in.RegistrySources.DeepCopyInto(&out.RegistrySources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec. +func (in *ImageSpec) DeepCopy() *ImageSpec { + if in == nil { + return nil + } + out := new(ImageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStatus) DeepCopyInto(out *ImageStatus) { + *out = *in + if in.ExternalRegistryHostnames != nil { + in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStatus. +func (in *ImageStatus) DeepCopy() *ImageStatus { + if in == nil { + return nil + } + out := new(ImageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTagMirrorSet) DeepCopyInto(out *ImageTagMirrorSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrorSet. +func (in *ImageTagMirrorSet) DeepCopy() *ImageTagMirrorSet { + if in == nil { + return nil + } + out := new(ImageTagMirrorSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageTagMirrorSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageTagMirrorSetList) DeepCopyInto(out *ImageTagMirrorSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageTagMirrorSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrorSetList. +func (in *ImageTagMirrorSetList) DeepCopy() *ImageTagMirrorSetList { + if in == nil { + return nil + } + out := new(ImageTagMirrorSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageTagMirrorSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTagMirrorSetSpec) DeepCopyInto(out *ImageTagMirrorSetSpec) { + *out = *in + if in.ImageTagMirrors != nil { + in, out := &in.ImageTagMirrors, &out.ImageTagMirrors + *out = make([]ImageTagMirrors, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrorSetSpec. +func (in *ImageTagMirrorSetSpec) DeepCopy() *ImageTagMirrorSetSpec { + if in == nil { + return nil + } + out := new(ImageTagMirrorSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTagMirrors) DeepCopyInto(out *ImageTagMirrors) { + *out = *in + if in.Mirrors != nil { + in, out := &in.Mirrors, &out.Mirrors + *out = make([]ImageMirror, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrors. +func (in *ImageTagMirrors) DeepCopy() *ImageTagMirrors { + if in == nil { + return nil + } + out := new(ImageTagMirrors) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Infrastructure) DeepCopyInto(out *Infrastructure) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Infrastructure. +func (in *Infrastructure) DeepCopy() *Infrastructure { + if in == nil { + return nil + } + out := new(Infrastructure) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Infrastructure) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InfrastructureList) DeepCopyInto(out *InfrastructureList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Infrastructure, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureList. +func (in *InfrastructureList) DeepCopy() *InfrastructureList { + if in == nil { + return nil + } + out := new(InfrastructureList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InfrastructureList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureSpec) DeepCopyInto(out *InfrastructureSpec) { + *out = *in + out.CloudConfig = in.CloudConfig + in.PlatformSpec.DeepCopyInto(&out.PlatformSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureSpec. +func (in *InfrastructureSpec) DeepCopy() *InfrastructureSpec { + if in == nil { + return nil + } + out := new(InfrastructureSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureStatus) DeepCopyInto(out *InfrastructureStatus) { + *out = *in + if in.PlatformStatus != nil { + in, out := &in.PlatformStatus, &out.PlatformStatus + *out = new(PlatformStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureStatus. +func (in *InfrastructureStatus) DeepCopy() *InfrastructureStatus { + if in == nil { + return nil + } + out := new(InfrastructureStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Ingress) DeepCopyInto(out *Ingress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. +func (in *Ingress) DeepCopy() *Ingress { + if in == nil { + return nil + } + out := new(Ingress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Ingress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressList) DeepCopyInto(out *IngressList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Ingress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList. 
+func (in *IngressList) DeepCopy() *IngressList { + if in == nil { + return nil + } + out := new(IngressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { + *out = *in + if in.ComponentRoutes != nil { + in, out := &in.ComponentRoutes, &out.ComponentRoutes + *out = make([]ComponentRouteSpec, len(*in)) + copy(*out, *in) + } + if in.RequiredHSTSPolicies != nil { + in, out := &in.RequiredHSTSPolicies, &out.RequiredHSTSPolicies + *out = make([]RequiredHSTSPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec. +func (in *IngressSpec) DeepCopy() *IngressSpec { + if in == nil { + return nil + } + out := new(IngressSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressStatus) DeepCopyInto(out *IngressStatus) { + *out = *in + if in.ComponentRoutes != nil { + in, out := &in.ComponentRoutes, &out.ComponentRoutes + *out = make([]ComponentRouteStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus. +func (in *IngressStatus) DeepCopy() *IngressStatus { + if in == nil { + return nil + } + out := new(IngressStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntermediateTLSProfile) DeepCopyInto(out *IntermediateTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntermediateTLSProfile. +func (in *IntermediateTLSProfile) DeepCopy() *IntermediateTLSProfile { + if in == nil { + return nil + } + out := new(IntermediateTLSProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeystoneIdentityProvider) DeepCopyInto(out *KeystoneIdentityProvider) { + *out = *in + out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeystoneIdentityProvider. +func (in *KeystoneIdentityProvider) DeepCopy() *KeystoneIdentityProvider { + if in == nil { + return nil + } + out := new(KeystoneIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeClientConfig) DeepCopyInto(out *KubeClientConfig) { + *out = *in + out.ConnectionOverrides = in.ConnectionOverrides + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeClientConfig. 
+func (in *KubeClientConfig) DeepCopy() *KubeClientConfig { + if in == nil { + return nil + } + out := new(KubeClientConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubevirtPlatformSpec) DeepCopyInto(out *KubevirtPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPlatformSpec. +func (in *KubevirtPlatformSpec) DeepCopy() *KubevirtPlatformSpec { + if in == nil { + return nil + } + out := new(KubevirtPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubevirtPlatformStatus) DeepCopyInto(out *KubevirtPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPlatformStatus. +func (in *KubevirtPlatformStatus) DeepCopy() *KubevirtPlatformStatus { + if in == nil { + return nil + } + out := new(KubevirtPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LDAPAttributeMapping) DeepCopyInto(out *LDAPAttributeMapping) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsername != nil { + in, out := &in.PreferredUsername, &out.PreferredUsername + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPAttributeMapping. +func (in *LDAPAttributeMapping) DeepCopy() *LDAPAttributeMapping { + if in == nil { + return nil + } + out := new(LDAPAttributeMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LDAPIdentityProvider) DeepCopyInto(out *LDAPIdentityProvider) { + *out = *in + out.BindPassword = in.BindPassword + out.CA = in.CA + in.Attributes.DeepCopyInto(&out.Attributes) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPIdentityProvider. +func (in *LDAPIdentityProvider) DeepCopy() *LDAPIdentityProvider { + if in == nil { + return nil + } + out := new(LDAPIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaderElection) DeepCopyInto(out *LeaderElection) { + *out = *in + out.LeaseDuration = in.LeaseDuration + out.RenewDeadline = in.RenewDeadline + out.RetryPeriod = in.RetryPeriod + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElection. +func (in *LeaderElection) DeepCopy() *LeaderElection { + if in == nil { + return nil + } + out := new(LeaderElection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MTUMigration) DeepCopyInto(out *MTUMigration) { + *out = *in + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(MTUMigrationValues) + (*in).DeepCopyInto(*out) + } + if in.Machine != nil { + in, out := &in.Machine, &out.Machine + *out = new(MTUMigrationValues) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTUMigration. +func (in *MTUMigration) DeepCopy() *MTUMigration { + if in == nil { + return nil + } + out := new(MTUMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MTUMigrationValues) DeepCopyInto(out *MTUMigrationValues) { + *out = *in + if in.To != nil { + in, out := &in.To, &out.To + *out = new(uint32) + **out = **in + } + if in.From != nil { + in, out := &in.From, &out.From + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTUMigrationValues. +func (in *MTUMigrationValues) DeepCopy() *MTUMigrationValues { + if in == nil { + return nil + } + out := new(MTUMigrationValues) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaxAgePolicy) DeepCopyInto(out *MaxAgePolicy) { + *out = *in + if in.LargestMaxAge != nil { + in, out := &in.LargestMaxAge, &out.LargestMaxAge + *out = new(int32) + **out = **in + } + if in.SmallestMaxAge != nil { + in, out := &in.SmallestMaxAge, &out.SmallestMaxAge + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaxAgePolicy. +func (in *MaxAgePolicy) DeepCopy() *MaxAgePolicy { + if in == nil { + return nil + } + out := new(MaxAgePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModernTLSProfile) DeepCopyInto(out *ModernTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModernTLSProfile. +func (in *ModernTLSProfile) DeepCopy() *ModernTLSProfile { + if in == nil { + return nil + } + out := new(ModernTLSProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedCertificate) DeepCopyInto(out *NamedCertificate) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCertificate. +func (in *NamedCertificate) DeepCopy() *NamedCertificate { + if in == nil { + return nil + } + out := new(NamedCertificate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Network) DeepCopyInto(out *Network) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. 
+func (in *Network) DeepCopy() *Network { + if in == nil { + return nil + } + out := new(Network) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Network) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkList) DeepCopyInto(out *NetworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Network, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList. +func (in *NetworkList) DeepCopy() *NetworkList { + if in == nil { + return nil + } + out := new(NetworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkMigration) DeepCopyInto(out *NetworkMigration) { + *out = *in + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + *out = new(MTUMigration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkMigration. +func (in *NetworkMigration) DeepCopy() *NetworkMigration { + if in == nil { + return nil + } + out := new(NetworkMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { + *out = *in + if in.ClusterNetwork != nil { + in, out := &in.ClusterNetwork, &out.ClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.ServiceNetwork != nil { + in, out := &in.ServiceNetwork, &out.ServiceNetwork + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExternalIP != nil { + in, out := &in.ExternalIP, &out.ExternalIP + *out = new(ExternalIPConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. +func (in *NetworkSpec) DeepCopy() *NetworkSpec { + if in == nil { + return nil + } + out := new(NetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) { + *out = *in + if in.ClusterNetwork != nil { + in, out := &in.ClusterNetwork, &out.ClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.ServiceNetwork != nil { + in, out := &in.ServiceNetwork, &out.ServiceNetwork + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Migration != nil { + in, out := &in.Migration, &out.Migration + *out = new(NetworkMigration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus. 
+func (in *NetworkStatus) DeepCopy() *NetworkStatus { + if in == nil { + return nil + } + out := new(NetworkStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Node) DeepCopyInto(out *Node) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. +func (in *Node) DeepCopy() *Node { + if in == nil { + return nil + } + out := new(Node) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Node) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeList) DeepCopyInto(out *NodeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Node, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList. +func (in *NodeList) DeepCopy() *NodeList { + if in == nil { + return nil + } + out := new(NodeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSpec) DeepCopyInto(out *NodeSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec. +func (in *NodeSpec) DeepCopy() *NodeSpec { + if in == nil { + return nil + } + out := new(NodeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { + *out = *in + in.WorkerLatencyProfileStatus.DeepCopyInto(&out.WorkerLatencyProfileStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus. +func (in *NodeStatus) DeepCopy() *NodeStatus { + if in == nil { + return nil + } + out := new(NodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixPlatformSpec) DeepCopyInto(out *NutanixPlatformSpec) { + *out = *in + out.PrismCentral = in.PrismCentral + if in.PrismElements != nil { + in, out := &in.PrismElements, &out.PrismElements + *out = make([]NutanixPrismElementEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPlatformSpec. 
+func (in *NutanixPlatformSpec) DeepCopy() *NutanixPlatformSpec { + if in == nil { + return nil + } + out := new(NutanixPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixPlatformStatus) DeepCopyInto(out *NutanixPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPlatformStatus. +func (in *NutanixPlatformStatus) DeepCopy() *NutanixPlatformStatus { + if in == nil { + return nil + } + out := new(NutanixPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixPrismElementEndpoint) DeepCopyInto(out *NutanixPrismElementEndpoint) { + *out = *in + out.Endpoint = in.Endpoint + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPrismElementEndpoint. +func (in *NutanixPrismElementEndpoint) DeepCopy() *NutanixPrismElementEndpoint { + if in == nil { + return nil + } + out := new(NutanixPrismElementEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixPrismEndpoint) DeepCopyInto(out *NutanixPrismEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPrismEndpoint. +func (in *NutanixPrismEndpoint) DeepCopy() *NutanixPrismEndpoint { + if in == nil { + return nil + } + out := new(NutanixPrismEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuth) DeepCopyInto(out *OAuth) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuth. +func (in *OAuth) DeepCopy() *OAuth { + if in == nil { + return nil + } + out := new(OAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuth) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthList) DeepCopyInto(out *OAuthList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OAuth, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthList. +func (in *OAuthList) DeepCopy() *OAuthList { + if in == nil { + return nil + } + out := new(OAuthList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *OAuthList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthRemoteConnectionInfo) DeepCopyInto(out *OAuthRemoteConnectionInfo) { + *out = *in + out.CA = in.CA + out.TLSClientCert = in.TLSClientCert + out.TLSClientKey = in.TLSClientKey + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthRemoteConnectionInfo. +func (in *OAuthRemoteConnectionInfo) DeepCopy() *OAuthRemoteConnectionInfo { + if in == nil { + return nil + } + out := new(OAuthRemoteConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthSpec) DeepCopyInto(out *OAuthSpec) { + *out = *in + if in.IdentityProviders != nil { + in, out := &in.IdentityProviders, &out.IdentityProviders + *out = make([]IdentityProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.TokenConfig.DeepCopyInto(&out.TokenConfig) + out.Templates = in.Templates + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthSpec. +func (in *OAuthSpec) DeepCopy() *OAuthSpec { + if in == nil { + return nil + } + out := new(OAuthSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthStatus) DeepCopyInto(out *OAuthStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthStatus. +func (in *OAuthStatus) DeepCopy() *OAuthStatus { + if in == nil { + return nil + } + out := new(OAuthStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthTemplates) DeepCopyInto(out *OAuthTemplates) { + *out = *in + out.Login = in.Login + out.ProviderSelection = in.ProviderSelection + out.Error = in.Error + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthTemplates. +func (in *OAuthTemplates) DeepCopy() *OAuthTemplates { + if in == nil { + return nil + } + out := new(OAuthTemplates) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OldTLSProfile) DeepCopyInto(out *OldTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OldTLSProfile. +func (in *OldTLSProfile) DeepCopy() *OldTLSProfile { + if in == nil { + return nil + } + out := new(OldTLSProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) { + *out = *in + if in.PreferredUsername != nil { + in, out := &in.PreferredUsername, &out.PreferredUsername + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]OpenIDClaim, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDClaims. +func (in *OpenIDClaims) DeepCopy() *OpenIDClaims { + if in == nil { + return nil + } + out := new(OpenIDClaims) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDIdentityProvider) DeepCopyInto(out *OpenIDIdentityProvider) { + *out = *in + out.ClientSecret = in.ClientSecret + out.CA = in.CA + if in.ExtraScopes != nil { + in, out := &in.ExtraScopes, &out.ExtraScopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExtraAuthorizeParameters != nil { + in, out := &in.ExtraAuthorizeParameters, &out.ExtraAuthorizeParameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Claims.DeepCopyInto(&out.Claims) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDIdentityProvider. +func (in *OpenIDIdentityProvider) DeepCopy() *OpenIDIdentityProvider { + if in == nil { + return nil + } + out := new(OpenIDIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackPlatformSpec) DeepCopyInto(out *OpenStackPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformSpec. +func (in *OpenStackPlatformSpec) DeepCopy() *OpenStackPlatformSpec { + if in == nil { + return nil + } + out := new(OpenStackPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackPlatformStatus) DeepCopyInto(out *OpenStackPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformStatus. +func (in *OpenStackPlatformStatus) DeepCopy() *OpenStackPlatformStatus { + if in == nil { + return nil + } + out := new(OpenStackPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperandVersion) DeepCopyInto(out *OperandVersion) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperandVersion. +func (in *OperandVersion) DeepCopy() *OperandVersion { + if in == nil { + return nil + } + out := new(OperandVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OperatorHub) DeepCopyInto(out *OperatorHub) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHub. +func (in *OperatorHub) DeepCopy() *OperatorHub { + if in == nil { + return nil + } + out := new(OperatorHub) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OperatorHub) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorHubList) DeepCopyInto(out *OperatorHubList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OperatorHub, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubList. +func (in *OperatorHubList) DeepCopy() *OperatorHubList { + if in == nil { + return nil + } + out := new(OperatorHubList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OperatorHubList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorHubSpec) DeepCopyInto(out *OperatorHubSpec) { + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]HubSource, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubSpec. +func (in *OperatorHubSpec) DeepCopy() *OperatorHubSpec { + if in == nil { + return nil + } + out := new(OperatorHubSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorHubStatus) DeepCopyInto(out *OperatorHubStatus) { + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]HubSourceStatus, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubStatus. +func (in *OperatorHubStatus) DeepCopy() *OperatorHubStatus { + if in == nil { + return nil + } + out := new(OperatorHubStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OvirtPlatformSpec) DeepCopyInto(out *OvirtPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformSpec. +func (in *OvirtPlatformSpec) DeepCopy() *OvirtPlatformSpec { + if in == nil { + return nil + } + out := new(OvirtPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OvirtPlatformStatus) DeepCopyInto(out *OvirtPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformStatus. +func (in *OvirtPlatformStatus) DeepCopy() *OvirtPlatformStatus { + if in == nil { + return nil + } + out := new(OvirtPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSPlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzurePlatformSpec) + **out = **in + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPPlatformSpec) + **out = **in + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(BareMetalPlatformSpec) + **out = **in + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = new(OpenStackPlatformSpec) + **out = **in + } + if in.Ovirt != nil { + in, out := &in.Ovirt, &out.Ovirt + *out = new(OvirtPlatformSpec) + **out = **in + } + if in.VSphere != nil { + in, out := &in.VSphere, &out.VSphere + *out = new(VSpherePlatformSpec) + **out = **in + } + if in.IBMCloud != nil { + in, out := &in.IBMCloud, &out.IBMCloud + *out = new(IBMCloudPlatformSpec) + **out = **in + } + if in.Kubevirt != nil { + in, out := &in.Kubevirt, &out.Kubevirt + *out = new(KubevirtPlatformSpec) + **out = **in + } + if in.EquinixMetal != nil { + in, out := &in.EquinixMetal, &out.EquinixMetal + *out = new(EquinixMetalPlatformSpec) + **out = **in + } + if in.PowerVS != nil { + in, out := &in.PowerVS, &out.PowerVS + *out = new(PowerVSPlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.AlibabaCloud != nil { + in, out := &in.AlibabaCloud, &out.AlibabaCloud + *out = new(AlibabaCloudPlatformSpec) + **out = **in + } + if in.Nutanix != nil { + in, out := &in.Nutanix, &out.Nutanix + *out = new(NutanixPlatformSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformSpec. +func (in *PlatformSpec) DeepCopy() *PlatformSpec { + if in == nil { + return nil + } + out := new(PlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzurePlatformStatus) + **out = **in + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPPlatformStatus) + **out = **in + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(BareMetalPlatformStatus) + **out = **in + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = new(OpenStackPlatformStatus) + **out = **in + } + if in.Ovirt != nil { + in, out := &in.Ovirt, &out.Ovirt + *out = new(OvirtPlatformStatus) + **out = **in + } + if in.VSphere != nil { + in, out := &in.VSphere, &out.VSphere + *out = new(VSpherePlatformStatus) + **out = **in + } + if in.IBMCloud != nil { + in, out := &in.IBMCloud, &out.IBMCloud + *out = new(IBMCloudPlatformStatus) + **out = **in + } + if in.Kubevirt != nil { + in, out := &in.Kubevirt, &out.Kubevirt + *out = new(KubevirtPlatformStatus) + **out = **in + } + if in.EquinixMetal != nil { + in, out := &in.EquinixMetal, &out.EquinixMetal + *out = new(EquinixMetalPlatformStatus) + **out = **in + } + if in.PowerVS != nil { + in, out := &in.PowerVS, &out.PowerVS + *out = new(PowerVSPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.AlibabaCloud != nil { + in, out := &in.AlibabaCloud, &out.AlibabaCloud + *out = new(AlibabaCloudPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.Nutanix != nil { + in, out := &in.Nutanix, &out.Nutanix + *out = new(NutanixPlatformStatus) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformStatus. +func (in *PlatformStatus) DeepCopy() *PlatformStatus { + if in == nil { + return nil + } + out := new(PlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSPlatformSpec) DeepCopyInto(out *PowerVSPlatformSpec) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]PowerVSServiceEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSPlatformSpec. +func (in *PowerVSPlatformSpec) DeepCopy() *PowerVSPlatformSpec { + if in == nil { + return nil + } + out := new(PowerVSPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSPlatformStatus) DeepCopyInto(out *PowerVSPlatformStatus) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]PowerVSServiceEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSPlatformStatus. +func (in *PowerVSPlatformStatus) DeepCopy() *PowerVSPlatformStatus { + if in == nil { + return nil + } + out := new(PowerVSPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PowerVSServiceEndpoint) DeepCopyInto(out *PowerVSServiceEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSServiceEndpoint. +func (in *PowerVSServiceEndpoint) DeepCopy() *PowerVSServiceEndpoint { + if in == nil { + return nil + } + out := new(PowerVSServiceEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Project) DeepCopyInto(out *Project) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project. +func (in *Project) DeepCopy() *Project { + if in == nil { + return nil + } + out := new(Project) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Project) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectList) DeepCopyInto(out *ProjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Project, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList. +func (in *ProjectList) DeepCopy() *ProjectList { + if in == nil { + return nil + } + out := new(ProjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) { + *out = *in + out.ProjectRequestTemplate = in.ProjectRequestTemplate + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec. +func (in *ProjectSpec) DeepCopy() *ProjectSpec { + if in == nil { + return nil + } + out := new(ProjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus. +func (in *ProjectStatus) DeepCopy() *ProjectStatus { + if in == nil { + return nil + } + out := new(ProjectStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PromQLClusterCondition) DeepCopyInto(out *PromQLClusterCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromQLClusterCondition. 
+func (in *PromQLClusterCondition) DeepCopy() *PromQLClusterCondition { + if in == nil { + return nil + } + out := new(PromQLClusterCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Proxy) DeepCopyInto(out *Proxy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Proxy. +func (in *Proxy) DeepCopy() *Proxy { + if in == nil { + return nil + } + out := new(Proxy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Proxy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyList) DeepCopyInto(out *ProxyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Proxy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyList. +func (in *ProxyList) DeepCopy() *ProxyList { + if in == nil { + return nil + } + out := new(ProxyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProxyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxySpec) DeepCopyInto(out *ProxySpec) { + *out = *in + if in.ReadinessEndpoints != nil { + in, out := &in.ReadinessEndpoints, &out.ReadinessEndpoints + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.TrustedCA = in.TrustedCA + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxySpec. +func (in *ProxySpec) DeepCopy() *ProxySpec { + if in == nil { + return nil + } + out := new(ProxySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyStatus) DeepCopyInto(out *ProxyStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyStatus. +func (in *ProxyStatus) DeepCopy() *ProxyStatus { + if in == nil { + return nil + } + out := new(ProxyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryLocation. +func (in *RegistryLocation) DeepCopy() *RegistryLocation { + if in == nil { + return nil + } + out := new(RegistryLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegistrySources) DeepCopyInto(out *RegistrySources) { + *out = *in + if in.InsecureRegistries != nil { + in, out := &in.InsecureRegistries, &out.InsecureRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.BlockedRegistries != nil { + in, out := &in.BlockedRegistries, &out.BlockedRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowedRegistries != nil { + in, out := &in.AllowedRegistries, &out.AllowedRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ContainerRuntimeSearchRegistries != nil { + in, out := &in.ContainerRuntimeSearchRegistries, &out.ContainerRuntimeSearchRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrySources. +func (in *RegistrySources) DeepCopy() *RegistrySources { + if in == nil { + return nil + } + out := new(RegistrySources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Release) DeepCopyInto(out *Release) { + *out = *in + if in.Channels != nil { + in, out := &in.Channels, &out.Channels + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Release. +func (in *Release) DeepCopy() *Release { + if in == nil { + return nil + } + out := new(Release) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoteConnectionInfo) DeepCopyInto(out *RemoteConnectionInfo) { + *out = *in + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteConnectionInfo. +func (in *RemoteConnectionInfo) DeepCopy() *RemoteConnectionInfo { + if in == nil { + return nil + } + out := new(RemoteConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryDigestMirrors) DeepCopyInto(out *RepositoryDigestMirrors) { + *out = *in + if in.Mirrors != nil { + in, out := &in.Mirrors, &out.Mirrors + *out = make([]Mirror, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryDigestMirrors. +func (in *RepositoryDigestMirrors) DeepCopy() *RepositoryDigestMirrors { + if in == nil { + return nil + } + out := new(RepositoryDigestMirrors) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestHeaderIdentityProvider) DeepCopyInto(out *RequestHeaderIdentityProvider) { + *out = *in + out.ClientCA = in.ClientCA + if in.ClientCommonNames != nil { + in, out := &in.ClientCommonNames, &out.ClientCommonNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsernameHeaders != nil { + in, out := &in.PreferredUsernameHeaders, &out.PreferredUsernameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NameHeaders != nil { + in, out := &in.NameHeaders, &out.NameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.EmailHeaders != nil { + in, out := &in.EmailHeaders, &out.EmailHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderIdentityProvider. +func (in *RequestHeaderIdentityProvider) DeepCopy() *RequestHeaderIdentityProvider { + if in == nil { + return nil + } + out := new(RequestHeaderIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequiredHSTSPolicy) DeepCopyInto(out *RequiredHSTSPolicy) { + *out = *in + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.DomainPatterns != nil { + in, out := &in.DomainPatterns, &out.DomainPatterns + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.MaxAge.DeepCopyInto(&out.MaxAge) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequiredHSTSPolicy. +func (in *RequiredHSTSPolicy) DeepCopy() *RequiredHSTSPolicy { + if in == nil { + return nil + } + out := new(RequiredHSTSPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scheduler) DeepCopyInto(out *Scheduler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduler. +func (in *Scheduler) DeepCopy() *Scheduler { + if in == nil { + return nil + } + out := new(Scheduler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Scheduler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulerList) DeepCopyInto(out *SchedulerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Scheduler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerList. 
+func (in *SchedulerList) DeepCopy() *SchedulerList {
+	if in == nil {
+		return nil
+	}
+	out := new(SchedulerList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SchedulerList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SchedulerSpec) DeepCopyInto(out *SchedulerSpec) {
+	*out = *in
+	out.Policy = in.Policy
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerSpec.
+func (in *SchedulerSpec) DeepCopy() *SchedulerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(SchedulerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SchedulerStatus) DeepCopyInto(out *SchedulerStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerStatus.
+func (in *SchedulerStatus) DeepCopy() *SchedulerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(SchedulerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretNameReference) DeepCopyInto(out *SecretNameReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretNameReference.
+func (in *SecretNameReference) DeepCopy() *SecretNameReference {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretNameReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServingInfo) DeepCopyInto(out *ServingInfo) {
+	*out = *in
+	out.CertInfo = in.CertInfo
+	if in.NamedCertificates != nil {
+		in, out := &in.NamedCertificates, &out.NamedCertificates
+		*out = make([]NamedCertificate, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.CipherSuites != nil {
+		in, out := &in.CipherSuites, &out.CipherSuites
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServingInfo.
+func (in *ServingInfo) DeepCopy() *ServingInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(ServingInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StringSource) DeepCopyInto(out *StringSource) {
+	*out = *in
+	out.StringSourceSpec = in.StringSourceSpec
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSource.
+func (in *StringSource) DeepCopy() *StringSource {
+	if in == nil {
+		return nil
+	}
+	out := new(StringSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StringSourceSpec) DeepCopyInto(out *StringSourceSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSourceSpec.
+func (in *StringSourceSpec) DeepCopy() *StringSourceSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(StringSourceSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSProfileSpec) DeepCopyInto(out *TLSProfileSpec) {
+	*out = *in
+	if in.Ciphers != nil {
+		in, out := &in.Ciphers, &out.Ciphers
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSProfileSpec.
+func (in *TLSProfileSpec) DeepCopy() *TLSProfileSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSProfileSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSSecurityProfile) DeepCopyInto(out *TLSSecurityProfile) {
+	*out = *in
+	if in.Old != nil {
+		in, out := &in.Old, &out.Old
+		*out = new(OldTLSProfile)
+		**out = **in
+	}
+	if in.Intermediate != nil {
+		in, out := &in.Intermediate, &out.Intermediate
+		*out = new(IntermediateTLSProfile)
+		**out = **in
+	}
+	if in.Modern != nil {
+		in, out := &in.Modern, &out.Modern
+		*out = new(ModernTLSProfile)
+		**out = **in
+	}
+	if in.Custom != nil {
+		in, out := &in.Custom, &out.Custom
+		*out = new(CustomTLSProfile)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSSecurityProfile.
+func (in *TLSSecurityProfile) DeepCopy() *TLSSecurityProfile {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSSecurityProfile)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TemplateReference) DeepCopyInto(out *TemplateReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateReference.
+func (in *TemplateReference) DeepCopy() *TemplateReference {
+	if in == nil {
+		return nil
+	}
+	out := new(TemplateReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenConfig) DeepCopyInto(out *TokenConfig) {
+	*out = *in
+	if in.AccessTokenInactivityTimeout != nil {
+		in, out := &in.AccessTokenInactivityTimeout, &out.AccessTokenInactivityTimeout
+		*out = new(metav1.Duration)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenConfig.
+func (in *TokenConfig) DeepCopy() *TokenConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(TokenConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Update) DeepCopyInto(out *Update) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Update.
+func (in *Update) DeepCopy() *Update {
+	if in == nil {
+		return nil
+	}
+	out := new(Update)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UpdateHistory) DeepCopyInto(out *UpdateHistory) {
+	*out = *in
+	in.StartedTime.DeepCopyInto(&out.StartedTime)
+	if in.CompletionTime != nil {
+		in, out := &in.CompletionTime, &out.CompletionTime
+		*out = (*in).DeepCopy()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateHistory.
+func (in *UpdateHistory) DeepCopy() *UpdateHistory {
+	if in == nil {
+		return nil
+	}
+	out := new(UpdateHistory)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSpherePlatformSpec) DeepCopyInto(out *VSpherePlatformSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformSpec.
+func (in *VSpherePlatformSpec) DeepCopy() *VSpherePlatformSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(VSpherePlatformSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSpherePlatformStatus) DeepCopyInto(out *VSpherePlatformStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformStatus.
+func (in *VSpherePlatformStatus) DeepCopy() *VSpherePlatformStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(VSpherePlatformStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookTokenAuthenticator) DeepCopyInto(out *WebhookTokenAuthenticator) {
+	*out = *in
+	out.KubeConfig = in.KubeConfig
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookTokenAuthenticator.
+func (in *WebhookTokenAuthenticator) DeepCopy() *WebhookTokenAuthenticator {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookTokenAuthenticator)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkerLatencyProfileStatus) DeepCopyInto(out *WorkerLatencyProfileStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]metav1.Condition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerLatencyProfileStatus.
+func (in *WorkerLatencyProfileStatus) DeepCopy() *WorkerLatencyProfileStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(WorkerLatencyProfileStatus)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 000000000..1e789e475
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,2042 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple lines or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_AdmissionConfig = map[string]string{
+	"enabledPlugins": "enabledPlugins is a list of admission plugins that must be on in addition to the default list. Some admission plugins are disabled by default, but certain configurations require them. This is fairly uncommon and can result in performance penalties and unexpected behavior.",
+	"disabledPlugins": "disabledPlugins is a list of admission plugins that must be off. Putting something in this list is almost always a mistake and likely to result in cluster instability.",
+}
+
+func (AdmissionConfig) SwaggerDoc() map[string]string {
+	return map_AdmissionConfig
+}
+
+var map_AdmissionPluginConfig = map[string]string{
+	"": "AdmissionPluginConfig holds the necessary configuration options for admission plugins",
+	"location": "Location is the path to a configuration file that contains the plugin's configuration",
+	"configuration": "Configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.",
+}
+
+func (AdmissionPluginConfig) SwaggerDoc() map[string]string {
+	return map_AdmissionPluginConfig
+}
+
+var map_AuditConfig = map[string]string{
+	"": "AuditConfig holds configuration for the audit capabilities",
+	"enabled": "If this flag is set, the audit log will be printed in the logs. The log contains the method, user, and requested URL.",
+	"auditFilePath": "All requests coming to the apiserver will be logged to this file.",
+	"maximumFileRetentionDays": "Maximum number of days to retain old log files based on the timestamp encoded in their filename.",
+	"maximumRetainedFiles": "Maximum number of old log files to retain.",
+	"maximumFileSizeMegabytes": "Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.",
+	"policyFile": "PolicyFile is a path to the file that defines the audit policy configuration.",
+	"policyConfiguration": "PolicyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.",
+	"logFormat": "Format of saved audits (legacy or json).",
+	"webHookKubeConfig": "Path to a .kubeconfig formatted file that defines the audit webhook configuration.",
+	"webHookMode": "Strategy for sending audit events (block or batch).",
+}
+
+func (AuditConfig) SwaggerDoc() map[string]string {
+	return map_AuditConfig
+}
+
+var map_CertInfo = map[string]string{
+	"": "CertInfo relates a certificate with a private key",
+	"certFile": "CertFile is a file containing a PEM-encoded certificate",
+	"keyFile": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile",
+}
+
+func (CertInfo) SwaggerDoc() map[string]string {
+	return map_CertInfo
+}
+
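// ---- Editor's illustration (not part of the vendored diff above or below) ----
// How these generated maps are consumed: each type exposes a
// SwaggerDoc() map[string]string method keyed by JSON field name, with the
// empty key "" (where present) holding the documentation for the type itself.
// A hypothetical lookup helper:
//
//	func fieldDoc(t interface{ SwaggerDoc() map[string]string }, field string) string {
//		return t.SwaggerDoc()[field]
//	}
//
//	// fieldDoc(AdmissionConfig{}, "enabledPlugins") returns the
//	// enabledPlugins description from map_AdmissionConfig above.
// ---- End editor's illustration ----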
+var map_ClientConnectionOverrides = map[string]string{
+	"acceptContentTypes": "acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the default value of 'application/json'. This field will control all connections to the server used by a particular client.",
+	"contentType": "contentType is the content type used when sending data to the server from this client.",
+	"qps": "qps controls the number of queries per second allowed for this connection.",
+	"burst": "burst allows extra queries to accumulate when a client is exceeding its rate.",
+}
+
+func (ClientConnectionOverrides) SwaggerDoc() map[string]string {
+	return map_ClientConnectionOverrides
+}
+
+var map_ConfigMapFileReference = map[string]string{
+	"": "ConfigMapFileReference references a config map in a specific namespace. The namespace must be specified at the point of use.",
+	"key": "Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.",
+}
+
+func (ConfigMapFileReference) SwaggerDoc() map[string]string {
+	return map_ConfigMapFileReference
+}
+
+var map_ConfigMapNameReference = map[string]string{
+	"": "ConfigMapNameReference references a config map in a specific namespace. The namespace must be specified at the point of use.",
+	"name": "name is the metadata.name of the referenced config map",
+}
+
+func (ConfigMapNameReference) SwaggerDoc() map[string]string {
+	return map_ConfigMapNameReference
+}
+
+var map_DelegatedAuthentication = map[string]string{
+	"": "DelegatedAuthentication allows authentication to be disabled.",
+	"disabled": "disabled indicates that authentication should be disabled. By default it will use delegated authentication.",
+}
+
+func (DelegatedAuthentication) SwaggerDoc() map[string]string {
+	return map_DelegatedAuthentication
+}
+
+var map_DelegatedAuthorization = map[string]string{
+	"": "DelegatedAuthorization allows authorization to be disabled.",
+	"disabled": "disabled indicates that authorization should be disabled. By default it will use delegated authorization.",
+}
+
+func (DelegatedAuthorization) SwaggerDoc() map[string]string {
+	return map_DelegatedAuthorization
+}
+
+var map_EtcdConnectionInfo = map[string]string{
+	"": "EtcdConnectionInfo holds information necessary for connecting to an etcd server",
+	"urls": "URLs are the URLs for etcd",
+	"ca": "CA is a file containing trusted roots for the etcd server certificates",
+}
+
+func (EtcdConnectionInfo) SwaggerDoc() map[string]string {
+	return map_EtcdConnectionInfo
+}
+
+var map_EtcdStorageConfig = map[string]string{
+	"storagePrefix": "StoragePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located.",
+}
+
+func (EtcdStorageConfig) SwaggerDoc() map[string]string {
+	return map_EtcdStorageConfig
+}
+
+var map_GenericAPIServerConfig = map[string]string{
+	"": "GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd",
+	"servingInfo": "servingInfo describes how to start serving",
+	"corsAllowedOrigins": "corsAllowedOrigins",
+	"auditConfig": "auditConfig describes how to configure audit information",
+	"storageConfig": "storageConfig contains information about how to use",
+	"admission": "admissionConfig holds information about how to configure admission.",
+}
+
+func (GenericAPIServerConfig) SwaggerDoc() map[string]string {
+	return map_GenericAPIServerConfig
+}
+
+var map_GenericControllerConfig = map[string]string{
+	"": "GenericControllerConfig provides information to configure a controller",
+	"servingInfo": "ServingInfo is the HTTP serving information for the controller's endpoints",
+	"leaderElection": "leaderElection provides information to elect a leader. Only override this if you have a specific need",
+	"authentication": "authentication allows configuration of authentication for the endpoints",
+	"authorization": "authorization allows configuration of authorization for the endpoints",
+}
+
+func (GenericControllerConfig) SwaggerDoc() map[string]string {
+	return map_GenericControllerConfig
+}
+
+var map_HTTPServingInfo = map[string]string{
+	"": "HTTPServingInfo holds configuration for serving HTTP",
+	"maxRequestsInFlight": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.",
+	"requestTimeoutSeconds": "RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.",
+}
+
+func (HTTPServingInfo) SwaggerDoc() map[string]string {
+	return map_HTTPServingInfo
+}
+
+var map_KubeClientConfig = map[string]string{
+	"kubeConfig": "kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster-config",
+	"connectionOverrides": "connectionOverrides specifies client overrides for system components to loop back to this master.",
+}
+
+func (KubeClientConfig) SwaggerDoc() map[string]string {
+	return map_KubeClientConfig
+}
+
+var map_LeaderElection = map[string]string{
+	"": "LeaderElection provides information to elect a leader",
+	"disable": "disable allows leader election to be suspended while allowing a fully defaulted \"normal\" startup case.",
+	"namespace": "namespace indicates which namespace the resource is in",
+	"name": "name indicates what name to use for the resource",
+	"leaseDuration": "leaseDuration is the duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.",
+	"renewDeadline": "renewDeadline is the interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled.",
+	"retryPeriod": "retryPeriod is the duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled.",
+}
+
+func (LeaderElection) SwaggerDoc() map[string]string {
+	return map_LeaderElection
+}
+
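// ---- Editor's illustration (not part of the vendored diff above or below) ----
// The three durations documented above are related: renewDeadline must be less
// than or equal to leaseDuration, and retryPeriod paces the acquisition and
// renewal attempts. A sketch with hypothetical but consistent values, assuming
// the usual metav1 and time imports:
//
//	le := LeaderElection{
//		Namespace:     "openshift-example", // hypothetical
//		Name:          "example-lock",      // hypothetical
//		LeaseDuration: metav1.Duration{Duration: 137 * time.Second},
//		RenewDeadline: metav1.Duration{Duration: 107 * time.Second}, // <= leaseDuration
//		RetryPeriod:   metav1.Duration{Duration: 26 * time.Second},
//	}
// ---- End editor's illustration ----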
+var map_MaxAgePolicy = map[string]string{
+	"": "MaxAgePolicy contains a numeric range for specifying a compliant HSTS max-age for the enclosing RequiredHSTSPolicy",
+	"largestMaxAge": "The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age. This value can be left unspecified, in which case no upper limit is enforced.",
+	"smallestMaxAge": "The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age. Setting max-age=0 allows the deletion of an existing HSTS header from a host. This is a necessary tool for administrators to quickly correct mistakes. This value can be left unspecified, in which case no lower limit is enforced.",
+}
+
+func (MaxAgePolicy) SwaggerDoc() map[string]string {
+	return map_MaxAgePolicy
+}
+
+var map_NamedCertificate = map[string]string{
+	"": "NamedCertificate specifies a certificate/key, and the names it should be served for",
+	"names": "Names is a list of DNS names this certificate should be used to secure. A name can be a normal DNS name, or can contain leading wildcard segments.",
+}
+
+func (NamedCertificate) SwaggerDoc() map[string]string {
+	return map_NamedCertificate
+}
+
+var map_RemoteConnectionInfo = map[string]string{
+	"": "RemoteConnectionInfo holds information necessary for establishing a remote connection",
+	"url": "URL is the remote URL to connect to",
+	"ca": "CA is the CA for verifying TLS connections",
+}
+
+func (RemoteConnectionInfo) SwaggerDoc() map[string]string {
+	return map_RemoteConnectionInfo
+}
+
+var map_RequiredHSTSPolicy = map[string]string{
+	"namespaceSelector": "namespaceSelector specifies a label selector such that the policy applies only to those routes that are in namespaces with labels that match the selector, and are in one of the DomainPatterns. Defaults to the empty LabelSelector, which matches everything.",
+	"domainPatterns": "domainPatterns is a list of domains for which the desired HSTS annotations are required. If domainPatterns is specified and a route is created with a spec.host matching one of the domains, the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy.\n\nThe use of wildcards is allowed like this: *.foo.com matches everything under foo.com. foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*.",
+	"maxAge": "maxAge is the delta time range in seconds during which hosts are regarded as HSTS hosts. If set to 0, it negates the effect, and hosts are removed as HSTS hosts. If set to 0 and includeSubdomains is specified, all subdomains of the host are also removed as HSTS hosts. maxAge is a time-to-live value, and if this policy is not refreshed on a client, the HSTS policy will eventually expire on that client.",
+	"preloadPolicy": "preloadPolicy directs the client to include hosts in its host preload list so that it never needs to do an initial load to get the HSTS header (note that this is not defined in RFC 6797 and is therefore client implementation-dependent).",
+	"includeSubDomainsPolicy": "includeSubDomainsPolicy means the HSTS Policy should apply to any subdomains of the host's domain name. Thus, for the host bar.foo.com, if includeSubDomainsPolicy was set to RequireIncludeSubDomains: - the host app.bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host foo.com would NOT inherit the HSTS Policy of bar.foo.com - the host def.foo.com would NOT inherit the HSTS Policy of bar.foo.com",
+}
+
+func (RequiredHSTSPolicy) SwaggerDoc() map[string]string {
+	return map_RequiredHSTSPolicy
+}
+
+var map_SecretNameReference = map[string]string{
+	"": "SecretNameReference references a secret in a specific namespace. The namespace must be specified at the point of use.",
+	"name": "name is the metadata.name of the referenced secret",
+}
+
+func (SecretNameReference) SwaggerDoc() map[string]string {
+	return map_SecretNameReference
+}
+
+var map_ServingInfo = map[string]string{
+	"": "ServingInfo holds information about serving web pages",
+	"bindAddress": "BindAddress is the ip:port to serve on",
+	"bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"",
+	"clientCA": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates",
+	"namedCertificates": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames",
+	"minTLSVersion": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants",
+	"cipherSuites": "CipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants",
+}
+
+func (ServingInfo) SwaggerDoc() map[string]string {
+	return map_ServingInfo
+}
+
+var map_StringSource = map[string]string{
+	"": "StringSource allows specifying a string inline, or externally via env var or file. When it contains only a string value, it marshals to a simple JSON string.",
+}
+
+func (StringSource) SwaggerDoc() map[string]string {
+	return map_StringSource
+}
+
+var map_StringSourceSpec = map[string]string{
+	"": "StringSourceSpec specifies a string value, or external location",
+	"value": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.",
+	"env": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.",
+	"file": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.",
+	"keyFile": "KeyFile references a file containing the key to use to decrypt the value.",
+}
+
+func (StringSourceSpec) SwaggerDoc() map[string]string {
+	return map_StringSourceSpec
+}
+
+var map_APIServer = map[string]string{
+	"": "APIServer holds configuration (like serving certificates, client CA and CORS domains) shared by all API servers in the system, among them especially kube-apiserver and openshift-apiserver. The canonical name of an instance is 'cluster'.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"spec": "spec holds user settable values for configuration",
+	"status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (APIServer) SwaggerDoc() map[string]string {
+	return map_APIServer
+}
+
+var map_APIServerEncryption = map[string]string{
+	"type": "type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices.\n\nWhen encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. The current authoritative list is:\n\n 1. secrets\n 2. configmaps\n 3. routes.route.openshift.io\n 4. oauthaccesstokens.oauth.openshift.io\n 5. oauthauthorizetokens.oauth.openshift.io",
+}
+
+func (APIServerEncryption) SwaggerDoc() map[string]string {
+	return map_APIServerEncryption
+}
+
+var map_APIServerList = map[string]string{
+	"": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+}
+
+func (APIServerList) SwaggerDoc() map[string]string {
+	return map_APIServerList
+}
+
+var map_APIServerNamedServingCert = map[string]string{
+	"": "APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate.",
+	"names": "names is an optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. Exact names take precedence over wildcard names. Explicit names defined here take precedence over extracted implicit names.",
+	"servingCertificate": "servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. The secret must exist in the openshift-config namespace and contain the following required fields: - Secret.Data[\"tls.key\"] - TLS private key. - Secret.Data[\"tls.crt\"] - TLS certificate.",
+}
+
+func (APIServerNamedServingCert) SwaggerDoc() map[string]string {
+	return map_APIServerNamedServingCert
+}
+
+var map_APIServerServingCerts = map[string]string{
+	"namedCertificates": "namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. If no named certificates are provided, or no named certificates match the server name as understood by a client, the defaultServingCertificate will be used.",
+}
+
+func (APIServerServingCerts) SwaggerDoc() map[string]string {
+	return map_APIServerServingCerts
+}
+
+var map_APIServerSpec = map[string]string{
+	"servingCerts": "servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic.",
+	"clientCA": "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.",
+	"additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language.",
+	"encryption": "encryption allows the configuration of encryption of resources at the datastore layer.",
+	"tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nIf unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12.",
+	"audit": "audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster.",
+}
+
+func (APIServerSpec) SwaggerDoc() map[string]string {
+	return map_APIServerSpec
+}
+
+var map_Audit = map[string]string{
+	"profile": "profile specifies the name of the desired top-level audit profile to be applied to all requests sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, openshift-apiserver and oauth-apiserver), with the exception of those requests that match one or more of the customRules.\n\nThe following profiles are provided: - Default: default policy which means MetaData level logging with the exception of events\n (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody\n level).\n- WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.\n\nWarning: It is not recommended to disable audit logging by using the `None` profile unless you are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. If you disable audit logging and a support situation arises, you might need to enable audit logging and reproduce the issue in order to troubleshoot properly.\n\nIf unset, the 'Default' profile is used as the default.",
+	"customRules": "customRules specify profiles per group. These profiles take precedence over the top-level profile field if they apply. They are evaluated from top to bottom, and the first one that matches applies.",
+}
+
+func (Audit) SwaggerDoc() map[string]string {
+	return map_Audit
+}
+
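// ---- Editor's illustration (not part of the vendored diff above or below) ----
// The additionalCORSAllowedOrigins entries documented above are Go (RE2)
// regular expressions matched against request origins. A hedged sketch of what
// one such pattern could look like (the domain is a made-up example; anchoring
// with (:|\z) avoids matching lookalike hosts):
//
//	re := regexp.MustCompile(`(?i)//console\.example\.com(:|\z)`) // assumes import "regexp"
//	re.MatchString("https://console.example.com:8443") // true
//	re.MatchString("https://console.example.com.evil") // false
// ---- End editor's illustration ----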
+var map_AuditCustomRule = map[string]string{
+	"": "AuditCustomRule describes a custom rule for an audit profile that takes precedence over the top-level profile.",
+	"group": "group is the name of a group that a request user must be a member of in order for this profile to apply.",
+	"profile": "profile specifies the name of the desired audit policy configuration to be deployed to all OpenShift-provided API servers in the cluster.\n\nThe following profiles are provided: - Default: the existing default policy. - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.\n\nIf unset, the 'Default' profile is used as the default.",
+}
+
+func (AuditCustomRule) SwaggerDoc() map[string]string {
+	return map_AuditCustomRule
+}
+
+var map_Authentication = map[string]string{
+	"": "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"spec": "spec holds user settable values for configuration",
+	"status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Authentication) SwaggerDoc() map[string]string {
+	return map_Authentication
+}
+
+var map_AuthenticationList = map[string]string{
+	"": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+}
+
+func (AuthenticationList) SwaggerDoc() map[string]string {
+	return map_AuthenticationList
+}
+
+var map_AuthenticationSpec = map[string]string{
+	"type": "type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth.",
+	"oauthMetadata": "oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key \"oauthMetadata\" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.",
+	"webhookTokenAuthenticators": "webhookTokenAuthenticators is DEPRECATED, setting it has no effect.",
+	"webhookTokenAuthenticator": "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service.",
+	"serviceAccountIssuer": "serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc WARNING: Updating this field will result in the invalidation of all bound tokens with the previous issuer value. Unless the holder of a bound token has explicit support for a change in issuer, they will not request a new bound token until pod restart or until their existing token exceeds 80% of its duration.",
+}
+
+func (AuthenticationSpec) SwaggerDoc() map[string]string {
+	return map_AuthenticationSpec
+}
+
+var map_AuthenticationStatus = map[string]string{
+	"integratedOAuthMetadata": "integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key \"oauthMetadata\" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config-managed.",
+}
+
+func (AuthenticationStatus) SwaggerDoc() map[string]string {
+	return map_AuthenticationStatus
+}
+
+var map_DeprecatedWebhookTokenAuthenticator = map[string]string{
+	"": "deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field.",
+	"kubeConfig": "kubeConfig contains kube config file data which describes how to access the remote webhook service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. The namespace for this secret is determined by the point of use.",
+}
+
+func (DeprecatedWebhookTokenAuthenticator) SwaggerDoc() map[string]string {
+	return map_DeprecatedWebhookTokenAuthenticator
+}
+
+var map_WebhookTokenAuthenticator = map[string]string{
+	"": "webhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator",
+	"kubeConfig": "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config.\n\nFor further details, see:\n\nhttps://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication\n\nThe key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored.",
+}
+
+func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string {
+	return map_WebhookTokenAuthenticator
+}
+
+var map_Build = map[string]string{
+	"": "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.\n\nThe canonical name is \"cluster\"\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"spec": "Spec holds user-settable values for the build controller configuration",
+}
+
+func (Build) SwaggerDoc() map[string]string {
+	return map_Build
+}
+
+var map_BuildDefaults = map[string]string{
+	"defaultProxy": "DefaultProxy contains the default proxy settings for all build operations, including image pull/push and source download.\n\nValues can be overridden by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy.",
+	"gitProxy": "GitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone.\n\nValues that are not set here will be inherited from DefaultProxy.",
+	"env": "Env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build",
+	"imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. Users can override a default label by providing a label with the same name in their Build/BuildConfig.",
+	"resources": "Resources defines resource requirements to execute the build.",
+}
+
+func (BuildDefaults) SwaggerDoc() map[string]string {
+	return map_BuildDefaults
+}
+
+var map_BuildList = map[string]string{
+	"": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+}
+
+func (BuildList) SwaggerDoc() map[string]string {
+	return map_BuildList
+}
+
+var map_BuildOverrides = map[string]string{
+	"imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. If a user provides a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.",
+	"nodeSelector": "NodeSelector is a selector which must be true for the build pod to fit on a node",
+	"tolerations": "Tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.",
+	"forcePull": "ForcePull, if set, overrides the equivalent value in the builds: false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself",
+}
+
+func (BuildOverrides) SwaggerDoc() map[string]string {
+	return map_BuildOverrides
+}
+
+var map_BuildSpec = map[string]string{
+	"additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.",
+	"buildDefaults": "BuildDefaults controls the default information for Builds",
+	"buildOverrides": "BuildOverrides controls override settings for builds",
+}
+
+func (BuildSpec) SwaggerDoc() map[string]string {
+	return map_BuildSpec
+}
+
+var map_ImageLabel = map[string]string{
+	"name": "Name defines the name of the label. It must have non-zero length.",
+	"value": "Value defines the literal value of the label.",
+}
+
+func (ImageLabel) SwaggerDoc() map[string]string {
+	return map_ImageLabel
+}
+
+var map_ClusterOperator = map[string]string{
+	"": "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"spec": "spec holds configuration that could apply to any operator.",
+	"status": "status holds the information about the state of an operator. It is consistent with status information across the Kubernetes ecosystem.",
+}
+
+func (ClusterOperator) SwaggerDoc() map[string]string {
+	return map_ClusterOperator
+}
+
+var map_ClusterOperatorList = map[string]string{
+	"": "ClusterOperatorList is a list of OperatorStatus resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+}
+
+func (ClusterOperatorList) SwaggerDoc() map[string]string {
+	return map_ClusterOperatorList
+}
+
+var map_ClusterOperatorSpec = map[string]string{
+	"": "ClusterOperatorSpec is empty for now, but you could imagine holding information like \"pause\".",
+}
+
+func (ClusterOperatorSpec) SwaggerDoc() map[string]string {
+	return map_ClusterOperatorSpec
+}
+
+var map_ClusterOperatorStatus = map[string]string{
+	"": "ClusterOperatorStatus provides information about the status of the operator.",
+	"conditions": "conditions describes the state of the operator's managed and monitored components.",
+	"versions": "versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple operand entries in the array. Available operators must report the version of the operator itself with the name \"operator\". An operator reports a new \"operator\" version when it has rolled out the new version to all of its operands.",
+	"relatedObjects": "relatedObjects is a list of objects that are \"interesting\" or related to this operator. Common uses are: 1. the detailed resource driving the operator 2. operator namespaces 3. operand namespaces",
+	"extension": "extension contains any additional status information specific to the operator which owns this status object.",
+}
+
+func (ClusterOperatorStatus) SwaggerDoc() map[string]string {
+	return map_ClusterOperatorStatus
+}
+
+var map_ClusterOperatorStatusCondition = map[string]string{
+	"": "ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components.",
+	"type": "type specifies the aspect reported by this condition.",
+	"status": "status of the condition, one of True, False, Unknown.",
+	"lastTransitionTime": "lastTransitionTime is the time of the last update to the current status property.",
+	"reason": "reason is the CamelCase reason for the condition's current status.",
+	"message": "message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.",
+}
+
+func (ClusterOperatorStatusCondition) SwaggerDoc() map[string]string {
+	return map_ClusterOperatorStatusCondition
+}
+
+var map_ObjectReference = map[string]string{
+	"": "ObjectReference contains enough information to let you inspect or modify the referred object.",
+	"group": "group of the referent.",
+	"resource": "resource of the referent.",
+	"namespace": "namespace of the referent.",
+	"name": "name of the referent.",
+}
+
+func (ObjectReference) SwaggerDoc() map[string]string {
+	return map_ObjectReference
+}
+
+var map_OperandVersion = map[string]string{
+	"name": "name is the name of the particular operand this version is for. It usually matches container images, not operators.",
+	"version": "version indicates which version of a particular operand is currently being managed. It must always match the Available operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to roll out 1.1.0",
+}
+
+func (OperandVersion) SwaggerDoc() map[string]string {
+	return map_OperandVersion
+}
+
+var map_ClusterCondition = map[string]string{
+	"": "ClusterCondition is a union of typed cluster conditions. The 'type' property determines which of the type-specific properties are relevant. When evaluated on a cluster, the condition may match, not match, or fail to evaluate.",
+	"type": "type represents the cluster-condition type. This defines the members and semantics of any additional properties.",
+	"promql": "promQL represents a cluster condition based on PromQL.",
+}
+
+func (ClusterCondition) SwaggerDoc() map[string]string {
+	return map_ClusterCondition
+}
+
+var map_ClusterVersion = map[string]string{
+	"": "ClusterVersion is the configuration for the ClusterVersionOperator. This is where parameters related to automatic updates can be set.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"spec": "spec is the desired state of the cluster version - the operator will work to ensure that the desired version is applied to the cluster.",
+	"status": "status contains information about the available updates and any in-progress updates.",
+}
+
+func (ClusterVersion) SwaggerDoc() map[string]string {
+	return map_ClusterVersion
+}
+
+var map_ClusterVersionCapabilitiesSpec = map[string]string{
+	"": "ClusterVersionCapabilitiesSpec selects the managed set of optional, core cluster components.",
+	"baselineCapabilitySet": "baselineCapabilitySet selects an initial set of optional capabilities to enable, which can be extended via additionalEnabledCapabilities. If unset, the cluster will choose a default, and the default may change over time. The current default is vCurrent.",
+	"additionalEnabledCapabilities": "additionalEnabledCapabilities extends the set of managed capabilities beyond the baseline defined in baselineCapabilitySet. The default is an empty set.",
+}
+
+func (ClusterVersionCapabilitiesSpec) SwaggerDoc() map[string]string {
+	return map_ClusterVersionCapabilitiesSpec
+}
+
+var map_ClusterVersionCapabilitiesStatus = map[string]string{
+	"": "ClusterVersionCapabilitiesStatus describes the state of optional, core cluster components.",
+	"enabledCapabilities": "enabledCapabilities lists all the capabilities that are currently managed.",
+	"knownCapabilities": "knownCapabilities lists all the capabilities known to the current cluster.",
+}
+
+func (ClusterVersionCapabilitiesStatus) SwaggerDoc() map[string]string {
+	return map_ClusterVersionCapabilitiesStatus
+}
+
+var map_ClusterVersionList = map[string]string{
+	"": "ClusterVersionList is a list of ClusterVersion resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+}
+
+func (ClusterVersionList) SwaggerDoc() map[string]string {
+	return map_ClusterVersionList
+}
+
+var map_ClusterVersionSpec = map[string]string{
+	"": "ClusterVersionSpec is the desired version state of the cluster. It includes the version the cluster should be at, how the cluster is identified, and where the cluster should look for version updates.",
+	"clusterID": "clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field.",
+	"desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail. You may specify the version field without setting image if an update exists with that version in the availableUpdates or history.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed.",
+	"upstream": "upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region.",
+	"channel": "channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will contain stable updates that are appropriate for production clusters.",
+	"capabilities": "capabilities configures the installation of optional, core cluster components. A null value here is identical to an empty object; see the child properties for default semantics.",
+	"overrides": "overrides is a list of overrides for components that are managed by the cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object.",
+}
+
+func (ClusterVersionSpec) SwaggerDoc() map[string]string {
+	return map_ClusterVersionSpec
+}
+
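// ---- Editor's illustration (not part of the vendored diff above or below) ----
// A sketch of how the ClusterVersionSpec fields documented above fit together:
// setting spec.desiredUpdate requests an upgrade once it differs from the
// current version, and version may be given without image when a matching
// version exists in availableUpdates or history. All values are hypothetical:
//
//	spec := ClusterVersionSpec{
//		ClusterID:     "11111111-2222-3333-4444-555555555555", // required RFC4122 UUID
//		Channel:       "stable-4.10",                          // hypothetical channel name
//		DesiredUpdate: &Update{Version: "4.10.9"},             // triggers the upgrade
//	}
// ---- End editor's illustration ----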
+var map_ClusterVersionStatus = map[string]string{
+	"": "ClusterVersionStatus reports the status of the cluster versioning, including any upgrades that are in progress. The current field will be set to whichever version the cluster is reconciling to, and the conditions array will report whether the update succeeded, is in progress, or is failing.",
+	"desired": "desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag.",
+	"history": "history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved.",
+	"observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version.",
+	"versionHash": "versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only.",
+	"capabilities": "capabilities describes the state of optional, core cluster components.",
+	"conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Degraded\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.",
+	"availableUpdates": "availableUpdates contains updates recommended for this cluster. Updates which appear in conditionalUpdates but not in availableUpdates may expose this cluster to known issues. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified.",
+	"conditionalUpdates": "conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified.",
+}
+
+func (ClusterVersionStatus) SwaggerDoc() map[string]string {
+	return map_ClusterVersionStatus
+}
+
+var map_ComponentOverride = map[string]string{
+	"": "ComponentOverride allows overriding cluster version operator's behavior for a component.",
+	"kind": "kind identifies which object to override.",
+	"group": "group identifies the API group that the kind is in.",
+	"namespace": "namespace is the component's namespace. If the resource is cluster scoped, the namespace should be empty.",
+	"name": "name is the component's name.",
+	"unmanaged": "unmanaged controls whether the cluster version operator should stop managing the resources in this cluster. Default: false",
+}
+
+func (ComponentOverride) SwaggerDoc() map[string]string {
+	return map_ComponentOverride
+}
+
+var map_ConditionalUpdate = map[string]string{
+	"": "ConditionalUpdate represents an update which is recommended to some clusters on the version the current cluster is reconciling, but which may not be recommended for the current cluster.",
+	"release": "release is the target of the update.",
+	"risks": "risks represents the range of issues associated with updating to the target release. The cluster-version operator will evaluate all entries, and only recommend the update if there is at least one entry and all entries recommend the update.",
+	"conditions": "conditions represents the observations of the conditional update's current status. Known types are: * Evaluating, for whether the cluster-version operator will attempt to evaluate any risks[].matchingRules. * Recommended, for whether the update is recommended for the current cluster.",
+}
+
+func (ConditionalUpdate) SwaggerDoc() map[string]string {
+	return map_ConditionalUpdate
+}
+
If no condition can be successfully evaluated, the update will not be recommended.", +} + +func (ConditionalUpdateRisk) SwaggerDoc() map[string]string { + return map_ConditionalUpdateRisk +} + +var map_PromQLClusterCondition = map[string]string{ + "": "PromQLClusterCondition represents a cluster condition based on PromQL.", + "promql": "PromQL is a PromQL query classifying clusters. This query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures.", +} + +func (PromQLClusterCondition) SwaggerDoc() map[string]string { + return map_PromQLClusterCondition +} + +var map_Release = map[string]string{ + "": "Release represents an OpenShift release image and associated metadata.", + "version": "version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified.", + "image": "image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.", + "url": "url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases.", + "channels": "channels is the set of Cincinnati channels to which the release currently belongs.", +} + +func (Release) SwaggerDoc() map[string]string { + return map_Release +} + +var map_Update = map[string]string{ + "": "Update represents an administrator update request.", + "version": "version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified.", + "image": "image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.", + "force": "force allows an administrator to update to an image that has failed verification or upgradeable checks. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that come from unknown or potentially malicious sources.", +} + +func (Update) SwaggerDoc() map[string]string { + return map_Update +} + +var map_UpdateHistory = map[string]string{ + "": "UpdateHistory is a single attempted update to the cluster.", + "state": "state reflects whether the update was fully applied. The Partial state indicates the update is not fully applied, while the Completed state indicates the update was successfully rolled out at least once (all parts of the update successfully applied).", + "startedTime": "startedTime is the time at which the update was started.", + "completionTime": "completionTime, if set, is when the update was fully applied. The update that is currently being applied will have a null completion time. Completion time will always be set for entries that are not the current update (usually to the started time of the next update).", + "version": "version is a semantic version identifying the update version.
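The promql contract documented above (1 means the cluster matches, 0 means it does not, anything else is an evaluation failure) maps naturally onto a three-valued result. A hedged sketch; matchResult and evaluatePromQLSample are hypothetical names, not operator code:

type matchResult int

const (
	matchUnknown matchResult = iota // evaluation failure
	matchFalse                      // query returned 0: cluster does not match
	matchTrue                       // query returned 1: cluster matches
)

// evaluatePromQLSample interprets a single scalar sample from the classifying
// query; hasSample is false when the query returned no time series.
func evaluatePromQLSample(value float64, hasSample bool) matchResult {
	switch {
	case !hasSample:
		return matchUnknown // no time series counts as an evaluation failure
	case value == 0:
		return matchFalse
	case value == 1:
		return matchTrue
	default:
		return matchUnknown // values besides 0 or 1 are evaluation failures
	}
}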
If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty.", + "image": "image is a container image location that contains the update. This value is always populated.", + "verified": "verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted. Verified does not cover upgradeable checks that depend on the cluster state at the time when the update target was accepted.", + "acceptedRisks": "acceptedRisks records risks which were accepted to initiate the update. For example, it may mention an Upgradeable=False or missing signature that was overridden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets.", +} + +func (UpdateHistory) SwaggerDoc() map[string]string { + return map_UpdateHistory +} + +var map_Console = map[string]string{ + "": "Console holds cluster-wide configuration for the web console, including the logout URL, and reports the public URL of the console. The canonical name is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Console) SwaggerDoc() map[string]string { + return map_Console +} + +var map_ConsoleAuthentication = map[string]string{ + "": "ConsoleAuthentication defines a list of optional configuration for console authentication.", + "logoutRedirect": "An optional, absolute URL to redirect web browsers to after logging out of the console. If not specified, it will redirect to the default login page. This is required when using an identity provider that supports single sign-on (SSO) such as: - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML) - OAuth (GitHub, GitLab, Google) Logging out of the console will destroy the user's token. The logoutRedirect provides the user the option to perform single logout (SLO) through the identity provider to destroy their single sign-on session.", +} + +func (ConsoleAuthentication) SwaggerDoc() map[string]string { + return map_ConsoleAuthentication +} + +var map_ConsoleList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (ConsoleList) SwaggerDoc() map[string]string { + return map_ConsoleList +} + +var map_ConsoleSpec = map[string]string{ + "": "ConsoleSpec is the specification of the desired behavior of the Console.", +} + +func (ConsoleSpec) SwaggerDoc() map[string]string { + return map_ConsoleSpec +} + +var map_ConsoleStatus = map[string]string{ + "": "ConsoleStatus defines the observed status of the Console.", + "consoleURL": "The URL for the console. This will be derived from the host for the route that is created for the console.", +} + +func (ConsoleStatus) SwaggerDoc() map[string]string { + return map_ConsoleStatus +} + +var map_DNS = map[string]string{ + "": "DNS holds cluster-wide information about DNS. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster.
They may not be overridden.", +} + +func (DNS) SwaggerDoc() map[string]string { + return map_DNS +} + +var map_DNSList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (DNSList) SwaggerDoc() map[string]string { + return map_DNSList +} + +var map_DNSSpec = map[string]string{ + "baseDomain": "baseDomain is the base domain of the cluster. All managed DNS records will be sub-domains of this base.\n\nFor example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`.\n\nOnce set, this field cannot be changed.", + "publicZone": "publicZone is the location where all the DNS records that are publicly accessible to the internet exist.\n\nIf this field is nil, no public records should be created.\n\nOnce set, this field cannot be changed.", + "privateZone": "privateZone is the location where all the DNS records that are only available internally to the cluster exist.\n\nIf this field is nil, no private records should be created.\n\nOnce set, this field cannot be changed.", +} + +func (DNSSpec) SwaggerDoc() map[string]string { + return map_DNSSpec +} + +var map_DNSZone = map[string]string{ + "": "DNSZone is used to define a DNS hosted zone. A zone can be identified by an ID or tags.", + "id": "id is the identifier that can be used to find the DNS hosted zone.\n\non AWS, the zone can be fetched using `ID` as id in [1]; on Azure, the zone can be fetched using `ID` as a pre-determined name in [2]; on GCP, the zone can be fetched using `ID` as a pre-determined name in [3].\n\n[1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get", + "tags": "tags can be used to query the DNS hosted zone.\n\non AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters.\n\n[1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options", +} + +func (DNSZone) SwaggerDoc() map[string]string { + return map_DNSZone +} + +var map_CustomFeatureGates = map[string]string{ + "enabled": "enabled is a list of all feature gates that you want to force on", + "disabled": "disabled is a list of all feature gates that you want to force off", +} + +func (CustomFeatureGates) SwaggerDoc() map[string]string { + return map_CustomFeatureGates +} + +var map_FeatureGate = map[string]string{ + "": "FeatureGate holds cluster-wide information about feature gates. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (FeatureGate) SwaggerDoc() map[string]string { + return map_FeatureGate +} + +var map_FeatureGateList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (FeatureGateList) SwaggerDoc() map[string]string { + return map_FeatureGateList +} + +var map_FeatureGateSelection = map[string]string{ + "featureSet": "featureSet changes the list of features in the cluster. The default is empty.
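As the DNSZone documentation above says, a zone is identified either by a provider ID or by tags. A small sketch with stand-in values; the struct mirrors the documented fields, and the ID and tag values are hypothetical:

// dnsZone is a simplified stand-in mirroring the documented DNSZone fields.
type dnsZone struct {
	ID   string            // provider hosted-zone identifier, e.g. an AWS zone ID
	Tags map[string]string // tag filters used to locate the zone when ID is unset
}

// A public zone addressed directly by a (hypothetical) AWS hosted-zone ID.
var publicZone = dnsZone{ID: "Z3EXAMPLE12345"}

// A private zone located by tag query instead of ID.
var privateZone = dnsZone{Tags: map[string]string{"Name": "mycluster-abcde-int"}}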
Be very careful adjusting this setting. Turning on or off features may cause irreversible changes in your cluster which cannot be undone.", + "customNoUpgrade": "customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations your cluster may fail in an unrecoverable way. featureSet must equal \"CustomNoUpgrade\" to use this field.", +} + +func (FeatureGateSelection) SwaggerDoc() map[string]string { + return map_FeatureGateSelection +} + +var map_Image = map[string]string{ + "": "Image governs policies related to imagestream imports and runtime configuration for external registries. It allows cluster admins to configure which registries OpenShift is allowed to import images from, extra CA trust bundles for external registries, and policies to block or allow registry hostnames. When exposing OpenShift's image registry to the public, this also lets cluster admins specify the external hostname.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Image) SwaggerDoc() map[string]string { + return map_Image +} + +var map_ImageList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (ImageList) SwaggerDoc() map[string]string { + return map_ImageList +} + +var map_ImageSpec = map[string]string{ + "allowedRegistriesForImport": "allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", + "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", + "additionalTrustedCA": "additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, build image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config.", + "registrySources": "registrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry.", +} + +func (ImageSpec) SwaggerDoc() map[string]string { + return map_ImageSpec +} + +var map_ImageStatus = map[string]string{ + "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. This value is set by the image registry operator which controls the internal registry hostname.
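To make the CustomNoUpgrade contract above concrete: featureSet must be set to "CustomNoUpgrade" before customNoUpgrade is honored. A sketch with simplified stand-in types and hypothetical gate names:

type customFeatureGates struct {
	Enabled  []string // feature gates forced on
	Disabled []string // feature gates forced off
}

type featureGateSelection struct {
	FeatureSet      string
	CustomNoUpgrade *customFeatureGates // only honored when FeatureSet == "CustomNoUpgrade"
}

var selection = featureGateSelection{
	FeatureSet: "CustomNoUpgrade", // required for the custom lists below to take effect
	CustomNoUpgrade: &customFeatureGates{
		Enabled:  []string{"ExampleGateA"}, // hypothetical gate names
		Disabled: []string{"ExampleGateB"},
	},
}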
For backward compatibility, users can still use the OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.", + "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", +} + +func (ImageStatus) SwaggerDoc() map[string]string { + return map_ImageStatus +} + +var map_RegistryLocation = map[string]string{ + "": "RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'.", + "domainName": "domainName specifies a domain name for the registry. In case the registry uses a non-standard port (i.e. not 80 or 443), the port should be included in the domain name as well.", + "insecure": "insecure indicates whether the registry is secure (https) or insecure (http). By default (if not specified) the registry is assumed to be secure.", +} + +func (RegistryLocation) SwaggerDoc() map[string]string { + return map_RegistryLocation +} + +var map_RegistrySources = map[string]string{ + "": "RegistrySources holds cluster-wide information about how to handle the registries config.", + "insecureRegistries": "insecureRegistries are registries which do not have a valid TLS certificate or only support HTTP connections.", + "blockedRegistries": "blockedRegistries cannot be used for image pull and push actions. All other registries are permitted.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.", + "allowedRegistries": "allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.", + "containerRuntimeSearchRegistries": "containerRuntimeSearchRegistries are registries that will be searched when pulling images that do not have fully qualified domains in their pull specs. Registries will be searched in the order provided in the list. Note: this search list only works with the container runtime, i.e. CRI-O. Will NOT work with builds or imagestream imports.", +} + +func (RegistrySources) SwaggerDoc() map[string]string { + return map_RegistrySources +} + +var map_ImageContentPolicy = map[string]string{ + "": "ImageContentPolicy holds cluster-wide information about how to handle registry mirror rules.
When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", +} + +func (ImageContentPolicy) SwaggerDoc() map[string]string { + return map_ImageContentPolicy +} + +var map_ImageContentPolicyList = map[string]string{ + "": "ImageContentPolicyList lists the items in the ImageContentPolicy CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (ImageContentPolicyList) SwaggerDoc() map[string]string { + return map_ImageContentPolicyList +} + +var map_ImageContentPolicySpec = map[string]string{ + "": "ImageContentPolicySpec is the specification of the ImageContentPolicy CRD.", + "repositoryDigestMirrors": "repositoryDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in RepositoryDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To pull images from mirrors by tags, set the \"allowMirrorByTags\".\n\nEach “source” repository is treated independently; configurations for different “source” repositories don’t interact.\n\nIf the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec.\n\nWhen multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.", +} + +func (ImageContentPolicySpec) SwaggerDoc() map[string]string { + return map_ImageContentPolicySpec +} + +var map_RepositoryDigestMirrors = map[string]string{ + "": "RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config.", + "source": "source is the repository that users refer to, e.g. in image pull specifications.", + "allowMirrorByTags": "allowMirrorByTags if true, the mirrors can be used to pull the images that are referenced by their tags. Default is false, the mirrors only work when pulling the images that are referenced by their digests. Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. Forcing digest-pulls for mirrors avoids that issue.", + "mirrors": "mirrors is zero or more repositories that may also contain the same images. If the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec. No mirror will be configured. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors.
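The merge rule described above (policy A `a, b, c` plus policy B `c, d, e` yields `a, b, c, d, e`) is an order-preserving union. A sketch of the documented outcome, not the operator's actual implementation:

// mergeMirrors concatenates mirror lists from multiple policies for the same
// source, dropping duplicates while preserving first-seen order.
func mergeMirrors(policies ...[]string) []string {
	seen := make(map[string]bool)
	var merged []string
	for _, mirrors := range policies {
		for _, m := range mirrors {
			if !seen[m] {
				seen[m] = true
				merged = append(merged, m)
			}
		}
	}
	return merged
}

// mergeMirrors([]string{"a", "b", "c"}, []string{"c", "d", "e"})
// => ["a", "b", "c", "d", "e"]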
Other cluster configuration, including (but not limited to) other repositoryDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering.", +} + +func (RepositoryDigestMirrors) SwaggerDoc() map[string]string { + return map_RepositoryDigestMirrors +} + +var map_ImageDigestMirrorSet = map[string]string{ + "": "ImageDigestMirrorSet holds cluster-wide information about how to handle registry mirror rules when using digest pull specifications. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", +} + +func (ImageDigestMirrorSet) SwaggerDoc() map[string]string { + return map_ImageDigestMirrorSet +} + +var map_ImageDigestMirrorSetList = map[string]string{ + "": "ImageDigestMirrorSetList lists the items in the ImageDigestMirrorSet CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (ImageDigestMirrorSetList) SwaggerDoc() map[string]string { + return map_ImageDigestMirrorSetList +} + +var map_ImageDigestMirrorSetSpec = map[string]string{ + "": "ImageDigestMirrorSetSpec is the specification of the ImageDigestMirrorSet CRD.", + "imageDigestMirrors": "imageDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in imageDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To use mirrors to pull images using tag specification, users should configure a list of mirrors using \"ImageTagMirrorSet\" CRD.\n\nIf the image pull specification matches the repository of \"source\" in multiple imagedigestmirrorset objects, only the objects which define the most specific namespace match will be used. For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as the \"source\", only the objects using quay.io/libpod/busybox are going to apply for pull specification quay.io/libpod/busybox. Each “source” repository is treated independently; configurations for different “source” repositories don’t interact.\n\nIf the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec.\n\nWhen multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.
Users who want to use a specific order of mirrors should configure them into one list of mirrors using the expected order.", +} + +func (ImageDigestMirrorSetSpec) SwaggerDoc() map[string]string { + return map_ImageDigestMirrorSetSpec +} + +var map_ImageDigestMirrors = map[string]string{ + "": "ImageDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config.", + "source": "source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname e.g. docker.io, quay.io, or registry.redhat.io, will match the image pull specification of the corresponding registry. \"source\" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo [*.]host for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table", + "mirrors": "mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. Images can be pulled from these mirrors only if they are referenced by their digests. The mirrored location is obtained by replacing the part of the input reference that matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference, a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo repository to be used. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be pulled from the repository in the pull spec unless explicitly prohibited by \"mirrorSourcePolicy\". Other cluster configuration, including (but not limited to) other imageDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering. \"mirrors\" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table", + "mirrorSourcePolicy": "mirrorSourcePolicy defines the fallback policy if it fails to pull the image from the mirrors. If unset, the image will continue to be pulled from the repository in the pull spec. mirrorSourcePolicy is valid configuration only when one or more mirrors are in the mirror list.", +} + +func (ImageDigestMirrors) SwaggerDoc() map[string]string { + return map_ImageDigestMirrors +} + +var map_ImageTagMirrorSet = map[string]string{ + "": "ImageTagMirrorSet holds cluster-wide information about how to handle registry mirror rules when using tag pull specifications.
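The substitution rule above (source *.redhat.io with mirror mirror.local/redhat turns registry.redhat.io/product/repo into mirror.local/redhat/product/repo) can be sketched as a prefix/suffix rewrite. Wildcard matching is simplified here to a single leading "*." pattern; the authoritative rules live in containers/image (see the containers-registries.conf document linked above). Assumes import "strings":

// mirroredRef rewrites pullSpec against one (source, mirror) pair, returning
// the mirrored reference and whether the source matched.
func mirroredRef(pullSpec, source, mirror string) (string, bool) {
	if strings.HasPrefix(source, "*.") {
		host := pullSpec
		if i := strings.Index(pullSpec, "/"); i >= 0 {
			host = pullSpec[:i]
		}
		// Wildcard source: match any registry host ending in the suffix.
		if strings.HasSuffix(host, source[1:]) {
			return mirror + pullSpec[len(host):], true
		}
		return "", false
	}
	if strings.HasPrefix(pullSpec, source) {
		return mirror + pullSpec[len(source):], true
	}
	return "", false
}

// mirroredRef("registry.redhat.io/product/repo", "*.redhat.io", "mirror.local/redhat")
// => "mirror.local/redhat/product/repo", true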
When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", +} + +func (ImageTagMirrorSet) SwaggerDoc() map[string]string { + return map_ImageTagMirrorSet +} + +var map_ImageTagMirrorSetList = map[string]string{ + "": "ImageTagMirrorSetList lists the items in the ImageTagMirrorSet CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (ImageTagMirrorSetList) SwaggerDoc() map[string]string { + return map_ImageTagMirrorSetList +} + +var map_ImageTagMirrorSetSpec = map[string]string{ + "": "ImageTagMirrorSetSpec is the specification of the ImageTagMirrorSet CRD.", + "imageTagMirrors": "imageTagMirrors allows images referenced by image tags in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in imageTagMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To use mirrors to pull images using digest specification only, users should configure a list of mirrors using \"ImageDigestMirrorSet\" CRD.\n\nIf the image pull specification matches the repository of \"source\" in multiple imagetagmirrorset objects, only the objects which define the most specific namespace match will be used. For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as the \"source\", only the objects using quay.io/libpod/busybox are going to apply for pull specification quay.io/libpod/busybox. Each “source” repository is treated independently; configurations for different “source” repositories don’t interact.\n\nIf the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec.\n\nWhen multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified. Users who want to use a deterministic order of mirrors should configure them into one list of mirrors using the expected order.", +} + +func (ImageTagMirrorSetSpec) SwaggerDoc() map[string]string { + return map_ImageTagMirrorSetSpec +} + +var map_ImageTagMirrors = map[string]string{ + "": "ImageTagMirrors holds cluster-wide information about how to handle mirrors in the registries config.", + "source": "source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname e.g. docker.io, quay.io, or registry.redhat.io, will match the image pull specification of the corresponding registry.
\"source\" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo [*.]host for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table", + "mirrors": "mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. Images can be pulled from these mirrors only if they are referenced by their tags. The mirrored location is obtained by replacing the part of the input reference that matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference, a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo repository to be used. Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. Configuring a list of mirrors using \"ImageDigestMirrorSet\" CRD and forcing digest-pulls for mirrors avoids that issue. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be pulled from the repository in the pull spec unless explicitly prohibited by \"mirrorSourcePolicy\". Other cluster configuration, including (but not limited to) other imageTagMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering. \"mirrors\" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table", + "mirrorSourcePolicy": "mirrorSourcePolicy defines the fallback policy if fails to pull image from the mirrors. If unset, the image will continue to be pulled from the repository in the pull spec. sourcePolicy is valid configuration only when one or more mirrors are in the mirror list.", +} + +func (ImageTagMirrors) SwaggerDoc() map[string]string { + return map_ImageTagMirrors +} + +var map_AWSPlatformSpec = map[string]string{ + "": "AWSPlatformSpec holds the desired state of the Amazon Web Services infrastructure provider. This only includes fields that can be modified in the cluster.", + "serviceEndpoints": "serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.", +} + +func (AWSPlatformSpec) SwaggerDoc() map[string]string { + return map_AWSPlatformSpec +} + +var map_AWSPlatformStatus = map[string]string{ + "": "AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.", + "region": "region holds the default AWS region for new AWS resources created by the cluster.", + "serviceEndpoints": "ServiceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.", + "resourceTags": "resourceTags is a list of additional tags to apply to AWS resources created for the cluster. 
See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user.", +} + +func (AWSPlatformStatus) SwaggerDoc() map[string]string { + return map_AWSPlatformStatus +} + +var map_AWSResourceTag = map[string]string{ + "": "AWSResourceTag is a tag to apply to AWS resources created for the cluster.", + "key": "key is the key of the tag", + "value": "value is the value of the tag. Some AWS services do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services.", +} + +func (AWSResourceTag) SwaggerDoc() map[string]string { + return map_AWSResourceTag +} + +var map_AWSServiceEndpoint = map[string]string{ + "": "AWSServiceEndpoint stores the configuration of a custom URL to override existing defaults of AWS Services.", + "name": "name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty.", + "url": "url is a fully qualified URI with scheme https that overrides the default generated endpoint for a client. This must be provided and cannot be empty.", +} + +func (AWSServiceEndpoint) SwaggerDoc() map[string]string { + return map_AWSServiceEndpoint +} + +var map_AlibabaCloudPlatformSpec = map[string]string{ + "": "AlibabaCloudPlatformSpec holds the desired state of the Alibaba Cloud infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (AlibabaCloudPlatformSpec) SwaggerDoc() map[string]string { + return map_AlibabaCloudPlatformSpec +} + +var map_AlibabaCloudPlatformStatus = map[string]string{ + "": "AlibabaCloudPlatformStatus holds the current status of the Alibaba Cloud infrastructure provider.", + "region": "region specifies the region for Alibaba Cloud resources created for the cluster.", + "resourceGroupID": "resourceGroupID is the ID of the resource group for the cluster.", + "resourceTags": "resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster.", +} + +func (AlibabaCloudPlatformStatus) SwaggerDoc() map[string]string { + return map_AlibabaCloudPlatformStatus +} + +var map_AlibabaCloudResourceTag = map[string]string{ + "": "AlibabaCloudResourceTag is the set of tags to add to apply to resources.", + "key": "key is the key of the tag.", + "value": "value is the value of the tag.", +} + +func (AlibabaCloudResourceTag) SwaggerDoc() map[string]string { + return map_AlibabaCloudResourceTag +} + +var map_AzurePlatformSpec = map[string]string{ + "": "AzurePlatformSpec holds the desired state of the Azure infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (AzurePlatformSpec) SwaggerDoc() map[string]string { + return map_AzurePlatformSpec +} + +var map_AzurePlatformStatus = map[string]string{ + "": "AzurePlatformStatus holds the current status of the Azure infrastructure provider.", + "resourceGroupName": "resourceGroupName is the Resource Group for new Azure resources created for the cluster.", + "networkResourceGroupName": "networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster.
If empty, the value is the same as ResourceGroupName.", + "cloudName": "cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`.", + "armEndpoint": "armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack.", +} + +func (AzurePlatformStatus) SwaggerDoc() map[string]string { + return map_AzurePlatformStatus +} + +var map_BareMetalPlatformSpec = map[string]string{ + "": "BareMetalPlatformSpec holds the desired state of the BareMetal infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (BareMetalPlatformSpec) SwaggerDoc() map[string]string { + return map_BareMetalPlatformSpec +} + +var map_BareMetalPlatformStatus = map[string]string{ + "": "BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider. For more information about the network architecture used with the BareMetal platform type, see: https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", + "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", +} + +func (BareMetalPlatformStatus) SwaggerDoc() map[string]string { + return map_BareMetalPlatformStatus +} + +var map_EquinixMetalPlatformSpec = map[string]string{ + "": "EquinixMetalPlatformSpec holds the desired state of the Equinix Metal infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (EquinixMetalPlatformSpec) SwaggerDoc() map[string]string { + return map_EquinixMetalPlatformSpec +} + +var map_EquinixMetalPlatformStatus = map[string]string{ + "": "EquinixMetalPlatformStatus holds the current status of the Equinix Metal infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller.
The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", +} + +func (EquinixMetalPlatformStatus) SwaggerDoc() map[string]string { + return map_EquinixMetalPlatformStatus +} + +var map_GCPPlatformSpec = map[string]string{ + "": "GCPPlatformSpec holds the desired state of the Google Cloud Platform infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (GCPPlatformSpec) SwaggerDoc() map[string]string { + return map_GCPPlatformSpec +} + +var map_GCPPlatformStatus = map[string]string{ + "": "GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.", + "projectID": "projectID is the Project ID for new GCP resources created for the cluster.", + "region": "region holds the region for new GCP resources created for the cluster.", +} + +func (GCPPlatformStatus) SwaggerDoc() map[string]string { + return map_GCPPlatformStatus +} + +var map_IBMCloudPlatformSpec = map[string]string{ + "": "IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (IBMCloudPlatformSpec) SwaggerDoc() map[string]string { + return map_IBMCloudPlatformSpec +} + +var map_IBMCloudPlatformStatus = map[string]string{ + "": "IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider.", + "location": "Location is where the cluster has been deployed", + "resourceGroupName": "ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.", + "providerType": "ProviderType indicates the type of cluster that was created", + "cisInstanceCRN": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", +} + +func (IBMCloudPlatformStatus) SwaggerDoc() map[string]string { + return map_IBMCloudPlatformStatus +} + +var map_Infrastructure = map[string]string{ + "": "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Infrastructure) SwaggerDoc() map[string]string { + return map_Infrastructure +} + +var map_InfrastructureList = map[string]string{ + "": "InfrastructureList is a list of Infrastructure resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (InfrastructureList) SwaggerDoc() map[string]string { + return map_InfrastructureList +} + +var map_InfrastructureSpec = map[string]string{ + "": "InfrastructureSpec contains settings that apply to the cluster infrastructure.", + "cloudConfig": "cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. This configuration file is used to configure the Kubernetes cloud provider integration when using the built-in cloud provider integration or the external cloud controller manager. The namespace for this config map is openshift-config.\n\ncloudConfig should only be consumed by the kube_cloud_config controller. The controller is responsible for using the user configuration in the spec for various platforms and combining that with the user provided ConfigMap in this field to create a stitched kube cloud config.
The controller generates a ConfigMap `kube-cloud-config` in `openshift-config-managed` namespace with the kube cloud config stored in the `cloud.conf` key. All the clients are expected to use the generated ConfigMap only.", + "platformSpec": "platformSpec holds desired information specific to the underlying infrastructure provider.", +} + +func (InfrastructureSpec) SwaggerDoc() map[string]string { + return map_InfrastructureSpec +} + +var map_InfrastructureStatus = map[string]string{ + "": "InfrastructureStatus describes the infrastructure the cluster is leveraging.", + "infrastructureName": "infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed. Must be of max length 27 and must have only alphanumeric or hyphen characters.", + "platform": "platform is the underlying infrastructure provider for the cluster.\n\nDeprecated: Use platformStatus.type instead.", + "platformStatus": "platformStatus holds status information specific to the underlying infrastructure provider.", + "etcdDiscoveryDomain": "etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release.", + "apiServerURL": "apiServerURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API.", + "apiServerInternalURI": "apiServerInternalURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components like kubelets, to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking.", + "controlPlaneTopology": "controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation. The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster.", + "infrastructureTopology": "infrastructureTopology expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation. NOTE: External topology mode is not applicable for this field.", +} + +func (InfrastructureStatus) SwaggerDoc() map[string]string { + return map_InfrastructureStatus +} + +var map_KubevirtPlatformSpec = map[string]string{ + "": "KubevirtPlatformSpec holds the desired state of the kubevirt infrastructure provider.
This only includes fields that can be modified in the cluster.", +} + +func (KubevirtPlatformSpec) SwaggerDoc() map[string]string { + return map_KubevirtPlatformSpec +} + +var map_KubevirtPlatformStatus = map[string]string{ + "": "KubevirtPlatformStatus holds the current status of the kubevirt infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", +} + +func (KubevirtPlatformStatus) SwaggerDoc() map[string]string { + return map_KubevirtPlatformStatus +} + +var map_NutanixPlatformSpec = map[string]string{ + "": "NutanixPlatformSpec holds the desired state of the Nutanix infrastructure provider. This only includes fields that can be modified in the cluster.", + "prismCentral": "prismCentral holds the endpoint address and port to access the Nutanix Prism Central. When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list.", + "prismElements": "prismElements holds one or more endpoint address and port data to access the Nutanix Prism Elements (clusters) of the Nutanix Prism Central. Currently we only support one Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) spread over multiple Prism Elements (clusters) of the Prism Central.", +} + +func (NutanixPlatformSpec) SwaggerDoc() map[string]string { + return map_NutanixPlatformSpec +} + +var map_NutanixPlatformStatus = map[string]string{ + "": "NutanixPlatformStatus holds the current status of the Nutanix infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", +} + +func (NutanixPlatformStatus) SwaggerDoc() map[string]string { + return map_NutanixPlatformStatus +} + +var map_NutanixPrismElementEndpoint = map[string]string{ + "": "NutanixPrismElementEndpoint holds the name and endpoint data for a Prism Element (cluster)", + "name": "name is the name of the Prism Element (cluster). This value will correspond with the cluster field configured on other resources (e.g. Machines, PVCs, etc.).", + "endpoint": "endpoint holds the endpoint address and port data of the Prism Element (cluster). When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy.
Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list.", +} + +func (NutanixPrismElementEndpoint) SwaggerDoc() map[string]string { + return map_NutanixPrismElementEndpoint +} + +var map_NutanixPrismEndpoint = map[string]string{ + "": "NutanixPrismEndpoint holds the endpoint address and port to access the Nutanix Prism Central or Element (cluster)", + "address": "address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster)", + "port": "port is the port number to access the Nutanix Prism Central or Element (cluster)", +} + +func (NutanixPrismEndpoint) SwaggerDoc() map[string]string { + return map_NutanixPrismEndpoint +} + +var map_OpenStackPlatformSpec = map[string]string{ + "": "OpenStackPlatformSpec holds the desired state of the OpenStack infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (OpenStackPlatformSpec) SwaggerDoc() map[string]string { + return map_OpenStackPlatformSpec +} + +var map_OpenStackPlatformStatus = map[string]string{ + "": "OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "cloudName": "cloudName is the name of the desired OpenStack cloud in the client configuration file (`clouds.yaml`).", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", + "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", +} + +func (OpenStackPlatformStatus) SwaggerDoc() map[string]string { + return map_OpenStackPlatformStatus +} + +var map_OvirtPlatformSpec = map[string]string{ + "": "OvirtPlatformSpec holds the desired state of the oVirt infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (OvirtPlatformSpec) SwaggerDoc() map[string]string { + return map_OvirtPlatformSpec +} + +var map_OvirtPlatformStatus = map[string]string{ + "": "OvirtPlatformStatus holds the current status of the oVirt infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", + "nodeDNSIP": "deprecated: as of 4.6, this field is no longer set or honored. 
It will be removed in a future release.", +} + +func (OvirtPlatformStatus) SwaggerDoc() map[string]string { + return map_OvirtPlatformStatus +} + +var map_PlatformSpec = map[string]string{ + "": "PlatformSpec holds the desired state specific to the underlying infrastructure provider of the current cluster. Since these are used at spec-level for the underlying cluster, it is expected that only one of the spec structs is set.", + "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"KubeVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.", + "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.", + "azure": "Azure contains settings specific to the Azure infrastructure provider.", + "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.", + "baremetal": "BareMetal contains settings specific to the BareMetal platform.", + "openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.", + "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.", + "vsphere": "VSphere contains settings specific to the VSphere infrastructure provider.", + "ibmcloud": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.", + "kubevirt": "Kubevirt contains settings specific to the kubevirt infrastructure provider.", + "equinixMetal": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.", + "powervs": "PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.", + "alibabaCloud": "AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", + "nutanix": "Nutanix contains settings specific to the Nutanix infrastructure provider.", +} + +func (PlatformSpec) SwaggerDoc() map[string]string { + return map_PlatformSpec +} + +var map_PlatformStatus = map[string]string{ + "": "PlatformStatus holds the current status specific to the underlying infrastructure provider of the current cluster. Since these are used at status-level for the underlying cluster, it is expected that only one of the status structs is set.", + "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.\n\nThis value will be synced to the `status.platform` and `status.platformStatus.type`.
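Both type docs above impose the same consumer rule: prefer status.platformStatus.type, fall back to the deprecated status.platform, and treat anything unrecognized as "None". A sketch using plain strings as stand-ins for the platform-type enum:

// effectivePlatform resolves the platform a component should act on, per the
// documented rule: unrecognized platforms must be handled as None.
func effectivePlatform(platformStatusType, deprecatedPlatform string, supported map[string]bool) string {
	t := platformStatusType
	if t == "" {
		t = deprecatedPlatform // deprecated status.platform fallback
	}
	if !supported[t] {
		return "None"
	}
	return t
}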
Currently this value cannot be changed once set.", + "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.", + "azure": "Azure contains settings specific to the Azure infrastructure provider.", + "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.", + "baremetal": "BareMetal contains settings specific to the BareMetal platform.", + "openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.", + "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.", + "vsphere": "VSphere contains settings specific to the VSphere infrastructure provider.", + "ibmcloud": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.", + "kubevirt": "Kubevirt contains settings specific to the kubevirt infrastructure provider.", + "equinixMetal": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.", + "powervs": "PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider.", + "alibabaCloud": "AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", + "nutanix": "Nutanix contains settings specific to the Nutanix infrastructure provider.", +} + +func (PlatformStatus) SwaggerDoc() map[string]string { + return map_PlatformStatus +} + +var map_PowerVSPlatformSpec = map[string]string{ + "": "PowerVSPlatformSpec holds the desired state of the IBM Power Systems Virtual Servers infrastructure provider. This only includes fields that can be modified in the cluster.", + "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service.", +} + +func (PowerVSPlatformSpec) SwaggerDoc() map[string]string { + return map_PowerVSPlatformSpec +} + +var map_PowerVSPlatformStatus = map[string]string{ + "": "PowerVSPlatformStatus holds the current status of the IBM Power Systems Virtual Servers infrastructure provider.", + "region": "region holds the default Power VS region for new Power VS resources created by the cluster.", + "zone": "zone holds the default zone for the new Power VS resources created by the cluster. Note: Currently only single-zone OCP clusters are supported", + "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service.", + "cisInstanceCRN": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", +} + +func (PowerVSPlatformStatus) SwaggerDoc() map[string]string { + return map_PowerVSPlatformStatus +} + +var map_PowerVSServiceEndpoint = map[string]string{ + "": "PowerVSServiceEndpoint stores the configuration of a custom URL to override existing defaults of PowerVS Services.", + "name": "name is the name of the Power VS service. A few of the services are: IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud", + "url": "url is a fully qualified URI with scheme https that overrides the default generated endpoint for a client.
This must be provided and cannot be empty.", +} + +func (PowerVSServiceEndpoint) SwaggerDoc() map[string]string { + return map_PowerVSServiceEndpoint +} + +var map_VSpherePlatformSpec = map[string]string{ + "": "VSpherePlatformSpec holds the desired state of the vSphere infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (VSpherePlatformSpec) SwaggerDoc() map[string]string { + return map_VSpherePlatformSpec +} + +var map_VSpherePlatformStatus = map[string]string{ + "": "VSpherePlatformStatus holds the current status of the vSphere infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", + "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for vSphere deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", +} + +func (VSpherePlatformStatus) SwaggerDoc() map[string]string { + return map_VSpherePlatformStatus +} + +var map_ComponentRouteSpec = map[string]string{ + "": "ComponentRouteSpec allows for configuration of a route's hostname and serving certificate.", + "namespace": "namespace is the namespace of the route to customize.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized.", + "name": "name is the logical name of the route to customize.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized.", + "hostname": "hostname is the hostname that should be used by the route.", + "servingCertKeyPairSecret": "servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed.", +} + +func (ComponentRouteSpec) SwaggerDoc() map[string]string { + return map_ComponentRouteSpec +} + +var map_ComponentRouteStatus = map[string]string{ + "": "ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate.", + "namespace": "namespace is the namespace of the route to customize. It must be a real namespace. Using an actual namespace ensures that no two components will conflict and the same component can be installed multiple times.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized.", + "name": "name is the logical name of the route to customize. 
It does not have to be the actual name of a route resource but it cannot be renamed.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized.", + "defaultHostname": "defaultHostname is the hostname of this route prior to customization.", + "consumingUsers": "consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret.", + "currentHostnames": "currentHostnames is the list of current names used by the route. Typically, this list should consist of a single hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list.", + "conditions": "conditions are used to communicate the state of the componentRoutes entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the content served by the route can be accessed by users. This includes cases where a default may continue to serve content while the customized route specified by the cluster-admin is being configured.\n\nIf Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry. The currentHostnames field may or may not be in effect.\n\nIf Progressing is true, that means the component is taking some action related to the componentRoutes entry.", + "relatedObjects": "relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied.", +} + +func (ComponentRouteStatus) SwaggerDoc() map[string]string { + return map_ComponentRouteStatus +} + +var map_Ingress = map[string]string{ + "": "Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. The canonical name is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Ingress) SwaggerDoc() map[string]string { + return map_Ingress +} + +var map_IngressList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (IngressList) SwaggerDoc() map[string]string { + return map_IngressList +} + +var map_IngressSpec = map[string]string{ + "domain": "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"<route-name>.<route-namespace>.<domain>\".\n\nIt is also used as the default wildcard domain suffix for ingress. The default ingresscontroller domain will follow this pattern: \"*.<domain>\".\n\nOnce set, changing domain is not currently supported.", + "appsDomain": "appsDomain is an optional domain to use instead of the one specified in the domain field when a Route is created without specifying an explicit host. If appsDomain is nonempty, this value is used to generate default host values for Route. Unlike domain, appsDomain may be modified after installation. This assumes a new ingresscontroller has been set up with a wildcard certificate.", + "componentRoutes": "componentRoutes is an optional list of routes that are managed by OpenShift components that a cluster-admin is able to configure the hostname and serving certificate for.
The namespace and name of each route in this list should match an existing entry in the status.componentRoutes list.\n\nTo determine the set of configurable Routes, look at namespace and name of entries in the .status.componentRoutes list, where participating operators write the status of configurable routes.", + "requiredHSTSPolicies": "requiredHSTSPolicies specifies HSTS policies that are required to be set on newly created or updated routes matching the domainPattern/s and namespaceSelector/s that are specified in the policy. Each requiredHSTSPolicy must have at least a domainPattern and a maxAge to validate a route HSTS Policy route annotation, and affect route admission.\n\nA candidate route is checked for HSTS Policies if it has the HSTS Policy route annotation: \"haproxy.router.openshift.io/hsts_header\" E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains\n\n- For each candidate route, if it matches a requiredHSTSPolicy domainPattern and optional namespaceSelector, then the maxAge, preloadPolicy, and includeSubdomainsPolicy must be valid to be admitted. Otherwise, the route is rejected. - The first match, by domainPattern and optional namespaceSelector, in the ordering of the RequiredHSTSPolicies determines the route's admission status. - If the candidate route doesn't match any requiredHSTSPolicy domainPattern and optional namespaceSelector, then it may use any HSTS Policy annotation.\n\nThe HSTS policy configuration may be changed after routes have already been created. An update to a previously admitted route may then fail if the updated route does not conform to the updated HSTS policy configuration. However, changing the HSTS policy configuration will not cause a route that is already admitted to stop working.\n\nNote that if there are no RequiredHSTSPolicies, any HSTS Policy annotation on the route is valid.", +} + +func (IngressSpec) SwaggerDoc() map[string]string { + return map_IngressSpec +} + +var map_IngressStatus = map[string]string{ + "componentRoutes": "componentRoutes is where participating operators place the current route status for routes whose hostnames and serving certificates can be customized by the cluster-admin.", + "defaultPlacement": "defaultPlacement is set at installation time to control which nodes will host the ingress router pods by default. The options are control-plane nodes or worker nodes.\n\nThis field works by dictating how the Cluster Ingress Operator will consider unset replicas and nodePlacement fields in IngressController resources when creating the corresponding Deployments.\n\nSee the documentation for the IngressController replicas and nodePlacement fields for more information.\n\nWhen omitted, the default value is Workers", +} + +func (IngressStatus) SwaggerDoc() map[string]string { + return map_IngressStatus +} + +var map_ClusterNetworkEntry = map[string]string{ + "": "ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated.", + "cidr": "The complete block for pod IPs.", + "hostPrefix": "The size (prefix) of block to allocate to each node. If this field is not used by the plugin, it can be left unset.", +} + +func (ClusterNetworkEntry) SwaggerDoc() map[string]string { + return map_ClusterNetworkEntry +} + +var map_ExternalIPConfig = map[string]string{ + "": "ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field of a Service resource.", + "policy": "policy is a set of restrictions applied to the ExternalIP field. 
If nil or empty, then ExternalIP is not allowed to be set.", + "autoAssignCIDRs": "autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In OpenShift 3.x, this was misleadingly called \"IngressIPs\". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided.", +} + +func (ExternalIPConfig) SwaggerDoc() map[string]string { + return map_ExternalIPConfig +} + +var map_ExternalIPPolicy = map[string]string{ + "": "ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP field in a Service. If the zero struct is supplied, then none are permitted. The policy controller always allows automatically assigned external IPs.", + "allowedCIDRs": "allowedCIDRs is the list of allowed CIDRs.", + "rejectedCIDRs": "rejectedCIDRs is the list of disallowed CIDRs. These take precedence over allowedCIDRs.", +} + +func (ExternalIPPolicy) SwaggerDoc() map[string]string { + return map_ExternalIPPolicy +} + +var map_MTUMigration = map[string]string{ + "": "MTUMigration contains information about MTU migration.", + "network": "Network contains MTU migration configuration for the default network.", + "machine": "Machine contains MTU migration configuration for the machine's uplink.", +} + +func (MTUMigration) SwaggerDoc() map[string]string { + return map_MTUMigration +} + +var map_MTUMigrationValues = map[string]string{ + "": "MTUMigrationValues contains the values for an MTU migration.", + "to": "To is the MTU to migrate to.", + "from": "From is the MTU to migrate from.", +} + +func (MTUMigrationValues) SwaggerDoc() map[string]string { + return map_MTUMigrationValues +} + +var map_Network = map[string]string{ + "": "Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. Please view network.spec for an explanation on what applies when configuring this resource.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Network) SwaggerDoc() map[string]string { + return map_Network +} + +var map_NetworkList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (NetworkList) SwaggerDoc() map[string]string { + return map_NetworkList +} + +var map_NetworkMigration = map[string]string{ + "": "NetworkMigration represents the cluster network configuration.", + "networkType": "NetworkType is the target plugin that is to be deployed.
Currently supported values are: OpenShiftSDN, OVNKubernetes", + "mtu": "MTU contains the MTU migration configuration.", +} + +func (NetworkMigration) SwaggerDoc() map[string]string { + return map_NetworkMigration +} + +var map_NetworkSpec = map[string]string{ + "": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.", + "clusterNetwork": "IP address pool to use for pod IPs. This field is immutable after installation.", + "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation.", + "networkType": "NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN This field is immutable after installation.", + "externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.", + "serviceNodePortRange": "The port range allowed for Services of type NodePort. If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range. This parameter can be updated after the cluster is installed.", +} + +func (NetworkSpec) SwaggerDoc() map[string]string { + return map_NetworkSpec +} + +var map_NetworkStatus = map[string]string{ + "": "NetworkStatus is the current network configuration.", + "clusterNetwork": "IP address pool to use for pod IPs.", + "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here.", + "networkType": "NetworkType is the plugin that is deployed (e.g. 
OpenShiftSDN).", + "clusterNetworkMTU": "ClusterNetworkMTU is the MTU for inter-pod networking.", + "migration": "Migration contains the cluster network migration configuration.", +} + +func (NetworkStatus) SwaggerDoc() map[string]string { + return map_NetworkStatus +} + +var map_Node = map[string]string{ + "": "Node holds cluster-wide information about node specific features.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values.", +} + +func (Node) SwaggerDoc() map[string]string { + return map_Node +} + +var map_NodeList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (NodeList) SwaggerDoc() map[string]string { + return map_NodeList +} + +var map_NodeSpec = map[string]string{ + "cgroupMode": "CgroupMode determines the cgroups version on the node", + "workerLatencyProfile": "WorkerLatencyProfile determines how fast the kubelet updates its status, and the corresponding reaction of the cluster", +} + +func (NodeSpec) SwaggerDoc() map[string]string { + return map_NodeSpec +} + +var map_NodeStatus = map[string]string{ + "workerLatencyProfileStatus": "WorkerLatencyProfileStatus provides the current status of WorkerLatencyProfile", +} + +func (NodeStatus) SwaggerDoc() map[string]string { + return map_NodeStatus +} + +var map_WorkerLatencyProfileStatus = map[string]string{ + "": "WorkerLatencyProfileStatus provides status information about the WorkerLatencyProfile rollout", + "conditions": "conditions describes the state of the WorkerLatencyProfile and related components (Kubelet or Controller Manager or Kube API Server) whether progressing, degraded or complete", +} + +func (WorkerLatencyProfileStatus) SwaggerDoc() map[string]string { + return map_WorkerLatencyProfileStatus +} + +var map_BasicAuthIdentityProvider = map[string]string{ + "": "BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials", +} + +func (BasicAuthIdentityProvider) SwaggerDoc() map[string]string { + return map_BasicAuthIdentityProvider +} + +var map_GitHubIdentityProvider = map[string]string{ + "": "GitHubIdentityProvider provides identities for users authenticating using GitHub credentials", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "organizations": "organizations optionally restricts which organizations are allowed to log in", + "teams": "teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.", + "hostname": "hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value configured at /setup/settings#hostname.", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored.
If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. The namespace for this config map is openshift-config.", +} + +func (GitHubIdentityProvider) SwaggerDoc() map[string]string { + return map_GitHubIdentityProvider +} + +var map_GitLabIdentityProvider = map[string]string{ + "": "GitLabIdentityProvider provides identities for users authenticating using GitLab credentials", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "url": "url is the oauth server base URL", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.", +} + +func (GitLabIdentityProvider) SwaggerDoc() map[string]string { + return map_GitLabIdentityProvider +} + +var map_GoogleIdentityProvider = map[string]string{ + "": "GoogleIdentityProvider provides identities for users authenticating using Google credentials", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "hostedDomain": "hostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to", +} + +func (GoogleIdentityProvider) SwaggerDoc() map[string]string { + return map_GoogleIdentityProvider +} + +var map_HTPasswdIdentityProvider = map[string]string{ + "": "HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials", + "fileData": "fileData is a required reference to a secret by name containing the data to use as the htpasswd file. The key \"htpasswd\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. If the specified htpasswd data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.", +} + +func (HTPasswdIdentityProvider) SwaggerDoc() map[string]string { + return map_HTPasswdIdentityProvider +} + +var map_IdentityProvider = map[string]string{ + "": "IdentityProvider provides identities for users authenticating using credentials", + "name": "name is used to qualify the identities returned by this provider. 
- It MUST be unique and not shared by any other identity provider used - It MUST be a valid path segment: name cannot equal \".\" or \"..\" or contain \"/\" or \"%\" or \":\"\n Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName", + "mappingMethod": "mappingMethod determines how identities from this provider are mapped to users. Defaults to \"claim\"", +} + +func (IdentityProvider) SwaggerDoc() map[string]string { + return map_IdentityProvider +} + +var map_IdentityProviderConfig = map[string]string{ + "": "IdentityProviderConfig contains configuration for using a specific identity provider", + "type": "type identifies the identity provider type for this entry.", + "basicAuth": "basicAuth contains configuration options for the BasicAuth IdP", + "github": "github enables user authentication using GitHub credentials", + "gitlab": "gitlab enables user authentication using GitLab credentials", + "google": "google enables user authentication using Google credentials", + "htpasswd": "htpasswd enables user authentication using an HTPasswd file to validate credentials", + "keystone": "keystone enables user authentication using keystone password credentials", + "ldap": "ldap enables user authentication using LDAP credentials", + "openID": "openID enables user authentication using OpenID credentials", + "requestHeader": "requestHeader enables user authentication using request header credentials", +} + +func (IdentityProviderConfig) SwaggerDoc() map[string]string { + return map_IdentityProviderConfig +} + +var map_KeystoneIdentityProvider = map[string]string{ + "": "KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials", + "domainName": "domainName is required for keystone v3", +} + +func (KeystoneIdentityProvider) SwaggerDoc() map[string]string { + return map_KeystoneIdentityProvider +} + +var map_LDAPAttributeMapping = map[string]string{ + "": "LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields", + "id": "id is the list of attributes whose values should be used as the user ID. Required. First non-empty attribute is used. At least one attribute is required. If none of the listed attributes have a value, authentication fails. LDAP standard identity attribute is \"dn\"", + "preferredUsername": "preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"", + "name": "name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity. LDAP standard display name attribute is \"cn\"", + "email": "email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", +} + +func (LDAPAttributeMapping) SwaggerDoc() map[string]string { + return map_LDAPAttributeMapping +} + +var map_LDAPIdentityProvider = map[string]string{ + "": "LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials", + "url": "url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is: ldap://host:port/basedn?attribute?scope?filter", + "bindDN": "bindDN is an optional DN to bind with during the search phase.", + "bindPassword": "bindPassword is an optional reference to a secret by name containing a password to bind with during the search phase. The key \"bindPassword\" is used to locate the data.
If specified and the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "insecure": "insecure, if true, indicates the connection should not use TLS. WARNING: Should not be set to `true` with the URL scheme \"ldaps://\" as \"ldaps://\" URLs always\n attempt to connect using TLS, even when `insecure` is set to `true`\nWhen `true`, \"ldap://\" URLs connect insecurely. When `false`, \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.", + "attributes": "attributes maps LDAP attributes to identities", +} + +func (LDAPIdentityProvider) SwaggerDoc() map[string]string { + return map_LDAPIdentityProvider +} + +var map_OAuth = map[string]string{ + "": "OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. It is used to configure the integrated OAuth server. This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (OAuth) SwaggerDoc() map[string]string { + return map_OAuth +} + +var map_OAuthList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (OAuthList) SwaggerDoc() map[string]string { + return map_OAuthList +} + +var map_OAuthRemoteConnectionInfo = map[string]string{ + "": "OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection", + "url": "url is the remote URL to connect to", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.", + "tlsClientCert": "tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key \"tls.crt\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.", + "tlsClientKey": "tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert.
The key \"tls.key\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.", +} + +func (OAuthRemoteConnectionInfo) SwaggerDoc() map[string]string { + return map_OAuthRemoteConnectionInfo +} + +var map_OAuthSpec = map[string]string{ + "": "OAuthSpec contains desired cluster auth configuration", + "identityProviders": "identityProviders is an ordered list of ways for a user to identify themselves. When this list is empty, no identities are provisioned for users.", + "tokenConfig": "tokenConfig contains options for authorization and access tokens", + "templates": "templates allow you to customize pages like the login page.", +} + +func (OAuthSpec) SwaggerDoc() map[string]string { + return map_OAuthSpec +} + +var map_OAuthStatus = map[string]string{ + "": "OAuthStatus shows current known state of OAuth server in the cluster", +} + +func (OAuthStatus) SwaggerDoc() map[string]string { + return map_OAuthStatus +} + +var map_OAuthTemplates = map[string]string{ + "": "OAuthTemplates allow for customization of pages like the login page", + "login": "login is the name of a secret that specifies a go template to use to render the login page. The key \"login.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default login page is used. If the specified template is not valid, the default login page is used. If unspecified, the default login page is used. The namespace for this secret is openshift-config.", + "providerSelection": "providerSelection is the name of a secret that specifies a go template to use to render the provider selection page. The key \"providers.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default provider selection page is used. If the specified template is not valid, the default provider selection page is used. If unspecified, the default provider selection page is used. The namespace for this secret is openshift-config.", + "error": "error is the name of a secret that specifies a go template to use to render error pages during the authentication or grant flow. The key \"errors.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default error page is used. If the specified template is not valid, the default error page is used. If unspecified, the default error page is used. The namespace for this secret is openshift-config.", +} + +func (OAuthTemplates) SwaggerDoc() map[string]string { + return map_OAuthTemplates +} + +var map_OpenIDClaims = map[string]string{ + "": "OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider", + "preferredUsername": "preferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the sub claim", + "name": "name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity", + "email": "email is the list of claims whose values should be used as the email address. Optional. 
If unspecified, no email is set for the identity", + "groups": "groups is the list of claims whose values should be used to synchronize groups from the OIDC provider to OpenShift for the user. If multiple claims are specified, the first one with a non-empty value is used.", +} + +func (OpenIDClaims) SwaggerDoc() map[string]string { + return map_OpenIDClaims +} + +var map_OpenIDIdentityProvider = map[string]string{ + "": "OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.", + "extraScopes": "extraScopes are any scopes to request in addition to the standard \"openid\" scope.", + "extraAuthorizeParameters": "extraAuthorizeParameters are any custom parameters to add to the authorize request.", + "issuer": "issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. It must use the https scheme with no query or fragment component.", + "claims": "claims mappings", +} + +func (OpenIDIdentityProvider) SwaggerDoc() map[string]string { + return map_OpenIDIdentityProvider +} + +var map_RequestHeaderIdentityProvider = map[string]string{ + "": "RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials", + "loginURL": "loginURL is a URL to redirect unauthenticated /authorize requests to. Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here. ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}\nRequired when login is set to true.", + "challengeURL": "challengeURL is a URL to redirect unauthenticated /authorize requests to. Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here. ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}\nRequired when challenge is set to true.", + "ca": "ca is a required reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. Specifically, it allows verification of incoming requests to prevent header spoofing. The key \"ca.crt\" is used to locate the data. If the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored.
The namespace for this config map is openshift-config.", + "clientCommonNames": "clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative.", + "headers": "headers is the set of headers to check for identity information", + "preferredUsernameHeaders": "preferredUsernameHeaders is the set of headers to check for the preferred username", + "nameHeaders": "nameHeaders is the set of headers to check for the display name", + "emailHeaders": "emailHeaders is the set of headers to check for the email address", +} + +func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string { + return map_RequestHeaderIdentityProvider +} + +var map_TokenConfig = map[string]string{ + "": "TokenConfig holds the necessary configuration options for authorization and access tokens", + "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds defines the maximum age of access tokens", + "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.", + "accessTokenInactivityTimeout": "accessTokenInactivityTimeout defines the token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this time window. The user will need to acquire a new token to regain access once a token times out. Takes a valid time duration string such as \"5m\", \"1.5h\" or \"2h45m\". The minimum allowed value for duration is 300s (5 minutes). If the timeout is configured per client, then that value takes precedence. If the timeout value is not specified and the client does not override the value, then tokens are valid for their full lifetime.\n\nWARNING: existing tokens' timeout will not be affected (lowered) by changing this value", +} + +func (TokenConfig) SwaggerDoc() map[string]string { + return map_TokenConfig +} + +var map_HubSource = map[string]string{ + "": "HubSource is used to specify the hub source and its configuration", + "name": "name is the name of one of the default hub sources", + "disabled": "disabled is used to disable a default hub source on cluster", +} + +func (HubSource) SwaggerDoc() map[string]string { + return map_HubSource +} + +var map_HubSourceStatus = map[string]string{ + "": "HubSourceStatus is used to reflect the current state of applying the configuration to a default source", + "status": "status indicates success or failure in applying the configuration", + "message": "message provides more information regarding failures", +} + +func (HubSourceStatus) SwaggerDoc() map[string]string { + return map_HubSourceStatus +} + +var map_OperatorHub = map[string]string{ + "": "OperatorHub is the Schema for the operatorhubs API.
It can be used to change the state of the default hub sources for OperatorHub on the cluster from enabled to disabled and vice versa.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (OperatorHub) SwaggerDoc() map[string]string { + return map_OperatorHub +} + +var map_OperatorHubList = map[string]string{ + "": "OperatorHubList contains a list of OperatorHub\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (OperatorHubList) SwaggerDoc() map[string]string { + return map_OperatorHubList +} + +var map_OperatorHubSpec = map[string]string{ + "": "OperatorHubSpec defines the desired state of OperatorHub", + "disableAllDefaultSources": "disableAllDefaultSources allows you to disable all the default hub sources. If this is true, a specific entry in sources can be used to enable a default source. If this is false, a specific entry in sources can be used to disable or enable a default source.", + "sources": "sources is the list of default hub sources and their configuration. If the list is empty, it implies that the default hub sources are enabled on the cluster unless disableAllDefaultSources is true. If disableAllDefaultSources is true and sources is not empty, the configuration present in sources will take precedence. The list of default hub sources and their current state will always be reflected in the status block.", +} + +func (OperatorHubSpec) SwaggerDoc() map[string]string { + return map_OperatorHubSpec +} + +var map_OperatorHubStatus = map[string]string{ + "": "OperatorHubStatus defines the observed state of OperatorHub. The current state of the default hub sources will always be reflected here.", + "sources": "sources encapsulates the result of applying the configuration for each hub source", +} + +func (OperatorHubStatus) SwaggerDoc() map[string]string { + return map_OperatorHubStatus +} + +var map_Project = map[string]string{ + "": "Project holds cluster-wide information about Project. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Project) SwaggerDoc() map[string]string { + return map_Project +} + +var map_ProjectList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (ProjectList) SwaggerDoc() map[string]string { + return map_ProjectList +} + +var map_ProjectSpec = map[string]string{ + "": "ProjectSpec holds the project creation configuration.", + "projectRequestMessage": "projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint", + "projectRequestTemplate": "projectRequestTemplate is the template to use for creating projects in response to projectrequest. This must point to a template in 'openshift-config' namespace. It is optional. If it is not specified, a default template is used.", +} + +func (ProjectSpec) SwaggerDoc() map[string]string { + return map_ProjectSpec +} + +var map_TemplateReference = map[string]string{ + "": "TemplateReference references a template in a specific namespace. 
The namespace must be specified at the point of use.", + "name": "name is the metadata.name of the referenced project request template", +} + +func (TemplateReference) SwaggerDoc() map[string]string { + return map_TemplateReference +} + +var map_Proxy = map[string]string{ + "": "Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "Spec holds user-settable values for the proxy configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Proxy) SwaggerDoc() map[string]string { + return map_Proxy +} + +var map_ProxyList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (ProxyList) SwaggerDoc() map[string]string { + return map_ProxyList +} + +var map_ProxySpec = map[string]string{ + "": "ProxySpec contains cluster proxy creation configuration.", + "httpProxy": "httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var.", + "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var.", + "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. Empty means unset and will not result in an env var.", + "readinessEndpoints": "readinessEndpoints is a list of endpoints used to verify readiness of the proxy.", + "trustedCA": "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well.\n\nThe namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml):\n\napiVersion: v1 kind: ConfigMap metadata:\n name: user-ca-bundle\n namespace: openshift-config\n data:\n ca-bundle.crt: |", +} + +func (ProxySpec) SwaggerDoc() map[string]string { + return map_ProxySpec +} + +var map_ProxyStatus = map[string]string{ + "": "ProxyStatus shows current known state of the cluster proxy.", + "httpProxy": "httpProxy is the URL of the proxy for HTTP requests.", + "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests.", + "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.", +} + +func (ProxyStatus) SwaggerDoc() map[string]string { + return map_ProxyStatus +} + +var map_Scheduler = map[string]string{ + "": "Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. 
They may not be overridden.", +} + +func (Scheduler) SwaggerDoc() map[string]string { + return map_Scheduler +} + +var map_SchedulerList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (SchedulerList) SwaggerDoc() map[string]string { + return map_SchedulerList +} + +var map_SchedulerSpec = map[string]string{ + "policy": "DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release. policy is a reference to a ConfigMap containing scheduler policy which has user-specified predicates and priorities. If this ConfigMap is not available, the scheduler will default to using DefaultAlgorithmProvider. The namespace for this configmap is openshift-config.", + "profile": "profile sets which scheduling profile should be set in order to configure scheduling decisions for new pods.\n\nValid values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\". Defaults to \"LowNodeUtilization\"", + "defaultNodeSelector": "defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces and creates an intersection with any existing nodeSelectors already set on a pod, additionally constraining that pod's selector. For example, defaultNodeSelector: \"type=user-node,region=east\" would set the nodeSelector field in the pod spec to \"type=user-node,region=east\" for all pods created in all namespaces. Namespaces having project-wide node selectors won't be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector='type=user-node,region=east', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project, the value is used in preference to the value set for the defaultNodeSelector field. For instance, openshift.io/node-selector: \"type=user-node,region=west\" means that the default of \"type=user-node,region=east\" set in defaultNodeSelector would not be applied.", + "mastersSchedulable": "MastersSchedulable allows master nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.", +} + +func (SchedulerSpec) SwaggerDoc() map[string]string { + return map_SchedulerSpec +} + +var map_CustomTLSProfile = map[string]string{ + "": "CustomTLSProfile is a user-defined TLS security profile.
Be extremely careful using a custom TLS profile as invalid configurations can be catastrophic.", +} + +func (CustomTLSProfile) SwaggerDoc() map[string]string { + return map_CustomTLSProfile +} + +var map_IntermediateTLSProfile = map[string]string{ + "": "IntermediateTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29", +} + +func (IntermediateTLSProfile) SwaggerDoc() map[string]string { + return map_IntermediateTLSProfile +} + +var map_ModernTLSProfile = map[string]string{ + "": "ModernTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility", +} + +func (ModernTLSProfile) SwaggerDoc() map[string]string { + return map_ModernTLSProfile +} + +var map_OldTLSProfile = map[string]string{ + "": "OldTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility", +} + +func (OldTLSProfile) SwaggerDoc() map[string]string { + return map_OldTLSProfile +} + +var map_TLSProfileSpec = map[string]string{ + "": "TLSProfileSpec is the desired behavior of a TLSSecurityProfile.", + "ciphers": "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml):\n\n ciphers:\n - DES-CBC3-SHA", + "minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):\n\n minTLSVersion: TLSv1.1\n\nNOTE: currently the highest minTLSVersion allowed is VersionTLS12", +} + +func (TLSProfileSpec) SwaggerDoc() map[string]string { + return map_TLSProfileSpec +} + +var map_TLSSecurityProfile = map[string]string{ + "": "TLSSecurityProfile defines the schema for a TLS security profile. This object is used by operators to apply TLS security settings to operands.", + "type": "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. 
Depending on precisely which ciphers are available to a process, the list may be reduced.\n\nNote that the Modern profile is currently not supported because it is not yet well adopted by common software libraries.", + "old": "old is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n - DHE-RSA-CHACHA20-POLY1305\n - ECDHE-ECDSA-AES128-SHA256\n - ECDHE-RSA-AES128-SHA256\n - ECDHE-ECDSA-AES128-SHA\n - ECDHE-RSA-AES128-SHA\n - ECDHE-ECDSA-AES256-SHA384\n - ECDHE-RSA-AES256-SHA384\n - ECDHE-ECDSA-AES256-SHA\n - ECDHE-RSA-AES256-SHA\n - DHE-RSA-AES128-SHA256\n - DHE-RSA-AES256-SHA256\n - AES128-GCM-SHA256\n - AES256-GCM-SHA384\n - AES128-SHA256\n - AES256-SHA256\n - AES128-SHA\n - AES256-SHA\n - DES-CBC3-SHA\n minTLSVersion: TLSv1.0", + "intermediate": "intermediate is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n minTLSVersion: TLSv1.2", + "modern": "modern is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n minTLSVersion: TLSv1.3\n\nNOTE: Currently unsupported.", + "custom": "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this:\n\n ciphers:\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n minTLSVersion: TLSv1.1", +} + +func (TLSSecurityProfile) SwaggerDoc() map[string]string { + return map_TLSSecurityProfile +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/red-hat-storage/ocs-operator/services/provider/LICENSE b/vendor/github.com/red-hat-storage/ocs-operator/services/provider/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/red-hat-storage/ocs-operator/services/provider/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
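The next file vendors the consumer-side gRPC client for the OCS provider API. For orientation, a minimal caller of that client might look like the sketch below; the endpoint, onboarding ticket, consumer name, capacity, and claim name are placeholder values, the call ordering is illustrative rather than prescribed, and error handling is reduced to log.Fatalf for brevity.

// Hypothetical end-to-end caller of the client vendored below. The endpoint,
// onboarding ticket, consumer name, capacity, and claim name are placeholder
// values for illustration only.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	provider "github.com/red-hat-storage/ocs-operator/services/provider/client"
)

func main() {
	ctx := context.Background()

	// NewProviderClient dials with grpc.WithBlock, so this returns an error
	// if the provider endpoint is unreachable within the timeout.
	c, err := provider.NewProviderClient(ctx, "ocs-provider.example.com:50051", 10*time.Second)
	if err != nil {
		log.Fatalf("connect: %v", err)
	}
	defer c.Close()

	// Register this cluster as a storage consumer.
	onboard, err := c.OnboardConsumer(ctx, "placeholder-ticket", "consumer-1", "1Ti")
	if err != nil {
		log.Fatalf("onboard: %v", err)
	}

	// Fetch the connection details granted to the consumer.
	cfg, err := c.GetStorageConfig(ctx, onboard.StorageConsumerUUID)
	if err != nil {
		log.Fatalf("storage config: %v", err)
	}
	for _, r := range cfg.ExternalResource {
		fmt.Printf("%s/%s: %d bytes of config\n", r.Kind, r.Name, len(r.Data))
	}

	// Ask the provider to create a block-pool-backed storage class claim.
	if _, err := c.FulfillStorageClassClaim(ctx, onboard.StorageConsumerUUID, "claim-1", "", provider.StorageTypeBlockpool); err != nil {
		log.Fatalf("fulfill claim: %v", err)
	}
}

Because NewProviderClient passes grpc.WithBlock to grpc.DialContext, construction itself fails if the provider endpoint cannot be reached before the supplied timeout expires; every subsequent method also guards against a closed client and applies the same per-call timeout.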
diff --git a/vendor/github.com/red-hat-storage/ocs-operator/services/provider/client/client.go b/vendor/github.com/red-hat-storage/ocs-operator/services/provider/client/client.go new file mode 100644 index 000000000..d09eda835 --- /dev/null +++ b/vendor/github.com/red-hat-storage/ocs-operator/services/provider/client/client.go @@ -0,0 +1,198 @@ +package client + +import ( + "context" + "crypto/tls" + "fmt" + "time" + + pb "github.com/red-hat-storage/ocs-operator/services/provider/pb" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +type OCSProviderClient struct { + Client pb.OCSProviderClient + clientConn *grpc.ClientConn + timeout time.Duration +} + +// NewProviderClient creates a client to talk to the external OCS storage provider server +func NewProviderClient(ctx context.Context, serverAddr string, timeout time.Duration) (*OCSProviderClient, error) { + apiCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + config := &tls.Config{ + InsecureSkipVerify: true, + } + + opts := []grpc.DialOption{ + grpc.WithTransportCredentials(credentials.NewTLS(config)), + grpc.WithBlock(), + } + + conn, err := grpc.DialContext(apiCtx, serverAddr, opts...) + if err != nil { + return nil, fmt.Errorf("failed to dial: %v", err) + } + + return &OCSProviderClient{ + Client: pb.NewOCSProviderClient(conn), + clientConn: conn, + timeout: timeout}, nil +} + +// Close closes the gRPC connection of the external OCS storage provider client +func (cc *OCSProviderClient) Close() { + if cc.clientConn != nil { + _ = cc.clientConn.Close() + cc.clientConn = nil + } + cc.Client = nil +} + +// OnboardConsumer to validate the consumer and create StorageConsumer +// resource on the StorageProvider cluster +func (cc *OCSProviderClient) OnboardConsumer(ctx context.Context, ticket, name, capacity string) (*pb.OnboardConsumerResponse, error) { + if cc.Client == nil || cc.clientConn == nil { + return nil, fmt.Errorf("provider client is closed") + } + + req := &pb.OnboardConsumerRequest{ + OnboardingTicket: ticket, + ConsumerName: name, + Capacity: capacity, + } + + apiCtx, cancel := context.WithTimeout(ctx, cc.timeout) + defer cancel() + + return cc.Client.OnboardConsumer(apiCtx, req) +} + +// GetStorageConfig generates the json config for connecting to storage provider cluster +func (cc *OCSProviderClient) GetStorageConfig(ctx context.Context, consumerUUID string) (*pb.StorageConfigResponse, error) { + if cc.Client == nil || cc.clientConn == nil { + return nil, fmt.Errorf("provider client is closed") + } + + req := &pb.StorageConfigRequest{ + StorageConsumerUUID: consumerUUID, + } + + apiCtx, cancel := context.WithTimeout(ctx, cc.timeout) + defer cancel() + + return cc.Client.GetStorageConfig(apiCtx, req) +} + +// OffboardConsumer deletes the StorageConsumer CR on the storage provider cluster +func (cc *OCSProviderClient) OffboardConsumer(ctx context.Context, consumerUUID string) (*pb.OffboardConsumerResponse, error) { + if cc.Client == nil || cc.clientConn == nil { + return nil, fmt.Errorf("provider client is closed") + } + + req := &pb.OffboardConsumerRequest{ + StorageConsumerUUID: consumerUUID, + } + + apiCtx, cancel := context.WithTimeout(ctx, cc.timeout) + defer cancel() + + return cc.Client.OffboardConsumer(apiCtx, req) +} + +// UpdateCapacity increases or decreases the storage block pool size +func (cc *OCSProviderClient) UpdateCapacity(ctx context.Context, consumerUUID, capacity string) (*pb.UpdateCapacityResponse, error) { + if cc.Client == nil || cc.clientConn == nil { + return 
nil, fmt.Errorf("provider client is closed") + } + + req := &pb.UpdateCapacityRequest{ + StorageConsumerUUID: consumerUUID, + Capacity: capacity, + } + + apiCtx, cancel := context.WithTimeout(ctx, cc.timeout) + defer cancel() + + return cc.Client.UpdateCapacity(apiCtx, req) +} + +func (cc *OCSProviderClient) AcknowledgeOnboarding(ctx context.Context, consumerUUID string) (*pb.AcknowledgeOnboardingResponse, error) { + if cc.Client == nil || cc.clientConn == nil { + return nil, fmt.Errorf("provider client is closed") + } + + req := &pb.AcknowledgeOnboardingRequest{ + StorageConsumerUUID: consumerUUID, + } + + apiCtx, cancel := context.WithTimeout(ctx, cc.timeout) + defer cancel() + + return cc.Client.AcknowledgeOnboarding(apiCtx, req) +} + +type StorageType uint + +const ( + StorageTypeBlockpool StorageType = iota + StorageTypeSharedfilesystem +) + +func (cc *OCSProviderClient) FulfillStorageClassClaim(ctx context.Context, consumerUUID, storageClassClaimName, encryptionMethod string, storageType StorageType) (*pb.FulfillStorageClassClaimResponse, error) { + if cc.Client == nil || cc.clientConn == nil { + return nil, fmt.Errorf("provider client is closed") + } + var st pb.FulfillStorageClassClaimRequest_StorageType + if storageType == StorageTypeSharedfilesystem { + st = pb.FulfillStorageClassClaimRequest_SHAREDFILESYSTEM + } else if storageType == StorageTypeBlockpool { + st = pb.FulfillStorageClassClaimRequest_BLOCKPOOL + } + + req := &pb.FulfillStorageClassClaimRequest{ + StorageConsumerUUID: consumerUUID, + StorageClassClaimName: storageClassClaimName, + EncryptionMethod: encryptionMethod, + StorageType: st, + } + + apiCtx, cancel := context.WithTimeout(ctx, cc.timeout) + defer cancel() + + return cc.Client.FulfillStorageClassClaim(apiCtx, req) +} + +func (cc *OCSProviderClient) RevokeStorageClassClaim(ctx context.Context, consumerUUID, storageClassClaimName string) (*pb.RevokeStorageClassClaimResponse, error) { + if cc.Client == nil || cc.clientConn == nil { + return nil, fmt.Errorf("provider client is closed") + } + + req := &pb.RevokeStorageClassClaimRequest{ + StorageConsumerUUID: consumerUUID, + StorageClassClaimName: storageClassClaimName, + } + + apiCtx, cancel := context.WithTimeout(ctx, cc.timeout) + defer cancel() + + return cc.Client.RevokeStorageClassClaim(apiCtx, req) +} + +func (cc *OCSProviderClient) GetStorageClassClaimConfig(ctx context.Context, consumerUUID, storageClassClaimName string) (*pb.StorageClassClaimConfigResponse, error) { + if cc.Client == nil || cc.clientConn == nil { + return nil, fmt.Errorf("provider client is closed") + } + + req := &pb.StorageClassClaimConfigRequest{ + StorageConsumerUUID: consumerUUID, + StorageClassClaimName: storageClassClaimName, + } + + apiCtx, cancel := context.WithTimeout(ctx, cc.timeout) + defer cancel() + + return cc.Client.GetStorageClassClaimConfig(apiCtx, req) +} diff --git a/vendor/github.com/red-hat-storage/ocs-operator/services/provider/pb/provider.pb.go b/vendor/github.com/red-hat-storage/ocs-operator/services/provider/pb/provider.pb.go new file mode 100644 index 000000000..93d804730 --- /dev/null +++ b/vendor/github.com/red-hat-storage/ocs-operator/services/provider/pb/provider.pb.go @@ -0,0 +1,1425 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v3.14.0 +// source: provider.proto + +package providerpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + _ "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// StorageType of the storageclassClaim +type FulfillStorageClassClaimRequest_StorageType int32 + +const ( + FulfillStorageClassClaimRequest_SHAREDFILESYSTEM FulfillStorageClassClaimRequest_StorageType = 0 + FulfillStorageClassClaimRequest_BLOCKPOOL FulfillStorageClassClaimRequest_StorageType = 1 +) + +// Enum value maps for FulfillStorageClassClaimRequest_StorageType. +var ( + FulfillStorageClassClaimRequest_StorageType_name = map[int32]string{ + 0: "SHAREDFILESYSTEM", + 1: "BLOCKPOOL", + } + FulfillStorageClassClaimRequest_StorageType_value = map[string]int32{ + "SHAREDFILESYSTEM": 0, + "BLOCKPOOL": 1, + } +) + +func (x FulfillStorageClassClaimRequest_StorageType) Enum() *FulfillStorageClassClaimRequest_StorageType { + p := new(FulfillStorageClassClaimRequest_StorageType) + *p = x + return p +} + +func (x FulfillStorageClassClaimRequest_StorageType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FulfillStorageClassClaimRequest_StorageType) Descriptor() protoreflect.EnumDescriptor { + return file_provider_proto_enumTypes[0].Descriptor() +} + +func (FulfillStorageClassClaimRequest_StorageType) Type() protoreflect.EnumType { + return &file_provider_proto_enumTypes[0] +} + +func (x FulfillStorageClassClaimRequest_StorageType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FulfillStorageClassClaimRequest_StorageType.Descriptor instead. 
+func (FulfillStorageClassClaimRequest_StorageType) EnumDescriptor() ([]byte, []int) { + return file_provider_proto_rawDescGZIP(), []int{11, 0} +} + +// OnboardConsumerRequest holds the required information to validate the consumer and create StorageConsumer +// resource on the StorageProvider cluster +type OnboardConsumerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // onboardingTicket authenticates the storage consumer cluster + OnboardingTicket string `protobuf:"bytes,1,opt,name=onboardingTicket,proto3" json:"onboardingTicket,omitempty"` + // consumerName is the name of the consumer that is used to create the storageConsumer resource + ConsumerName string `protobuf:"bytes,2,opt,name=consumerName,proto3" json:"consumerName,omitempty"` + // capacity is the desired storage requested by the consumer cluster + Capacity string `protobuf:"bytes,3,opt,name=capacity,proto3" json:"capacity,omitempty"` +} + +func (x *OnboardConsumerRequest) Reset() { + *x = OnboardConsumerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_provider_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OnboardConsumerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OnboardConsumerRequest) ProtoMessage() {} + +func (x *OnboardConsumerRequest) ProtoReflect() protoreflect.Message { + mi := &file_provider_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OnboardConsumerRequest.ProtoReflect.Descriptor instead. 
+func (*OnboardConsumerRequest) Descriptor() ([]byte, []int) { + return file_provider_proto_rawDescGZIP(), []int{0} +} + +func (x *OnboardConsumerRequest) GetOnboardingTicket() string { + if x != nil { + return x.OnboardingTicket + } + return "" +} + +func (x *OnboardConsumerRequest) GetConsumerName() string { + if x != nil { + return x.ConsumerName + } + return "" +} + +func (x *OnboardConsumerRequest) GetCapacity() string { + if x != nil { + return x.Capacity + } + return "" +} + +// OnboardConsumerResponse holds the response for OnboardConsumer API request +type OnboardConsumerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // K8s UID (UUID) of the consumer cluster + StorageConsumerUUID string `protobuf:"bytes,1,opt,name=storageConsumerUUID,proto3" json:"storageConsumerUUID,omitempty"` + // grantedCapacity is the storage granted by the provider cluster + GrantedCapacity string `protobuf:"bytes,2,opt,name=grantedCapacity,proto3" json:"grantedCapacity,omitempty"` +} + +func (x *OnboardConsumerResponse) Reset() { + *x = OnboardConsumerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_provider_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OnboardConsumerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OnboardConsumerResponse) ProtoMessage() {} + +func (x *OnboardConsumerResponse) ProtoReflect() protoreflect.Message { + mi := &file_provider_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OnboardConsumerResponse.ProtoReflect.Descriptor instead. +func (*OnboardConsumerResponse) Descriptor() ([]byte, []int) { + return file_provider_proto_rawDescGZIP(), []int{1} +} + +func (x *OnboardConsumerResponse) GetStorageConsumerUUID() string { + if x != nil { + return x.StorageConsumerUUID + } + return "" +} + +func (x *OnboardConsumerResponse) GetGrantedCapacity() string { + if x != nil { + return x.GrantedCapacity + } + return "" +} + +// StorageConfigRequest holds the information required generate the json config for connecting to storage provider cluster +type StorageConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // K8s UID (UUID) of the consumer cluster + StorageConsumerUUID string `protobuf:"bytes,1,opt,name=storageConsumerUUID,proto3" json:"storageConsumerUUID,omitempty"` +} + +func (x *StorageConfigRequest) Reset() { + *x = StorageConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_provider_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StorageConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StorageConfigRequest) ProtoMessage() {} + +func (x *StorageConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_provider_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StorageConfigRequest.ProtoReflect.Descriptor instead. 
+func (*StorageConfigRequest) Descriptor() ([]byte, []int) { + return file_provider_proto_rawDescGZIP(), []int{2} +} + +func (x *StorageConfigRequest) GetStorageConsumerUUID() string { + if x != nil { + return x.StorageConsumerUUID + } + return "" +} + +// ExternalResource holds the configuration data of the resources in external storage cluster +type ExternalResource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the external storage cluster resource + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + // Kind of the external storage cluster resource + Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` + // Data contains the contents of the external cluster resource + Data []byte `protobuf:"bytes,3,opt,name=Data,proto3" json:"Data,omitempty"` +} + +func (x *ExternalResource) Reset() { + *x = ExternalResource{} + if protoimpl.UnsafeEnabled { + mi := &file_provider_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExternalResource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalResource) ProtoMessage() {} + +func (x *ExternalResource) ProtoReflect() protoreflect.Message { + mi := &file_provider_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExternalResource.ProtoReflect.Descriptor instead. +func (*ExternalResource) Descriptor() ([]byte, []int) { + return file_provider_proto_rawDescGZIP(), []int{3} +} + +func (x *ExternalResource) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ExternalResource) GetKind() string { + if x != nil { + return x.Kind + } + return "" +} + +func (x *ExternalResource) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +// StorageConfigResponse holds the response for the GetStorageConfig API request +type StorageConfigResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // ExternalResource holds the configuration data of external storage cluster + ExternalResource []*ExternalResource `protobuf:"bytes,1,rep,name=externalResource,proto3" json:"externalResource,omitempty"` +} + +func (x *StorageConfigResponse) Reset() { + *x = StorageConfigResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_provider_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StorageConfigResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StorageConfigResponse) ProtoMessage() {} + +func (x *StorageConfigResponse) ProtoReflect() protoreflect.Message { + mi := &file_provider_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StorageConfigResponse.ProtoReflect.Descriptor instead. 
+func (*StorageConfigResponse) Descriptor() ([]byte, []int) { + return file_provider_proto_rawDescGZIP(), []int{4} +} + +func (x *StorageConfigResponse) GetExternalResource() []*ExternalResource { + if x != nil { + return x.ExternalResource + } + return nil +} + +// OffboardConsumerRequest holds the required information to delete the StorageConsumer CR on the storage provider cluster. +type OffboardConsumerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // K8s UID (UUID) of the consumer cluster + StorageConsumerUUID string `protobuf:"bytes,1,opt,name=storageConsumerUUID,proto3" json:"storageConsumerUUID,omitempty"` +} + +func (x *OffboardConsumerRequest) Reset() { + *x = OffboardConsumerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_provider_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OffboardConsumerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OffboardConsumerRequest) ProtoMessage() {} + +func (x *OffboardConsumerRequest) ProtoReflect() protoreflect.Message { + mi := &file_provider_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OffboardConsumerRequest.ProtoReflect.Descriptor instead. +func (*OffboardConsumerRequest) Descriptor() ([]byte, []int) { + return file_provider_proto_rawDescGZIP(), []int{5} +} + +func (x *OffboardConsumerRequest) GetStorageConsumerUUID() string { + if x != nil { + return x.StorageConsumerUUID + } + return "" +} + +// OffboardConsumerResponse holds the response for the OffboardConsumer API request +type OffboardConsumerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *OffboardConsumerResponse) Reset() { + *x = OffboardConsumerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_provider_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OffboardConsumerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OffboardConsumerResponse) ProtoMessage() {} + +func (x *OffboardConsumerResponse) ProtoReflect() protoreflect.Message { + mi := &file_provider_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OffboardConsumerResponse.ProtoReflect.Descriptor instead. 
+func (*OffboardConsumerResponse) Descriptor() ([]byte, []int) { + return file_provider_proto_rawDescGZIP(), []int{6} +} + +// UpdateCapacityRequest holds the information required to increase or decrease the block pool size +// on the provider cluster +type UpdateCapacityRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // K8s UID (UUID) of the consumer cluster + StorageConsumerUUID string `protobuf:"bytes,1,opt,name=storageConsumerUUID,proto3" json:"storageConsumerUUID,omitempty"` + // capacity is the desired storage requested by the consumer cluster + Capacity string `protobuf:"bytes,2,opt,name=capacity,proto3" json:"capacity,omitempty"` +} + +func (x *UpdateCapacityRequest) Reset() { + *x = UpdateCapacityRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_provider_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateCapacityRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateCapacityRequest) ProtoMessage() {} + +func (x *UpdateCapacityRequest) ProtoReflect() protoreflect.Message { + mi := &file_provider_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateCapacityRequest.ProtoReflect.Descriptor instead. +func (*UpdateCapacityRequest) Descriptor() ([]byte, []int) { + return file_provider_proto_rawDescGZIP(), []int{7} +} + +func (x *UpdateCapacityRequest) GetStorageConsumerUUID() string { + if x != nil { + return x.StorageConsumerUUID + } + return "" +} + +func (x *UpdateCapacityRequest) GetCapacity() string { + if x != nil { + return x.Capacity + } + return "" +} + +// UpdateCapacityResponse holds the response for UpdateCapacity API request +type UpdateCapacityResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // grantedCapacity is the storage granted by the provider cluster + GrantedCapacity string `protobuf:"bytes,2,opt,name=grantedCapacity,proto3" json:"grantedCapacity,omitempty"` +} + +func (x *UpdateCapacityResponse) Reset() { + *x = UpdateCapacityResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_provider_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateCapacityResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateCapacityResponse) ProtoMessage() {} + +func (x *UpdateCapacityResponse) ProtoReflect() protoreflect.Message { + mi := &file_provider_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateCapacityResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateCapacityResponse) Descriptor() ([]byte, []int) { + return file_provider_proto_rawDescGZIP(), []int{8} +} + +func (x *UpdateCapacityResponse) GetGrantedCapacity() string { + if x != nil { + return x.GrantedCapacity + } + return "" +} + +// AcknowledgeOnboardingRequest holds the information required to acknowledge the onboarding +type AcknowledgeOnboardingRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // K8s UID (UUID) of the consumer cluster + StorageConsumerUUID string `protobuf:"bytes,1,opt,name=storageConsumerUUID,proto3" json:"storageConsumerUUID,omitempty"` +} + +func (x *AcknowledgeOnboardingRequest) Reset() { + *x = AcknowledgeOnboardingRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_provider_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AcknowledgeOnboardingRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AcknowledgeOnboardingRequest) ProtoMessage() {} + +func (x *AcknowledgeOnboardingRequest) ProtoReflect() protoreflect.Message { + mi := &file_provider_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AcknowledgeOnboardingRequest.ProtoReflect.Descriptor instead. +func (*AcknowledgeOnboardingRequest) Descriptor() ([]byte, []int) { + return file_provider_proto_rawDescGZIP(), []int{9} +} + +func (x *AcknowledgeOnboardingRequest) GetStorageConsumerUUID() string { + if x != nil { + return x.StorageConsumerUUID + } + return "" +} + +// AcknowledgeOnboardingResponse holds the response for the AcknowledgeOnboarding API request +type AcknowledgeOnboardingResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *AcknowledgeOnboardingResponse) Reset() { + *x = AcknowledgeOnboardingResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_provider_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AcknowledgeOnboardingResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AcknowledgeOnboardingResponse) ProtoMessage() {} + +func (x *AcknowledgeOnboardingResponse) ProtoReflect() protoreflect.Message { + mi := &file_provider_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AcknowledgeOnboardingResponse.ProtoReflect.Descriptor instead. +func (*AcknowledgeOnboardingResponse) Descriptor() ([]byte, []int) { + return file_provider_proto_rawDescGZIP(), []int{10} +} + +// FulfillStorageClassClaimRequest holds the information required to +// create the StorageclassClaim CR on provider cluster. +type FulfillStorageClassClaimRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name of the storageclassclaim on the consumer cluster. + StorageClassClaimName string `protobuf:"bytes,1,opt,name=storageClassClaimName,proto3" json:"storageClassClaimName,omitempty"` + // K8s UID (UUID) of the consumer cluster. 
+ StorageConsumerUUID string `protobuf:"bytes,2,opt,name=storageConsumerUUID,proto3" json:"storageConsumerUUID,omitempty"` + // encryption method of the storageclassclaim. + EncryptionMethod string `protobuf:"bytes,3,opt,name=encryptionMethod,proto3" json:"encryptionMethod,omitempty"` + StorageType FulfillStorageClassClaimRequest_StorageType `protobuf:"varint,4,opt,name=storageType,proto3,enum=provider.FulfillStorageClassClaimRequest_StorageType" json:"storageType,omitempty"` +} + +func (x *FulfillStorageClassClaimRequest) Reset() { + *x = FulfillStorageClassClaimRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_provider_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FulfillStorageClassClaimRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FulfillStorageClassClaimRequest) ProtoMessage() {} + +func (x *FulfillStorageClassClaimRequest) ProtoReflect() protoreflect.Message { + mi := &file_provider_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FulfillStorageClassClaimRequest.ProtoReflect.Descriptor instead. +func (*FulfillStorageClassClaimRequest) Descriptor() ([]byte, []int) { + return file_provider_proto_rawDescGZIP(), []int{11} +} + +func (x *FulfillStorageClassClaimRequest) GetStorageClassClaimName() string { + if x != nil { + return x.StorageClassClaimName + } + return "" +} + +func (x *FulfillStorageClassClaimRequest) GetStorageConsumerUUID() string { + if x != nil { + return x.StorageConsumerUUID + } + return "" +} + +func (x *FulfillStorageClassClaimRequest) GetEncryptionMethod() string { + if x != nil { + return x.EncryptionMethod + } + return "" +} + +func (x *FulfillStorageClassClaimRequest) GetStorageType() FulfillStorageClassClaimRequest_StorageType { + if x != nil { + return x.StorageType + } + return FulfillStorageClassClaimRequest_SHAREDFILESYSTEM +} + +// FulfillStorageClassClaimResponse holds the response for the FulfillStorageClassClaim API request. +type FulfillStorageClassClaimResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *FulfillStorageClassClaimResponse) Reset() { + *x = FulfillStorageClassClaimResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_provider_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FulfillStorageClassClaimResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FulfillStorageClassClaimResponse) ProtoMessage() {} + +func (x *FulfillStorageClassClaimResponse) ProtoReflect() protoreflect.Message { + mi := &file_provider_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FulfillStorageClassClaimResponse.ProtoReflect.Descriptor instead. +func (*FulfillStorageClassClaimResponse) Descriptor() ([]byte, []int) { + return file_provider_proto_rawDescGZIP(), []int{12} +} + +// RevokeStorageClassClaimRequest holds the information required to delete the +// StorageclassClaim CR on provider cluster. 
+type RevokeStorageClassClaimRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name of the storageclassclaim on the consumer cluster. + StorageClassClaimName string `protobuf:"bytes,1,opt,name=storageClassClaimName,proto3" json:"storageClassClaimName,omitempty"` + // K8s UID (UUID) of the consumer cluster. + StorageConsumerUUID string `protobuf:"bytes,2,opt,name=storageConsumerUUID,proto3" json:"storageConsumerUUID,omitempty"` +} + +func (x *RevokeStorageClassClaimRequest) Reset() { + *x = RevokeStorageClassClaimRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_provider_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RevokeStorageClassClaimRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RevokeStorageClassClaimRequest) ProtoMessage() {} + +func (x *RevokeStorageClassClaimRequest) ProtoReflect() protoreflect.Message { + mi := &file_provider_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RevokeStorageClassClaimRequest.ProtoReflect.Descriptor instead. +func (*RevokeStorageClassClaimRequest) Descriptor() ([]byte, []int) { + return file_provider_proto_rawDescGZIP(), []int{13} +} + +func (x *RevokeStorageClassClaimRequest) GetStorageClassClaimName() string { + if x != nil { + return x.StorageClassClaimName + } + return "" +} + +func (x *RevokeStorageClassClaimRequest) GetStorageConsumerUUID() string { + if x != nil { + return x.StorageConsumerUUID + } + return "" +} + +// RevokeStorageClassClaimResponse holds the response for the RevokeStorageClassClaim API request. +type RevokeStorageClassClaimResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RevokeStorageClassClaimResponse) Reset() { + *x = RevokeStorageClassClaimResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_provider_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RevokeStorageClassClaimResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RevokeStorageClassClaimResponse) ProtoMessage() {} + +func (x *RevokeStorageClassClaimResponse) ProtoReflect() protoreflect.Message { + mi := &file_provider_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RevokeStorageClassClaimResponse.ProtoReflect.Descriptor instead. +func (*RevokeStorageClassClaimResponse) Descriptor() ([]byte, []int) { + return file_provider_proto_rawDescGZIP(), []int{14} +} + +// StorageClassClaimConfigRequest holds the information required to generate the +// json config for StorageClassClaim specific resources. +type StorageClassClaimConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name of the storageclassclaim on the consumer cluster. + StorageClassClaimName string `protobuf:"bytes,1,opt,name=storageClassClaimName,proto3" json:"storageClassClaimName,omitempty"` + // K8s UID (UUID) of the consumer cluster. 
+ StorageConsumerUUID string `protobuf:"bytes,2,opt,name=storageConsumerUUID,proto3" json:"storageConsumerUUID,omitempty"` +} + +func (x *StorageClassClaimConfigRequest) Reset() { + *x = StorageClassClaimConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_provider_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StorageClassClaimConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StorageClassClaimConfigRequest) ProtoMessage() {} + +func (x *StorageClassClaimConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_provider_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StorageClassClaimConfigRequest.ProtoReflect.Descriptor instead. +func (*StorageClassClaimConfigRequest) Descriptor() ([]byte, []int) { + return file_provider_proto_rawDescGZIP(), []int{15} +} + +func (x *StorageClassClaimConfigRequest) GetStorageClassClaimName() string { + if x != nil { + return x.StorageClassClaimName + } + return "" +} + +func (x *StorageClassClaimConfigRequest) GetStorageConsumerUUID() string { + if x != nil { + return x.StorageConsumerUUID + } + return "" +} + +// StorageClassClaimConfigResponse holds the response for the StorageClassClaimConfig API request. +type StorageClassClaimConfigResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // ExternalResource holds the configuration data of external storage cluster + ExternalResource []*ExternalResource `protobuf:"bytes,1,rep,name=externalResource,proto3" json:"externalResource,omitempty"` +} + +func (x *StorageClassClaimConfigResponse) Reset() { + *x = StorageClassClaimConfigResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_provider_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StorageClassClaimConfigResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StorageClassClaimConfigResponse) ProtoMessage() {} + +func (x *StorageClassClaimConfigResponse) ProtoReflect() protoreflect.Message { + mi := &file_provider_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StorageClassClaimConfigResponse.ProtoReflect.Descriptor instead. 
+func (*StorageClassClaimConfigResponse) Descriptor() ([]byte, []int) { + return file_provider_proto_rawDescGZIP(), []int{16} +} + +func (x *StorageClassClaimConfigResponse) GetExternalResource() []*ExternalResource { + if x != nil { + return x.ExternalResource + } + return nil +} + +var File_provider_proto protoreflect.FileDescriptor + +var file_provider_proto_rawDesc = []byte{ + 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x84, 0x01, 0x0a, + 0x16, 0x4f, 0x6e, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x6f, 0x6e, 0x62, 0x6f, 0x61, + 0x72, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x10, 0x6f, 0x6e, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x69, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x75, + 0x6d, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x61, 0x70, 0x61, 0x63, + 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x61, 0x70, 0x61, 0x63, + 0x69, 0x74, 0x79, 0x22, 0x75, 0x0a, 0x17, 0x4f, 0x6e, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x43, 0x6f, + 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, + 0x0a, 0x13, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, + 0x72, 0x55, 0x55, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x55, 0x55, 0x49, 0x44, + 0x12, 0x28, 0x0a, 0x0f, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x65, 0x64, 0x43, 0x61, 0x70, 0x61, 0x63, + 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x67, 0x72, 0x61, 0x6e, 0x74, + 0x65, 0x64, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x22, 0x48, 0x0a, 0x14, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x30, 0x0a, 0x13, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, + 0x73, 0x75, 0x6d, 0x65, 0x72, 0x55, 0x55, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x13, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, + 0x55, 0x55, 0x49, 0x44, 0x22, 0x4e, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, + 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x44, 0x61, 0x74, 0x61, 0x22, 0x5f, 0x0a, 0x15, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, + 0x10, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x03, 
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x10, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x4b, 0x0a, 0x17, 0x4f, 0x66, 0x66, 0x62, 0x6f, 0x61, 0x72, + 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x30, 0x0a, 0x13, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x75, + 0x6d, 0x65, 0x72, 0x55, 0x55, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x55, 0x55, + 0x49, 0x44, 0x22, 0x1a, 0x0a, 0x18, 0x4f, 0x66, 0x66, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x43, 0x6f, + 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x65, + 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x13, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x55, 0x55, 0x49, 0x44, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, + 0x73, 0x75, 0x6d, 0x65, 0x72, 0x55, 0x55, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x61, 0x70, + 0x61, 0x63, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x61, 0x70, + 0x61, 0x63, 0x69, 0x74, 0x79, 0x22, 0x42, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, + 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x28, 0x0a, 0x0f, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x65, 0x64, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, + 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x65, + 0x64, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x22, 0x50, 0x0a, 0x1c, 0x41, 0x63, 0x6b, + 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x4f, 0x6e, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x69, + 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x13, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x55, 0x55, 0x49, 0x44, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, + 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x55, 0x55, 0x49, 0x44, 0x22, 0x1f, 0x0a, 0x1d, 0x41, + 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x4f, 0x6e, 0x62, 0x6f, 0x61, 0x72, + 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc2, 0x02, 0x0a, + 0x1f, 0x46, 0x75, 0x6c, 0x66, 0x69, 0x6c, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x34, 0x0a, 0x15, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x15, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, + 0x69, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x13, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x55, 0x55, 0x49, 0x44, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x73, + 0x75, 0x6d, 0x65, 0x72, 0x55, 0x55, 0x49, 0x44, 0x12, 
0x2a, 0x0a, 0x10, 0x65, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x12, 0x57, 0x0a, 0x0b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x35, 0x2e, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x2e, 0x46, 0x75, 0x6c, 0x66, 0x69, 0x6c, 0x6c, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x0b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0x32, 0x0a, + 0x0b, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, + 0x53, 0x48, 0x41, 0x52, 0x45, 0x44, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x59, 0x53, 0x54, 0x45, 0x4d, + 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x50, 0x4f, 0x4f, 0x4c, 0x10, + 0x01, 0x22, 0x22, 0x0a, 0x20, 0x46, 0x75, 0x6c, 0x66, 0x69, 0x6c, 0x6c, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x88, 0x01, 0x0a, 0x1e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, + 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x15, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x4e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x30, + 0x0a, 0x13, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, + 0x72, 0x55, 0x55, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x55, 0x55, 0x49, 0x44, + 0x22, 0x21, 0x0a, 0x1f, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x88, 0x01, 0x0a, 0x1e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x15, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, + 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x13, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x55, + 0x55, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x55, 0x55, 0x49, 0x44, 0x22, 0x69, + 0x0a, 0x1f, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, + 0x61, 0x69, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x46, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 
0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x10, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x32, 0xba, 0x06, 0x0a, 0x0b, 0x4f, 0x43, + 0x53, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x58, 0x0a, 0x0f, 0x4f, 0x6e, 0x62, + 0x6f, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x12, 0x20, 0x2e, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x43, + 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, + 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x62, 0x6f, 0x61, 0x72, + 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x10, 0x4f, 0x66, + 0x66, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x12, 0x21, + 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x4f, 0x66, 0x66, 0x62, 0x6f, 0x61, + 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x22, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x4f, 0x66, 0x66, + 0x62, 0x6f, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x12, 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x61, 0x70, 0x61, 0x63, + 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x61, 0x70, 0x61, + 0x63, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, + 0x0a, 0x15, 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x4f, 0x6e, 0x62, + 0x6f, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x26, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x2e, 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x4f, 0x6e, + 0x62, 0x6f, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x27, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x41, 0x63, 0x6b, 0x6e, 0x6f, + 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x4f, 0x6e, 0x62, 0x6f, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x73, 0x0a, 0x18, 0x46, 0x75, + 0x6c, 0x66, 0x69, 0x6c, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x12, 0x29, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 
0x2e, 0x46, 0x75, 0x6c, 0x66, 0x69, 0x6c, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2a, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x46, 0x75, 0x6c, + 0x66, 0x69, 0x6c, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x70, 0x0a, 0x17, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x12, 0x28, 0x2e, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, + 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, + 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x73, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x28, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x70, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x0f, 0x5a, 0x0d, 0x2e, 0x2f, 0x3b, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_provider_proto_rawDescOnce sync.Once + file_provider_proto_rawDescData = file_provider_proto_rawDesc +) + +func file_provider_proto_rawDescGZIP() []byte { + file_provider_proto_rawDescOnce.Do(func() { + file_provider_proto_rawDescData = protoimpl.X.CompressGZIP(file_provider_proto_rawDescData) + }) + return file_provider_proto_rawDescData +} + +var file_provider_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_provider_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_provider_proto_goTypes = []interface{}{ + (FulfillStorageClassClaimRequest_StorageType)(0), // 0: provider.FulfillStorageClassClaimRequest.StorageType + (*OnboardConsumerRequest)(nil), // 1: provider.OnboardConsumerRequest + (*OnboardConsumerResponse)(nil), // 2: provider.OnboardConsumerResponse + (*StorageConfigRequest)(nil), // 3: provider.StorageConfigRequest + (*ExternalResource)(nil), // 4: provider.ExternalResource + (*StorageConfigResponse)(nil), // 5: provider.StorageConfigResponse + (*OffboardConsumerRequest)(nil), // 6: provider.OffboardConsumerRequest + (*OffboardConsumerResponse)(nil), // 7: provider.OffboardConsumerResponse + (*UpdateCapacityRequest)(nil), // 8: provider.UpdateCapacityRequest + (*UpdateCapacityResponse)(nil), // 9: provider.UpdateCapacityResponse + (*AcknowledgeOnboardingRequest)(nil), // 10: provider.AcknowledgeOnboardingRequest + (*AcknowledgeOnboardingResponse)(nil), // 11: provider.AcknowledgeOnboardingResponse + 
(*FulfillStorageClassClaimRequest)(nil), // 12: provider.FulfillStorageClassClaimRequest + (*FulfillStorageClassClaimResponse)(nil), // 13: provider.FulfillStorageClassClaimResponse + (*RevokeStorageClassClaimRequest)(nil), // 14: provider.RevokeStorageClassClaimRequest + (*RevokeStorageClassClaimResponse)(nil), // 15: provider.RevokeStorageClassClaimResponse + (*StorageClassClaimConfigRequest)(nil), // 16: provider.StorageClassClaimConfigRequest + (*StorageClassClaimConfigResponse)(nil), // 17: provider.StorageClassClaimConfigResponse +} +var file_provider_proto_depIdxs = []int32{ + 4, // 0: provider.StorageConfigResponse.externalResource:type_name -> provider.ExternalResource + 0, // 1: provider.FulfillStorageClassClaimRequest.storageType:type_name -> provider.FulfillStorageClassClaimRequest.StorageType + 4, // 2: provider.StorageClassClaimConfigResponse.externalResource:type_name -> provider.ExternalResource + 1, // 3: provider.OCSProvider.OnboardConsumer:input_type -> provider.OnboardConsumerRequest + 3, // 4: provider.OCSProvider.GetStorageConfig:input_type -> provider.StorageConfigRequest + 6, // 5: provider.OCSProvider.OffboardConsumer:input_type -> provider.OffboardConsumerRequest + 8, // 6: provider.OCSProvider.UpdateCapacity:input_type -> provider.UpdateCapacityRequest + 10, // 7: provider.OCSProvider.AcknowledgeOnboarding:input_type -> provider.AcknowledgeOnboardingRequest + 12, // 8: provider.OCSProvider.FulfillStorageClassClaim:input_type -> provider.FulfillStorageClassClaimRequest + 14, // 9: provider.OCSProvider.RevokeStorageClassClaim:input_type -> provider.RevokeStorageClassClaimRequest + 16, // 10: provider.OCSProvider.GetStorageClassClaimConfig:input_type -> provider.StorageClassClaimConfigRequest + 2, // 11: provider.OCSProvider.OnboardConsumer:output_type -> provider.OnboardConsumerResponse + 5, // 12: provider.OCSProvider.GetStorageConfig:output_type -> provider.StorageConfigResponse + 7, // 13: provider.OCSProvider.OffboardConsumer:output_type -> provider.OffboardConsumerResponse + 9, // 14: provider.OCSProvider.UpdateCapacity:output_type -> provider.UpdateCapacityResponse + 11, // 15: provider.OCSProvider.AcknowledgeOnboarding:output_type -> provider.AcknowledgeOnboardingResponse + 13, // 16: provider.OCSProvider.FulfillStorageClassClaim:output_type -> provider.FulfillStorageClassClaimResponse + 15, // 17: provider.OCSProvider.RevokeStorageClassClaim:output_type -> provider.RevokeStorageClassClaimResponse + 17, // 18: provider.OCSProvider.GetStorageClassClaimConfig:output_type -> provider.StorageClassClaimConfigResponse + 11, // [11:19] is the sub-list for method output_type + 3, // [3:11] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_provider_proto_init() } +func file_provider_proto_init() { + if File_provider_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_provider_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OnboardConsumerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_provider_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OnboardConsumerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_provider_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StorageConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_provider_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExternalResource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_provider_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StorageConfigResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_provider_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OffboardConsumerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_provider_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OffboardConsumerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_provider_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateCapacityRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_provider_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateCapacityResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_provider_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AcknowledgeOnboardingRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_provider_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AcknowledgeOnboardingResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_provider_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FulfillStorageClassClaimRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_provider_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FulfillStorageClassClaimResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_provider_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RevokeStorageClassClaimRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_provider_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RevokeStorageClassClaimResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_provider_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*StorageClassClaimConfigRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_provider_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*StorageClassClaimConfigResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_provider_proto_rawDesc,
+			NumEnums:      1,
+			NumMessages:   17,
+			NumExtensions: 0,
+			NumServices:   1,
+		},
+		GoTypes:           file_provider_proto_goTypes,
+		DependencyIndexes: file_provider_proto_depIdxs,
+		EnumInfos:         file_provider_proto_enumTypes,
+		MessageInfos:      file_provider_proto_msgTypes,
+	}.Build()
+	File_provider_proto = out.File
+	file_provider_proto_rawDesc = nil
+	file_provider_proto_goTypes = nil
+	file_provider_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/red-hat-storage/ocs-operator/services/provider/pb/provider_grpc.pb.go b/vendor/github.com/red-hat-storage/ocs-operator/services/provider/pb/provider_grpc.pb.go
new file mode 100644
index 000000000..c17451660
--- /dev/null
+++ b/vendor/github.com/red-hat-storage/ocs-operator/services/provider/pb/provider_grpc.pb.go
@@ -0,0 +1,377 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+
+package providerpb
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// OCSProviderClient is the client API for OCSProvider service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type OCSProviderClient interface {
+	// OnboardConsumer RPC call to validate the consumer and create StorageConsumer
+	// resource on the StorageProvider cluster
+	OnboardConsumer(ctx context.Context, in *OnboardConsumerRequest, opts ...grpc.CallOption) (*OnboardConsumerResponse, error)
+	// GetStorageConfig RPC call to generate the json config for connecting to storage provider cluster
+	GetStorageConfig(ctx context.Context, in *StorageConfigRequest, opts ...grpc.CallOption) (*StorageConfigResponse, error)
+	// OffboardConsumer RPC call to delete StorageConsumer CR on the storage provider cluster.
+	OffboardConsumer(ctx context.Context, in *OffboardConsumerRequest, opts ...grpc.CallOption) (*OffboardConsumerResponse, error)
+	// UpdateCapacity RPC call to increase or decrease the block pool size
+	UpdateCapacity(ctx context.Context, in *UpdateCapacityRequest, opts ...grpc.CallOption) (*UpdateCapacityResponse, error)
+	// AcknowledgeOnboarding RPC call to acknowledge the onboarding
+	AcknowledgeOnboarding(ctx context.Context, in *AcknowledgeOnboardingRequest, opts ...grpc.CallOption) (*AcknowledgeOnboardingResponse, error)
+	// FulfillStorageClassClaim RPC call to create the StorageclassClaim CR on
+	// provider cluster.
+ FulfillStorageClassClaim(ctx context.Context, in *FulfillStorageClassClaimRequest, opts ...grpc.CallOption) (*FulfillStorageClassClaimResponse, error) + // RevokeStorageClassClaim RPC call to delete the StorageclassClaim CR + // on provider cluster. + RevokeStorageClassClaim(ctx context.Context, in *RevokeStorageClassClaimRequest, opts ...grpc.CallOption) (*RevokeStorageClassClaimResponse, error) + // GetStorageClassClaimConfig RPC call to generate the json config for claim + // specific resources. + GetStorageClassClaimConfig(ctx context.Context, in *StorageClassClaimConfigRequest, opts ...grpc.CallOption) (*StorageClassClaimConfigResponse, error) +} + +type oCSProviderClient struct { + cc grpc.ClientConnInterface +} + +func NewOCSProviderClient(cc grpc.ClientConnInterface) OCSProviderClient { + return &oCSProviderClient{cc} +} + +func (c *oCSProviderClient) OnboardConsumer(ctx context.Context, in *OnboardConsumerRequest, opts ...grpc.CallOption) (*OnboardConsumerResponse, error) { + out := new(OnboardConsumerResponse) + err := c.cc.Invoke(ctx, "/provider.OCSProvider/OnboardConsumer", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *oCSProviderClient) GetStorageConfig(ctx context.Context, in *StorageConfigRequest, opts ...grpc.CallOption) (*StorageConfigResponse, error) { + out := new(StorageConfigResponse) + err := c.cc.Invoke(ctx, "/provider.OCSProvider/GetStorageConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *oCSProviderClient) OffboardConsumer(ctx context.Context, in *OffboardConsumerRequest, opts ...grpc.CallOption) (*OffboardConsumerResponse, error) { + out := new(OffboardConsumerResponse) + err := c.cc.Invoke(ctx, "/provider.OCSProvider/OffboardConsumer", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *oCSProviderClient) UpdateCapacity(ctx context.Context, in *UpdateCapacityRequest, opts ...grpc.CallOption) (*UpdateCapacityResponse, error) { + out := new(UpdateCapacityResponse) + err := c.cc.Invoke(ctx, "/provider.OCSProvider/UpdateCapacity", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *oCSProviderClient) AcknowledgeOnboarding(ctx context.Context, in *AcknowledgeOnboardingRequest, opts ...grpc.CallOption) (*AcknowledgeOnboardingResponse, error) { + out := new(AcknowledgeOnboardingResponse) + err := c.cc.Invoke(ctx, "/provider.OCSProvider/AcknowledgeOnboarding", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *oCSProviderClient) FulfillStorageClassClaim(ctx context.Context, in *FulfillStorageClassClaimRequest, opts ...grpc.CallOption) (*FulfillStorageClassClaimResponse, error) { + out := new(FulfillStorageClassClaimResponse) + err := c.cc.Invoke(ctx, "/provider.OCSProvider/FulfillStorageClassClaim", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *oCSProviderClient) RevokeStorageClassClaim(ctx context.Context, in *RevokeStorageClassClaimRequest, opts ...grpc.CallOption) (*RevokeStorageClassClaimResponse, error) { + out := new(RevokeStorageClassClaimResponse) + err := c.cc.Invoke(ctx, "/provider.OCSProvider/RevokeStorageClassClaim", in, out, opts...) 
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *oCSProviderClient) GetStorageClassClaimConfig(ctx context.Context, in *StorageClassClaimConfigRequest, opts ...grpc.CallOption) (*StorageClassClaimConfigResponse, error) {
+	out := new(StorageClassClaimConfigResponse)
+	err := c.cc.Invoke(ctx, "/provider.OCSProvider/GetStorageClassClaimConfig", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// OCSProviderServer is the server API for OCSProvider service.
+// All implementations must embed UnimplementedOCSProviderServer
+// for forward compatibility
+type OCSProviderServer interface {
+	// OnboardConsumer RPC call to validate the consumer and create StorageConsumer
+	// resource on the StorageProvider cluster
+	OnboardConsumer(context.Context, *OnboardConsumerRequest) (*OnboardConsumerResponse, error)
+	// GetStorageConfig RPC call to generate the json config for connecting to storage provider cluster
+	GetStorageConfig(context.Context, *StorageConfigRequest) (*StorageConfigResponse, error)
+	// OffboardConsumer RPC call to delete StorageConsumer CR on the storage provider cluster.
+	OffboardConsumer(context.Context, *OffboardConsumerRequest) (*OffboardConsumerResponse, error)
+	// UpdateCapacity RPC call to increase or decrease the block pool size
+	UpdateCapacity(context.Context, *UpdateCapacityRequest) (*UpdateCapacityResponse, error)
+	// AcknowledgeOnboarding RPC call to acknowledge the onboarding
+	AcknowledgeOnboarding(context.Context, *AcknowledgeOnboardingRequest) (*AcknowledgeOnboardingResponse, error)
+	// FulfillStorageClassClaim RPC call to create the StorageclassClaim CR on
+	// provider cluster.
+	FulfillStorageClassClaim(context.Context, *FulfillStorageClassClaimRequest) (*FulfillStorageClassClaimResponse, error)
+	// RevokeStorageClassClaim RPC call to delete the StorageclassClaim CR
+	// on provider cluster.
+	RevokeStorageClassClaim(context.Context, *RevokeStorageClassClaimRequest) (*RevokeStorageClassClaimResponse, error)
+	// GetStorageClassClaimConfig RPC call to generate the json config for claim
+	// specific resources.
+	GetStorageClassClaimConfig(context.Context, *StorageClassClaimConfigRequest) (*StorageClassClaimConfigResponse, error)
+	mustEmbedUnimplementedOCSProviderServer()
+}
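For orientation, here is a minimal sketch (not part of the vendored change) of how the generated surface above fits together: a server embeds UnimplementedOCSProviderServer and overrides only the RPCs it supports, and a client dials the same service. The listen address, port, and empty request/response fields are illustrative assumptions, not values taken from this PR.

// Illustrative sketch only; assumes the vendored package above is
// importable as providerpb.
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	providerpb "github.com/red-hat-storage/ocs-operator/services/provider/pb"
)

// ocsProvider embeds UnimplementedOCSProviderServer so the implementation
// stays forward compatible when new RPCs are added to the service.
type ocsProvider struct {
	providerpb.UnimplementedOCSProviderServer
}

// OnboardConsumer overrides one RPC; the remaining methods fall back to
// the embedded stubs and return codes.Unimplemented.
func (s *ocsProvider) OnboardConsumer(ctx context.Context, req *providerpb.OnboardConsumerRequest) (*providerpb.OnboardConsumerResponse, error) {
	// Real code would validate the consumer and create a StorageConsumer CR.
	return &providerpb.OnboardConsumerResponse{}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":50051") // port is an arbitrary example
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	providerpb.RegisterOCSProviderServer(s, &ocsProvider{})
	log.Fatal(s.Serve(lis))
}

// onboard shows the client side against the same service; the target
// address and empty request are placeholders.
func onboard(ctx context.Context) (*providerpb.OnboardConsumerResponse, error) {
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	return providerpb.NewOCSProviderClient(conn).OnboardConsumer(ctx, &providerpb.OnboardConsumerRequest{})
}

Embedding the Unimplemented struct is what satisfies mustEmbedUnimplementedOCSProviderServer(), so RPCs added to the service later degrade to codes.Unimplemented instead of breaking the build.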
+
+// UnimplementedOCSProviderServer must be embedded to have forward compatible implementations.
+type UnimplementedOCSProviderServer struct {
+}
+
+func (UnimplementedOCSProviderServer) OnboardConsumer(context.Context, *OnboardConsumerRequest) (*OnboardConsumerResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method OnboardConsumer not implemented")
+}
+func (UnimplementedOCSProviderServer) GetStorageConfig(context.Context, *StorageConfigRequest) (*StorageConfigResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetStorageConfig not implemented")
+}
+func (UnimplementedOCSProviderServer) OffboardConsumer(context.Context, *OffboardConsumerRequest) (*OffboardConsumerResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method OffboardConsumer not implemented")
+}
+func (UnimplementedOCSProviderServer) UpdateCapacity(context.Context, *UpdateCapacityRequest) (*UpdateCapacityResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UpdateCapacity not implemented")
+}
+func (UnimplementedOCSProviderServer) AcknowledgeOnboarding(context.Context, *AcknowledgeOnboardingRequest) (*AcknowledgeOnboardingResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method AcknowledgeOnboarding not implemented")
+}
+func (UnimplementedOCSProviderServer) FulfillStorageClassClaim(context.Context, *FulfillStorageClassClaimRequest) (*FulfillStorageClassClaimResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method FulfillStorageClassClaim not implemented")
+}
+func (UnimplementedOCSProviderServer) RevokeStorageClassClaim(context.Context, *RevokeStorageClassClaimRequest) (*RevokeStorageClassClaimResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RevokeStorageClassClaim not implemented")
+}
+func (UnimplementedOCSProviderServer) GetStorageClassClaimConfig(context.Context, *StorageClassClaimConfigRequest) (*StorageClassClaimConfigResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetStorageClassClaimConfig not implemented")
+}
+func (UnimplementedOCSProviderServer) mustEmbedUnimplementedOCSProviderServer() {}
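The generated method handlers below thread an optional grpc.UnaryServerInterceptor around every dispatch, which is what makes cross-cutting concerns cheap to install once for all eight RPCs. A hedged sketch of that pattern follows; the logging behavior and helper name are assumptions, not part of the vendored code.

// Illustrative sketch only; shows one logging interceptor applied to
// every RPC dispatched through the generated handlers below.
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	providerpb "github.com/red-hat-storage/ocs-operator/services/provider/pb"
)

func newServerWithLogging(impl providerpb.OCSProviderServer) *grpc.Server {
	logging := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		log.Printf("rpc start: %s", info.FullMethod)
		resp, err := handler(ctx, req)
		if err != nil {
			log.Printf("rpc error: %s: %v", info.FullMethod, err)
		}
		return resp, err
	}
	s := grpc.NewServer(grpc.UnaryInterceptor(logging))
	providerpb.RegisterOCSProviderServer(s, impl)
	return s
}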
+
+// UnsafeOCSProviderServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to OCSProviderServer will
+// result in compilation errors.
+type UnsafeOCSProviderServer interface {
+	mustEmbedUnimplementedOCSProviderServer()
+}
+
+func RegisterOCSProviderServer(s grpc.ServiceRegistrar, srv OCSProviderServer) {
+	s.RegisterService(&OCSProvider_ServiceDesc, srv)
+}
+
+func _OCSProvider_OnboardConsumer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(OnboardConsumerRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OCSProviderServer).OnboardConsumer(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/provider.OCSProvider/OnboardConsumer",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OCSProviderServer).OnboardConsumer(ctx, req.(*OnboardConsumerRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _OCSProvider_GetStorageConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(StorageConfigRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OCSProviderServer).GetStorageConfig(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/provider.OCSProvider/GetStorageConfig",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OCSProviderServer).GetStorageConfig(ctx, req.(*StorageConfigRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _OCSProvider_OffboardConsumer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(OffboardConsumerRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OCSProviderServer).OffboardConsumer(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/provider.OCSProvider/OffboardConsumer",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OCSProviderServer).OffboardConsumer(ctx, req.(*OffboardConsumerRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _OCSProvider_UpdateCapacity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(UpdateCapacityRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OCSProviderServer).UpdateCapacity(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/provider.OCSProvider/UpdateCapacity",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OCSProviderServer).UpdateCapacity(ctx, req.(*UpdateCapacityRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _OCSProvider_AcknowledgeOnboarding_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AcknowledgeOnboardingRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OCSProviderServer).AcknowledgeOnboarding(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/provider.OCSProvider/AcknowledgeOnboarding",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return
srv.(OCSProviderServer).AcknowledgeOnboarding(ctx, req.(*AcknowledgeOnboardingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OCSProvider_FulfillStorageClassClaim_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FulfillStorageClassClaimRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OCSProviderServer).FulfillStorageClassClaim(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/provider.OCSProvider/FulfillStorageClassClaim", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OCSProviderServer).FulfillStorageClassClaim(ctx, req.(*FulfillStorageClassClaimRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OCSProvider_RevokeStorageClassClaim_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RevokeStorageClassClaimRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OCSProviderServer).RevokeStorageClassClaim(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/provider.OCSProvider/RevokeStorageClassClaim", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OCSProviderServer).RevokeStorageClassClaim(ctx, req.(*RevokeStorageClassClaimRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OCSProvider_GetStorageClassClaimConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StorageClassClaimConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OCSProviderServer).GetStorageClassClaimConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/provider.OCSProvider/GetStorageClassClaimConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OCSProviderServer).GetStorageClassClaimConfig(ctx, req.(*StorageClassClaimConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// OCSProvider_ServiceDesc is the grpc.ServiceDesc for OCSProvider service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var OCSProvider_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "provider.OCSProvider", + HandlerType: (*OCSProviderServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "OnboardConsumer", + Handler: _OCSProvider_OnboardConsumer_Handler, + }, + { + MethodName: "GetStorageConfig", + Handler: _OCSProvider_GetStorageConfig_Handler, + }, + { + MethodName: "OffboardConsumer", + Handler: _OCSProvider_OffboardConsumer_Handler, + }, + { + MethodName: "UpdateCapacity", + Handler: _OCSProvider_UpdateCapacity_Handler, + }, + { + MethodName: "AcknowledgeOnboarding", + Handler: _OCSProvider_AcknowledgeOnboarding_Handler, + }, + { + MethodName: "FulfillStorageClassClaim", + Handler: _OCSProvider_FulfillStorageClassClaim_Handler, + }, + { + MethodName: "RevokeStorageClassClaim", + Handler: _OCSProvider_RevokeStorageClassClaim_Handler, + }, + { + MethodName: "GetStorageClassClaimConfig", + Handler: _OCSProvider_GetStorageClassClaimConfig_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "provider.proto", +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 000000000..dc5225b6d --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. 
Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. +type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. 
+ // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. + if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. +func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. 
+// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. +func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? + if dstStart.After(srcStart) { + advance := int(dstStart.Sub(srcStart) / srcInterval) + srcIndex += advance + srcStart = srcStart.Add(time.Duration(advance) * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. 
+ overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. +func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 000000000..c646a6952 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. 
+// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. + f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. 
+func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. +func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. 
+ el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. 
+func freeEventLog(el *eventLog) {
+	el.reset()
+	select {
+	case freeEventLogs <- el:
+	default:
+	}
+}
+
+var eventsTmplCache *template.Template
+var eventsTmplOnce sync.Once
+
+func eventsTmpl() *template.Template {
+	eventsTmplOnce.Do(func() {
+		eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{
+			"elapsed":   elapsed,
+			"trimSpace": strings.TrimSpace,
+		}).Parse(eventsHTML))
+	})
+	return eventsTmplCache
+}
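Stepping back from the generated plumbing: a small sketch (assumed usage, not part of the vendored file) of this package's exported surface — NewEventLog for per-object logs whose entries feed the page that eventsTmpl renders, and RenderEvents to mount that page on an HTTP mux. The family and title strings, port, and mux wiring are illustrative.

// Illustrative sketch only; exercises trace.NewEventLog and
// trace.RenderEvents as defined in the vendored file above.
package main

import (
	"net/http"

	"golang.org/x/net/trace"
)

func main() {
	// Per-object event log: entries appear under the "Fetcher" family
	// on the /debug/events page served below.
	el := trace.NewEventLog("Fetcher", "fetcher-1")
	defer el.Finish()
	el.Printf("starting up")
	el.Errorf("transient failure: %v", "example error")

	// RenderEvents does no auth checking itself, so real callers should
	// gate the sensitive flag on an authorization decision.
	http.HandleFunc("/debug/events", func(w http.ResponseWriter, r *http.Request) {
		trace.RenderEvents(w, r, true)
	})
	_ = http.ListenAndServe(":8080", nil) // address is an arbitrary example
}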

+const eventsHTML = `
+<html>
+	<head>
+		<title>events</title>
+	</head>
+	<body>
+
+<h1>/debug/events</h1>
+
+<table id="req-status">
+	{{range $i, $fam := .Families}}
+	<tr>
+		<td>{{$fam}}</td>
+
+		{{range $j, $bucket := $.Buckets}}
+		{{$n := index $.Counts $i $j}}
+		<td>
+			{{if $n}}<a href="?fam={{$fam}}&b={{$j}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+			[{{$n}} {{$bucket.String}}]
+			{{if $n}}</a>{{end}}
+		</td>
+		{{end}}
+
+	</tr>
+	{{end}}
+</table>
+
+{{if $.EventLogs}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
+[Summary]{{if $.Expanded}}</a>{{end}}
+
+{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
+[Expanded]{{if not $.Expanded}}</a>{{end}}
+
+<table id="reqs">
+	<tr><th>When</th><th>Elapsed</th></tr>
+	{{range $el := $.EventLogs}}
+	<tr class="first">
+		<td class="when">{{$el.When}}</td>
+		<td class="elapsed">{{$el.ElapsedTime}}</td>
+		<td>{{$el.Title}}</td>
+	</tr>
+	{{if $.Expanded}}
+	<tr>
+		<td class="when"></td>
+		<td class="elapsed"></td>
+		<td><pre>{{$el.Stack|trimSpace}}</pre></td>
+	</tr>
+	{{range $el.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+	{{end}}
+</table>
+{{end}}
+
+
+`
diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go
new file mode 100644
index 000000000..9bf4286c7
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/histogram.go
@@ -0,0 +1,365 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+// This file implements histogramming for RPC statistics collection.
+
+import (
+	"bytes"
+	"fmt"
+	"html/template"
+	"log"
+	"math"
+	"sync"
+
+	"golang.org/x/net/internal/timeseries"
+)
+
+const (
+	bucketCount = 38
+)
+
+// histogram keeps counts of values in buckets that are spaced
+// out in powers of 2: 0-1, 2-3, 4-7...
+// histogram implements timeseries.Observable.
+type histogram struct {
+	sum          int64   // running total of measurements
+	sumOfSquares float64 // running total of squared measurements
+	buckets      []int64 // bucketed values for histogram
+	value        int     // holds a single value as an optimization
+	valueCount   int64   // number of values recorded for single value
+}
+
+// addMeasurement records a value measurement observation to the histogram.
+func (h *histogram) addMeasurement(value int64) {
+	// TODO: assert invariant
+	h.sum += value
+	h.sumOfSquares += float64(value) * float64(value)
+
+	bucketIndex := getBucket(value)
+
+	if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
+		h.value = bucketIndex
+		h.valueCount++
+	} else {
+		h.allocateBuckets()
+		h.buckets[bucketIndex]++
+	}
+}
+
+func (h *histogram) allocateBuckets() {
+	if h.buckets == nil {
+		h.buckets = make([]int64, bucketCount)
+		h.buckets[h.value] = h.valueCount
+		h.value = 0
+		h.valueCount = -1
+	}
+}
+
+func log2(i int64) int {
+	n := 0
+	for ; i >= 0x100; i >>= 8 {
+		n += 8
+	}
+	for ; i > 0; i >>= 1 {
+		n += 1
+	}
+	return n
+}
+
+func getBucket(i int64) (index int) {
+	index = log2(i) - 1
+	if index < 0 {
+		index = 0
+	}
+	if index >= bucketCount {
+		index = bucketCount - 1
+	}
+	return
+}
+
+// total returns the number of recorded observations.
+func (h *histogram) total() (total int64) {
+	if h.valueCount >= 0 {
+		total = h.valueCount
+	}
+	for _, val := range h.buckets {
+		total += int64(val)
+	}
+	return
+}
+
+// average returns the average value of recorded observations.
+func (h *histogram) average() float64 {
+	t := h.total()
+	if t == 0 {
+		return 0
+	}
+	return float64(h.sum) / float64(t)
+}
+
+// variance returns the variance of recorded observations.
+func (h *histogram) variance() float64 {
+	t := float64(h.total())
+	if t == 0 {
+		return 0
+	}
+	s := float64(h.sum) / t
+	return h.sumOfSquares/t - s*s
+}
+
+// standardDeviation returns the standard deviation of recorded observations.
+func (h *histogram) standardDeviation() float64 {
+	return math.Sqrt(h.variance())
+}
+
+// percentileBoundary estimates the value that the given fraction of recorded
+// observations are less than.
+func (h *histogram) percentileBoundary(percentile float64) int64 {
+	total := h.total()
+
+	// Corner cases (make sure result is strictly less than total())
+	if total == 0 {
+		return 0
+	} else if total == 1 {
+		return int64(h.average())
+	}
+
+	percentOfTotal := round(float64(total) * percentile)
+	var runningTotal int64
+
+	for i := range h.buckets {
+		value := h.buckets[i]
+		runningTotal += value
+		if runningTotal == percentOfTotal {
+			// We hit an exact bucket boundary. If the next bucket has data, it is a
+			// good estimate of the value. If the bucket is empty, we interpolate the
+			// midpoint between the next bucket's boundary and the next non-zero
+			// bucket. If the remaining buckets are all empty, then we use the
+			// boundary for the next bucket as the estimate.
+			j := uint8(i + 1)
+			min := bucketBoundary(j)
+			if runningTotal < total {
+				for h.buckets[j] == 0 {
+					j++
+				}
+			}
+			max := bucketBoundary(j)
+			return min + round(float64(max-min)/2)
+		} else if runningTotal > percentOfTotal {
+			// The value is in this bucket. Interpolate the value.
+			delta := runningTotal - percentOfTotal
+			percentBucket := float64(value-delta) / float64(value)
+			bucketMin := bucketBoundary(uint8(i))
+			nextBucketMin := bucketBoundary(uint8(i + 1))
+			bucketSize := nextBucketMin - bucketMin
+			return bucketMin + round(percentBucket*float64(bucketSize))
+		}
+	}
+	return bucketBoundary(bucketCount - 1)
+}
+
+// median returns the estimated median of the observed values.
+func (h *histogram) median() int64 {
+	return h.percentileBoundary(0.5)
+}
+
+// Add adds other to h.
+func (h *histogram) Add(other timeseries.Observable) {
+	o := other.(*histogram)
+	if o.valueCount == 0 {
+		// Other histogram is empty
+	} else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
+		// Both have a single bucketed value, aggregate them
+		h.valueCount += o.valueCount
+	} else {
+		// Two different values necessitate buckets in this histogram
+		h.allocateBuckets()
+		if o.valueCount >= 0 {
+			h.buckets[o.value] += o.valueCount
+		} else {
+			for i := range h.buckets {
+				h.buckets[i] += o.buckets[i]
+			}
+		}
+	}
+	h.sumOfSquares += o.sumOfSquares
+	h.sum += o.sum
+}
+
+// Clear resets the histogram to an empty state, removing all observed values.
+func (h *histogram) Clear() {
+	h.buckets = nil
+	h.value = 0
+	h.valueCount = 0
+	h.sum = 0
+	h.sumOfSquares = 0
+}
+
+// CopyFrom copies from other, which must be a *histogram, into h.
+func (h *histogram) CopyFrom(other timeseries.Observable) {
+	o := other.(*histogram)
+	if o.valueCount == -1 {
+		h.allocateBuckets()
+		copy(h.buckets, o.buckets)
+	}
+	h.sum = o.sum
+	h.sumOfSquares = o.sumOfSquares
+	h.value = o.value
+	h.valueCount = o.valueCount
+}
+
+// Multiply scales the histogram by the specified ratio.
+func (h *histogram) Multiply(ratio float64) {
+	if h.valueCount == -1 {
+		for i := range h.buckets {
+			h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
+		}
+	} else {
+		h.valueCount = int64(float64(h.valueCount) * ratio)
+	}
+	h.sum = int64(float64(h.sum) * ratio)
+	h.sumOfSquares = h.sumOfSquares * ratio
+}
+
+// New creates a new histogram.
+func (h *histogram) New() timeseries.Observable {
+	r := new(histogram)
+	r.Clear()
+	return r
+}
+
+func (h *histogram) String() string {
+	return fmt.Sprintf("%d, %f, %d, %d, %v",
+		h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
+}
+
+// round returns the closest int64 to the argument.
+func round(in float64) int64 {
+	return int64(math.Floor(in + 0.5))
+}
+
+// bucketBoundary returns the first value in the bucket.
+func bucketBoundary(bucket uint8) int64 {
+	if bucket == 0 {
+		return 0
+	}
+	return 1 << bucket
+}
+
+// bucketData holds data about a specific bucket for use in distTmpl.
+type bucketData struct {
+	Lower, Upper       int64
+	N                  int64
+	Pct, CumulativePct float64
+	GraphWidth         int
+}
+
+// data holds data about a Distribution for use in distTmpl.
+type data struct {
+	Buckets                 []*bucketData
+	Count, Median           int64
+	Mean, StandardDeviation float64
+}
+
+// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
+
+ +{{range $b := .Buckets}} +{{if $b}} + + + + + + + + + +{{end}} +{{end}} +
[{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
+`))
+	})
+	return distTmplCache
+}
diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go
new file mode 100644
index 000000000..3ebf6f2da
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/trace.go
@@ -0,0 +1,1130 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package trace implements tracing of requests and long-lived objects.
+It exports HTTP interfaces on /debug/requests and /debug/events.
+
+A trace.Trace provides tracing for short-lived objects, usually requests.
+A request handler might be implemented like this:
+
+	func fooHandler(w http.ResponseWriter, req *http.Request) {
+		tr := trace.New("mypkg.Foo", req.URL.Path)
+		defer tr.Finish()
+		...
+		tr.LazyPrintf("some event %q happened", str)
+		...
+		if err := somethingImportant(); err != nil {
+			tr.LazyPrintf("somethingImportant failed: %v", err)
+			tr.SetError()
+		}
+	}
+
+The /debug/requests HTTP endpoint organizes the traces by family,
+errors, and duration. It also provides a histogram of request duration
+for each family.
+
+A trace.EventLog provides tracing for long-lived objects, such as RPC
+connections.
+
+	// A Fetcher fetches URL paths for a single domain.
+	type Fetcher struct {
+		domain string
+		events trace.EventLog
+	}
+
+	func NewFetcher(domain string) *Fetcher {
+		return &Fetcher{
+			domain,
+			trace.NewEventLog("mypkg.Fetcher", domain),
+		}
+	}
+
+	func (f *Fetcher) Fetch(path string) (string, error) {
+		resp, err := http.Get("http://" + f.domain + "/" + path)
+		if err != nil {
+			f.events.Errorf("Get(%q) = %v", path, err)
+			return "", err
+		}
+		f.events.Printf("Get(%q) = %s", path, resp.Status)
+		...
+	}
+
+	func (f *Fetcher) Close() error {
+		f.events.Finish()
+		return nil
+	}
+
+The /debug/events HTTP endpoint organizes the event logs by family and
+by time since the last error. The expanded view displays recent log
+entries and the log's call stack.
+*/
+package trace // import "golang.org/x/net/trace"
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"html/template"
+	"io"
+	"log"
+	"net"
+	"net/http"
+	"net/url"
+	"runtime"
+	"sort"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/net/internal/timeseries"
+)
+
+// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing.
+// FOR DEBUGGING ONLY. This will slow down the program.
+var DebugUseAfterFinish = false
+
+// HTTP ServeMux paths.
+const (
+	debugRequestsPath = "/debug/requests"
+	debugEventsPath   = "/debug/events"
+)
+
+// AuthRequest determines whether a specific request is permitted to load the
+// /debug/requests or /debug/events pages.
+//
+// It returns two bools; the first indicates whether the page may be viewed at all,
+// and the second indicates whether sensitive events will be shown.
+//
+// AuthRequest may be replaced by a program to customize its authorization requirements.
+//
+// The default AuthRequest function returns (true, true) if and only if the request
+// comes from localhost/127.0.0.1/[::1].
+var AuthRequest = func(req *http.Request) (any, sensitive bool) {
+	// RemoteAddr is commonly in the form "IP" or "IP:port".
+	// If it is in the form "IP:port", split off the port.
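+	// Note (illustrative, not in the upstream file): net.SplitHostPort
+	// returns an error for a bare "IP" with no port, which is why the
+	// error branch below falls back to using req.RemoteAddr verbatim.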
+ host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}}) + if pat == debugRequestsPath { + panic("/debug/requests is already registered. You may have two independent copies of " + + "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + + "involve a vendored copy of golang.org/x/net/trace.") + } + + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc(debugRequestsPath, Traces) + http.HandleFunc(debugEventsPath, Events) +} + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. 
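+		// Note (illustrative, not in the upstream file): for example,
+		//
+		//	GET /debug/requests?fam=mypkg.Foo&b=0&show_sensitive=0
+		//
+		// renders bucket 0 of family mypkg.Foo with sensitive events
+		// redacted, regardless of what AuthRequest reported.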
+ if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. + data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. 
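+	// Note (illustrative, not in the upstream file): a typical recycler
+	// returns LazyLog payloads to a pool, e.g.
+	//
+	//	tr.SetRecycler(func(x interface{}) {
+	//		if b, ok := x.(*bytes.Buffer); ok {
+	//			bufPool.Put(b) // bufPool: a hypothetical sync.Pool
+	//		}
+	//	})
+	//
+	// which works because *bytes.Buffer satisfies the fmt.Stringer
+	// accepted by LazyLog.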
+ SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Now().Sub(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. 
+} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. 
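+		// Note (illustrative, not in the upstream file): when the bucket is
+		// full, i has wrapped around to b.start, so b.buf[i] is the oldest
+		// trace; it is released and overwritten, and b.start advances to
+		// preserve FIFO order (e.g. start=3, length=10, tracesPerBucket=10
+		// gives i = 13-10 = 3 = start).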
+		b.buf[i].unref()
+		b.start++
+		if b.start == tracesPerBucket {
+			b.start = 0
+		}
+	}
+	b.buf[i] = tr
+	if b.length < tracesPerBucket {
+		b.length++
+	}
+	tr.ref()
+}
+
+// Copy returns a copy of the traces in the bucket.
+// If tracedOnly is true, only the traces with trace information will be returned.
+// The traces will be ref'd before returning; the caller should call
+// the Free method when it is done with them.
+// TODO(dsymonds): keep track of traced requests in separate buckets.
+func (b *traceBucket) Copy(tracedOnly bool) traceList {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+
+	trl := make(traceList, 0, b.length)
+	for i, x := 0, b.start; i < b.length; i++ {
+		tr := b.buf[x]
+		if !tracedOnly || tr.spanID != 0 {
+			tr.ref()
+			trl = append(trl, tr)
+		}
+		x++
+		if x == b.length {
+			x = 0
+		}
+	}
+	return trl
+}
+
+func (b *traceBucket) Empty() bool {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	return b.length == 0
+}
+
+// cond represents a condition on a trace.
+type cond interface {
+	match(t *trace) bool
+	String() string
+}
+
+type minCond time.Duration
+
+func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) }
+func (m minCond) String() string      { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) }
+
+type errorCond struct{}
+
+func (e errorCond) match(t *trace) bool { return t.IsError }
+func (e errorCond) String() string      { return "errors" }
+
+type traceList []*trace
+
+// Free calls unref on each element of the list.
+func (trl traceList) Free() {
+	for _, t := range trl {
+		t.unref()
+	}
+}
+
+// traceList may be sorted in reverse chronological order.
+func (trl traceList) Len() int           { return len(trl) }
+func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) }
+func (trl traceList) Swap(i, j int)      { trl[i], trl[j] = trl[j], trl[i] }
+
+// An event is a timestamped log entry in a trace.
+type event struct {
+	When       time.Time
+	Elapsed    time.Duration // since previous event in trace
+	NewDay     bool          // whether this event is on a different day to the previous event
+	Recyclable bool          // whether this event was passed via LazyLog
+	Sensitive  bool          // whether this event contains sensitive information
+	What       interface{}   // string or fmt.Stringer
+}
+
+// WhenString returns a string representation of the time of the event.
+// It will include the date if midnight was crossed.
+func (e event) WhenString() string {
+	if e.NewDay {
+		return e.When.Format("2006/01/02 15:04:05.000000")
+	}
+	return e.When.Format("15:04:05.000000")
+}
+
+// discarded represents a number of discarded events.
+// It is stored as *discarded to make it easier to update in-place.
+type discarded int
+
+func (d *discarded) String() string {
+	return fmt.Sprintf("(%d events discarded)", int(*d))
+}
+
+// trace represents an active or complete request,
+// either sent or received by this program.
+type trace struct {
+	// Family is the top-level grouping of traces to which this belongs.
+	Family string
+
+	// Title is the title of this trace.
+	Title string
+
+	// Start time of this trace.
+	Start time.Time
+
+	mu        sync.RWMutex
+	events    []event // Append-only sequence of events (modulo discards).
+	maxEvents int
+	recycler  func(interface{})
+	IsError   bool          // Whether this trace resulted in an error.
+	Elapsed   time.Duration // Elapsed time for this trace, zero while active.
+	traceID   uint64        // Trace information if non-zero.
+ spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. 
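+	// Note (illustrative, not in the upstream file): m > 3 is required
+	// because addEvent's discard path replaces the middle slot, index
+	// (maxEvents-1)/2, with a "(N events discarded)" marker; e.g.
+	// SetMaxEvents(4) keeps events[0], the marker at events[1], and the
+	// two most recent events.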
+ if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } + tr.mu.Unlock() +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + tr.mu.RLock() + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. + go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + tr.mu.RUnlock() + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + tr.mu.RLock() + t := tr.Elapsed + tr.mu.RUnlock() + + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

/debug/requests

+{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
+{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
+

Family: {{$.Family}}

+ +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

Showing {{len $.Traces}} of {{$.Total}} traces.

+{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests +
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
+{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/genproto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go new file mode 100644 index 000000000..f34a38e4e --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -0,0 +1,201 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.2 +// source: google/rpc/status.proto + +package status + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. +// +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. 
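+	// Note (illustrative, not in the generated file): each detail is a
+	// google.protobuf.Any; with the protobuf v2 API a consumer can unpack
+	// one via
+	//
+	//	msg, err := detail.UnmarshalNew()
+	//
+	// and then type-switch on the resulting proto.Message.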
+ Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. +func (*Status) Descriptor() ([]byte, []int) { + return file_google_rpc_status_proto_rawDescGZIP(), []int{0} +} + +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetDetails() []*anypb.Any { + if x != nil { + return x.Details + } + return nil +} + +var File_google_rpc_status_proto protoreflect.FileDescriptor + +var file_google_rpc_status_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_google_rpc_status_proto_rawDescOnce sync.Once + file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc +) + +func file_google_rpc_status_proto_rawDescGZIP() []byte { + file_google_rpc_status_proto_rawDescOnce.Do(func() { + file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData) + }) + return file_google_rpc_status_proto_rawDescData +} + +var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var 
file_google_rpc_status_proto_goTypes = []interface{}{ + (*Status)(nil), // 0: google.rpc.Status + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_google_rpc_status_proto_depIdxs = []int32{ + 1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_rpc_status_proto_init() } +func file_google_rpc_status_proto_init() { + if File_google_rpc_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_rpc_status_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_rpc_status_proto_goTypes, + DependencyIndexes: file_google_rpc_status_proto_depIdxs, + MessageInfos: file_google_rpc_status_proto_msgTypes, + }.Build() + File_google_rpc_status_proto = out.File + file_google_rpc_status_proto_rawDesc = nil + file_google_rpc_status_proto_goTypes = nil + file_google_rpc_status_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS new file mode 100644 index 000000000..e491a9e7f --- /dev/null +++ b/vendor/google.golang.org/grpc/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md new file mode 100644 index 000000000..9d4213ebc --- /dev/null +++ b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## Community Code of Conduct + +gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md new file mode 100644 index 000000000..52338d004 --- /dev/null +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -0,0 +1,60 @@ +# How to contribute + +We definitely welcome your patches and contributions to gRPC! Please read the gRPC +organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) +and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. + +If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) + +## Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf). + +## Guidelines for Pull Requests +How to get your contributions merged smoothly and quickly. + +- Create **small PRs** that are narrowly focused on **addressing a single + concern**. We often times receive PRs that are trying to fix several things at + a time, but only one fix is considered acceptable, nothing gets merged and + both author's & review's time is wasted. Create more PRs to address different + concerns and everyone will be happy. 
+ +- The grpc package should only depend on standard Go packages and a small number + of exceptions. If your contribution introduces new dependencies which are NOT + in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a + discussion with gRPC-Go authors and consultants. + +- For speculative changes, consider opening an issue and discussing it first. If + you are suggesting a behavioral or API change, consider starting with a [gRFC + proposal](https://github.com/grpc/proposal). + +- Provide a good **PR description** as a record of **what** change is being made + and **why** it was made. Link to a github issue if it exists. + +- Don't fix code style and formatting unless you are already changing that line + to address an issue. PRs with irrelevant changes won't be merged. If you do + want to fix formatting or style, do that in a separate PR. + +- Unless your PR is trivial, you should expect there will be reviewer comments + that you'll need to address before merging. We expect you to be reasonably + responsive to those comments, otherwise the PR will be closed after 2-3 weeks + of inactivity. + +- Maintain **clean commit history** and use **meaningful commit messages**. PRs + with messy commit history are difficult to review and won't be merged. Use + `rebase -i upstream/master` to curate your commit history and/or to bring in + latest changes from master (but avoid rebasing in the middle of a code + review). + +- Keep your PR up to date with upstream/master (if there are merge conflicts, we + can't really merge your change). + +- **All tests need to be passing** before your change can be merged. We + recommend you **run tests locally** before creating your PR to catch breakages + early on. + - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode + +- Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/vendor/google.golang.org/grpc/GOVERNANCE.md b/vendor/google.golang.org/grpc/GOVERNANCE.md new file mode 100644 index 000000000..d6ff26747 --- /dev/null +++ b/vendor/google.golang.org/grpc/GOVERNANCE.md @@ -0,0 +1 @@ +This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md). diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/grpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md new file mode 100644 index 000000000..c6672c0a3 --- /dev/null +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -0,0 +1,28 @@ +This page lists all active maintainers of this repository. If you were a +maintainer and would like to add your name to the Emeritus list, please send us a +PR. 
+ +See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md) +for governance guidelines and how to become a maintainer. +See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) +for general contribution guidelines. + +## Maintainers (in alphabetical order) + +- [cesarghali](https://github.com/cesarghali), Google LLC +- [dfawley](https://github.com/dfawley), Google LLC +- [easwars](https://github.com/easwars), Google LLC +- [menghanl](https://github.com/menghanl), Google LLC +- [srini100](https://github.com/srini100), Google LLC + +## Emeritus Maintainers (in alphabetical order) +- [adelez](https://github.com/adelez), Google LLC +- [canguler](https://github.com/canguler), Google LLC +- [iamqizhao](https://github.com/iamqizhao), Google LLC +- [jadekler](https://github.com/jadekler), Google LLC +- [jtattermusch](https://github.com/jtattermusch), Google LLC +- [lyuxuan](https://github.com/lyuxuan), Google LLC +- [makmukhi](https://github.com/makmukhi), Google LLC +- [matt-kwong](https://github.com/matt-kwong), Google LLC +- [nicolasnoble](https://github.com/nicolasnoble), Google LLC +- [yongni](https://github.com/yongni), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile new file mode 100644 index 000000000..1f8960922 --- /dev/null +++ b/vendor/google.golang.org/grpc/Makefile @@ -0,0 +1,46 @@ +all: vet test testrace + +build: + go build google.golang.org/grpc/... + +clean: + go clean -i google.golang.org/grpc/... + +deps: + GO111MODULE=on go get -d -v google.golang.org/grpc/... + +proto: + @ if ! which protoc > /dev/null; then \ + echo "error: protoc not installed" >&2; \ + exit 1; \ + fi + go generate google.golang.org/grpc/... + +test: + go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testsubmodule: + cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... + cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/... + +testrace: + go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testdeps: + GO111MODULE=on go get -d -v -t google.golang.org/grpc/... + +vet: vetdeps + ./vet.sh + +vetdeps: + ./vet.sh -install + +.PHONY: \ + all \ + build \ + clean \ + proto \ + test \ + testrace \ + vet \ + vetdeps diff --git a/vendor/google.golang.org/grpc/NOTICE.txt b/vendor/google.golang.org/grpc/NOTICE.txt new file mode 100644 index 000000000..530197749 --- /dev/null +++ b/vendor/google.golang.org/grpc/NOTICE.txt @@ -0,0 +1,13 @@ +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md new file mode 100644 index 000000000..0e6ae69a5 --- /dev/null +++ b/vendor/google.golang.org/grpc/README.md @@ -0,0 +1,141 @@ +# gRPC-Go + +[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) +[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] +[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) + +The [Go][] implementation of [gRPC][]: A high performance, open source, general +RPC framework that puts mobile and HTTP/2 first. For more information see the +[Go gRPC docs][], or jump directly into the [quick start][]. + +## Prerequisites + +- **[Go][]**: any one of the **three latest major** [releases][go-releases]. + +## Installation + +With [Go module][] support (Go 1.11+), simply add the following import + +```go +import "google.golang.org/grpc" +``` + +to your code, and then `go [build|run|test]` will automatically fetch the +necessary dependencies. + +Otherwise, to install the `grpc-go` package, run the following command: + +```console +$ go get -u google.golang.org/grpc +``` + +> **Note:** If you are trying to access `grpc-go` from **China**, see the +> [FAQ](#FAQ) below. + +## Learn more + +- [Go gRPC docs][], which include a [quick start][] and [API + reference][API] among other resources +- [Low-level technical docs](Documentation) from this repository +- [Performance benchmark][] +- [Examples](examples) + +## FAQ + +### I/O Timeout Errors + +The `golang.org` domain may be blocked from some countries. `go get` usually +produces an error like the following when this happens: + +```console +$ go get -u google.golang.org/grpc +package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout) +``` + +To build Go code, there are several options: + +- Set up a VPN and access google.golang.org through that. + +- Without Go module support: `git clone` the repo manually: + + ```sh + git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc + ``` + + You will need to do the same for all of grpc's dependencies in `golang.org`, + e.g. `golang.org/x/net`. + +- With Go module support: it is possible to use the `replace` feature of `go + mod` to create aliases for golang.org packages. In your project's directory: + + ```sh + go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest + go mod tidy + go mod vendor + go build -mod=vendor + ``` + + Again, this will need to be done for all transitive dependencies hosted on + golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). + +### Compiling error, undefined: grpc.SupportPackageIsVersion + +#### If you are using Go modules: + +Ensure your gRPC-Go version is `require`d at the appropriate version in +the same module containing the generated `.pb.go` files. For example, +`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: + +```go +module + +require ( + google.golang.org/grpc v1.27.0 +) +``` + +#### If you are *not* using Go modules: + +Update the `proto` package, gRPC package, and rebuild the `.proto` files: + +```sh +go get -u github.com/golang/protobuf/{proto,protoc-gen-go} +go get -u google.golang.org/grpc +protoc --go_out=plugins=grpc:. 
*.proto +``` + +### How to turn on logging + +The default logger is controlled by environment variables. Turn everything on +like this: + +```console +$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99 +$ export GRPC_GO_LOG_SEVERITY_LEVEL=info +``` + +### The RPC failed with error `"code = Unavailable desc = transport is closing"` + +This error means the connection the RPC is using was closed, and there are many +possible reasons, including: + 1. mis-configured transport credentials, connection failed on handshaking + 1. bytes disrupted, possibly by a proxy in between + 1. server shutdown + 1. Keepalive parameters caused connection shutdown, for example if you have configured + your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + to allow longer RPC calls to finish. + +It can be tricky to debug this because the error happens on the client side but +the root cause of the connection being closed is on the server side. Turn on +logging on __both client and server__, and see if there are any transport +errors. + +[API]: https://pkg.go.dev/google.golang.org/grpc +[Go]: https://golang.org +[Go module]: https://github.com/golang/go/wiki/Modules +[gRPC]: https://grpc.io +[Go gRPC docs]: https://grpc.io/docs/languages/go +[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608 +[quick start]: https://grpc.io/docs/languages/go/quickstart +[go-releases]: https://golang.org/doc/devel/release.html diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md new file mode 100644 index 000000000..be6e10870 --- /dev/null +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -0,0 +1,3 @@ +# Security Policy + +For information on the gRPC Security Policy and reporting potential security issues, please see the [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go new file mode 100644 index 000000000..ae13ddac1 --- /dev/null +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -0,0 +1,101 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package attributes defines a generic key/value store used in various gRPC +// components. +// +// Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package attributes + +// Attributes is an immutable struct for storing and retrieving generic +// key/value pairs. Keys must be hashable, and users should define their own +// types for keys. Values should not be modified after they are added to an +// Attributes or if they were received from one.
If values implement 'Equal(o +// interface{}) bool', it will be called by (*Attributes).Equal to determine +// whether two values with the same key should be considered equal. +type Attributes struct { + m map[interface{}]interface{} +} + +// New returns a new Attributes containing the key/value pair. +func New(key, value interface{}) *Attributes { + return &Attributes{m: map[interface{}]interface{}{key: value}} +} + +// WithValue returns a new Attributes containing the previous keys and values +// and the new key/value pair. If the same key appears multiple times, the +// last value overwrites all previous values for that key. To remove an +// existing key, use a nil value. value should not be modified later. +func (a *Attributes) WithValue(key, value interface{}) *Attributes { + if a == nil { + return New(key, value) + } + n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} + for k, v := range a.m { + n.m[k] = v + } + n.m[key] = value + return n +} + +// Value returns the value associated with these attributes for key, or nil if +// no value is associated with key. The returned value should not be modified. +func (a *Attributes) Value(key interface{}) interface{} { + if a == nil { + return nil + } + return a.m[key] +} + +// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) +// bool' is implemented for a value in the attributes, it is called to +// determine if the value matches the one stored in the other attributes. If +// Equal is not implemented, standard equality is used to determine if the two +// values are equal. Note that some types (e.g. maps) aren't comparable by +// default, so they must be wrapped in a struct, or in an alias type, with Equal +// defined. +func (a *Attributes) Equal(o *Attributes) bool { + if a == nil && o == nil { + return true + } + if a == nil || o == nil { + return false + } + if len(a.m) != len(o.m) { + return false + } + for k, v := range a.m { + ov, ok := o.m[k] + if !ok { + // o missing element of a + return false + } + if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { + if !eq.Equal(ov) { + return false + } + } else if v != ov { + // Fall back to a standard equality check if Equal is unimplemented. + return false + } + } + return true +} diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go new file mode 100644 index 000000000..542594f5c --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff.go @@ -0,0 +1,61 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// See internal/backoff package for the backoff implementation. This file is +// kept for the exported types and API backward compatibility. + +package grpc + +import ( + "time" + + "google.golang.org/grpc/backoff" +) + +// DefaultBackoffConfig uses values specified for backoff in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
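// Editor's note: a minimal, standalone sketch of using the attributes package
// shown above. The zoneKey type and the zone values are hypothetical, invented
// purely for illustration; this is not part of the vendored source.
package attributesexample

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// zoneKey is an unexported key type, as the attributes docs recommend.
type zoneKey struct{}

// Example shows that Attributes values are immutable and that the last
// WithValue for a key wins.
func Example() {
	a := attributes.New(zoneKey{}, "zone-a")
	b := a.WithValue(zoneKey{}, "zone-b") // a is unchanged; b holds the new value
	fmt.Println(a.Value(zoneKey{}))       // zone-a
	fmt.Println(b.Value(zoneKey{}))       // zone-b
}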
+var DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, +} + +// BackoffConfig defines the parameters for the default gRPC backoff strategy. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. +type BackoffConfig struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// ConnectParams defines the parameters for connecting and retrying. Users are +// encouraged to use this instead of the BackoffConfig type defined above. See +// here for more details: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ConnectParams struct { + // Backoff specifies the configuration options for connection backoff. + Backoff backoff.Config + // MinConnectTimeout is the minimum amount of time we are willing to give a + // connection to complete. + MinConnectTimeout time.Duration +} diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go new file mode 100644 index 000000000..0787d0b50 --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff provides configuration options for backoff. +// +// More details can be found at: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// All APIs in this package are experimental. +package backoff + +import "time" + +// Config defines the configuration options for backoff. +type Config struct { + // BaseDelay is the amount of time to backoff after the first failure. + BaseDelay time.Duration + // Multiplier is the factor with which to multiply backoffs after a + // failed retry. Should ideally be greater than 1. + Multiplier float64 + // Jitter is the factor with which backoffs are randomized. + Jitter float64 + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// DefaultConfig is a backoff configuration with the default values specified +// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// This should be useful for callers who want to configure backoff with +// non-default values only for a subset of the options. +var DefaultConfig = Config{ + BaseDelay: 1.0 * time.Second, + Multiplier: 1.6, + Jitter: 0.2, + MaxDelay: 120 * time.Second, +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go new file mode 100644 index 000000000..bcc6f5451 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -0,0 +1,425 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
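// Editor's note: a hedged sketch of wiring the ConnectParams and backoff.Config
// types shown above into a dial call. The target address and the use of
// insecure credentials are assumptions for illustration, not part of the
// vendored source.
package backoffexample

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func dial() *grpc.ClientConn {
	conn, err := grpc.Dial(
		"localhost:50051", // hypothetical target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff: backoff.Config{
				BaseDelay:  1 * time.Second,
				Multiplier: 1.6,
				Jitter:     0.2,
				MaxDelay:   30 * time.Second, // tighter bound than DefaultConfig's 120s
			},
			MinConnectTimeout: 5 * time.Second,
		}),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	return conn
}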
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer defines APIs for load balancing in gRPC. +// All APIs in this package are experimental. +package balancer + +import ( + "context" + "encoding/json" + "errors" + "net" + "strings" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // m is a map from name to balancer builder. + m = make(map[string]Builder) +) + +// Register registers the balancer builder to the balancer map. b.Name +// (lowercased) will be used as the name registered with this builder. If the +// Builder implements ConfigParser, ParseConfig will be called when new service +// configs are received by the resolver, and the result will be provided to the +// Balancer in UpdateClientConnState. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Balancers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { + m[strings.ToLower(b.Name())] = b +} + +// unregisterForTesting deletes the balancer with the given name from the +// balancer map. +// +// This function is not thread-safe. +func unregisterForTesting(name string) { + delete(m, name) +} + +func init() { + internal.BalancerUnregister = unregisterForTesting +} + +// Get returns the balancer builder registered with the given name. +// Note that the compare is done in a case-insensitive fashion. +// If no builder is registered with the name, nil will be returned. +func Get(name string) Builder { + if b, ok := m[strings.ToLower(name)]; ok { + return b + } + return nil +} + +// A SubConn represents a single connection to a gRPC backend service. +// +// Each SubConn contains a list of addresses. +// +// All SubConns start in IDLE, and will not try to connect. To trigger the +// connecting, Balancers must call Connect. If a connection re-enters IDLE, +// Balancers must call Connect again to trigger a new connection attempt. +// +// gRPC will try to connect to the addresses in sequence, and stop trying the +// remainder once the first connection is successful. If an attempt to connect +// to all addresses encounters an error, the SubConn will enter +// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. +// +// Once established, if a connection is lost, the SubConn will transition +// directly to IDLE. +// +// This interface is to be implemented by gRPC. Users should not need their own +// implementation of this interface. For situations like testing, any +// implementations should embed this interface. This allows gRPC to add new +// methods to this interface. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if the currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. + // If it's not in the list, the connection will be gracefully closed, and + // a new connection will be created.
+ // + // This will trigger a state transition for the SubConn. + // + // Deprecated: This method is now part of the ClientConn interface and will + // eventually be removed from here. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() +} + +// NewSubConnOptions contains options to create new SubConn. +type NewSubConnOptions struct { + // CredsBundle is the credentials bundle that will be used in the created + // SubConn. If it's nil, the original creds from grpc DialOptions will be + // used. + // + // Deprecated: Use the Attributes field in resolver.Address to pass + // arbitrary data to the credential handshaker. + CredsBundle credentials.Bundle + // HealthCheckEnabled indicates whether health check service should be + // enabled on this SubConn + HealthCheckEnabled bool +} + +// State contains the balancer's state relevant to the gRPC ClientConn. +type State struct { + // State contains the connectivity state of the balancer, which is used to + // determine the state of the ClientConn. + ConnectivityState connectivity.State + // Picker is used to choose connections (SubConns) for RPCs. + Picker Picker +} + +// ClientConn represents a gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // NewSubConn is called by balancer to create a new SubConn. + // It doesn't block and wait for the connections to be established. + // Behaviors of the SubConn can be controlled by options. + NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) + // RemoveSubConn removes the SubConn from ClientConn. + // The SubConn will be shutdown. + RemoveSubConn(SubConn) + // UpdateAddresses updates the addresses used in the passed in SubConn. + // gRPC checks if the currently connected address is still in the new list. + // If so, the connection will be kept. Else, the connection will be + // gracefully closed, and a new connection will be created. + // + // This will trigger a state transition for the SubConn. + UpdateAddresses(SubConn, []resolver.Address) + + // UpdateState notifies gRPC that the balancer's internal state has + // changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call + // Pick on the new Picker to pick new SubConns. + UpdateState(State) + + // ResolveNow is called by balancer to notify gRPC to do a name resolving. + ResolveNow(resolver.ResolveNowOptions) + + // Target returns the dial target for this ClientConn. + // + // Deprecated: Use the Target field in the BuildOptions instead. + Target() string +} + +// BuildOptions contains additional information for Build. +type BuildOptions struct { + // DialCreds is the transport credentials to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. + CredsBundle credentials.Bundle + // Dialer is the custom dialer to use when communicating with a remote load + // balancer server. 
Balancer implementations which do not communicate with a + // remote load balancer server can ignore this field. + Dialer func(context.Context, string) (net.Conn, error) + // Authority is the server name to use as part of the authentication + // handshake when communicating with a remote load balancer server. Balancer + // implementations which do not communicate with a remote load balancer + // server can ignore this field. + Authority string + // ChannelzParentID is the parent ClientConn's channelz ID. + ChannelzParentID int64 + // CustomUserAgent is the custom user agent set on the parent ClientConn. + // The balancer should set the same custom user agent if it creates a + // ClientConn. + CustomUserAgent string + // Target contains the parsed address info of the dial target. It is the + // same resolver.Target as passed to the resolver. See the documentation for + // the resolver.Target type for details about what it contains. + Target resolver.Target +} + +// Builder creates a balancer. +type Builder interface { + // Build creates a new balancer with the ClientConn. + Build(cc ClientConn, opts BuildOptions) Balancer + // Name returns the name of balancers built by this builder. + // It will be used to pick balancers (for example in service config). + Name() string +} + +// ConfigParser parses load balancer configs. +type ConfigParser interface { + // ParseConfig parses the JSON load balancer config provided into an + // internal form or returns an error if the config is invalid. For future + // compatibility reasons, unknown fields in the config should be ignored. + ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) +} + +// PickInfo contains additional information for the Pick operation. +type PickInfo struct { + // FullMethodName is the method name that NewClientStream() is called + // with. The canonical format is /service/Method. + FullMethodName string + // Ctx is the RPC's context, and may contain relevant RPC-level information + // like the outgoing header metadata. + Ctx context.Context +} + +// DoneInfo contains additional information for done. +type DoneInfo struct { + // Err is the rpc error the RPC finished with. It could be nil. + Err error + // Trailer contains the metadata from the RPC's trailer, if present. + Trailer metadata.MD + // BytesSent indicates if any bytes have been sent to the server. + BytesSent bool + // BytesReceived indicates if any byte has been received from the server. + BytesReceived bool + // ServerLoad is the load received from server. It's usually sent as part of + // trailing metadata. + // + // The only supported type now is *orca_v1.LoadReport. + ServerLoad interface{} +} + +var ( + // ErrNoSubConnAvailable indicates no SubConn is available for pick(). + // gRPC will block the RPC until a new picker is available via UpdateState(). + ErrNoSubConnAvailable = errors.New("no SubConn is available") + // ErrTransientFailure indicates all SubConns are in TransientFailure. + // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. + // + // Deprecated: return an appropriate error based on the last resolution or + // connection attempt instead. The behavior is the same for any non-gRPC + // status error. + ErrTransientFailure = errors.New("all SubConns are in TransientFailure") +) + +// PickResult contains information related to a connection chosen for an RPC. +type PickResult struct { + // SubConn is the connection to use for this pick, if its state is Ready. 
+ // If the state is not Ready, gRPC will block the RPC until a new Picker is + // provided by the balancer (using ClientConn.UpdateState). The SubConn + // must be one returned by ClientConn.NewSubConn. + SubConn SubConn + + // Done is called when the RPC is completed. If the SubConn is not ready, + // this will be called with a nil parameter. If the SubConn is not a valid + // type, Done may not be called. May be nil if the balancer does not wish + // to be notified when the RPC completes. + Done func(DoneInfo) +} + +// TransientFailureError returns e. It exists for backward compatibility and +// will be deleted soon. +// +// Deprecated: no longer necessary, picker errors are treated this way by +// default. +func TransientFailureError(e error) error { return e } + +// Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot every time its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateState(). +type Picker interface { + // Pick returns the connection to use for this RPC and related information. + // + // Pick should not block. If the balancer needs to do I/O or any blocking + // or time-consuming work to service this call, it should return + // ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when + // the Picker is updated (using ClientConn.UpdateState). + // + // If an error is returned: + // + // - If the error is ErrNoSubConnAvailable, gRPC will block until a new + // Picker is provided by the balancer (using ClientConn.UpdateState). + // + // - If the error is a status error (implemented by the grpc/status + // package), gRPC will terminate the RPC with the code and message + // provided. + // + // - For all other errors, wait for ready RPCs will wait, but non-wait for + // ready RPCs will be terminated with this error's Error() string and + // status code Unavailable. + Pick(info PickInfo) (PickResult, error) +} + +// Balancer takes input from gRPC, manages SubConns, and collects and aggregates +// the connectivity states. +// +// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. +// +// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are +// guaranteed to be called synchronously from the same goroutine. There's no +// guarantee on picker.Pick, it may be called anytime. +type Balancer interface { + // UpdateClientConnState is called by gRPC when the state of the ClientConn + // changes. If the error returned is ErrBadResolverState, the ClientConn + // will begin calling ResolveNow on the active name resolver with + // exponential backoff until a subsequent call to UpdateClientConnState + // returns a nil error. Any other errors are currently ignored. + UpdateClientConnState(ClientConnState) error + // ResolverError is called by gRPC when the name resolver reports an error. + ResolverError(error) + // UpdateSubConnState is called by gRPC when the state of a SubConn + // changes. + UpdateSubConnState(SubConn, SubConnState) + // Close closes the balancer. The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. + Close() +} + +// ExitIdler is an optional interface for balancers to implement. If +// implemented, ExitIdle will be called when ClientConn.Connect is called, if +// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause +// all SubConns to connect. 
+// +// Notice: it will be required for all balancers to implement this in a future +// release. +type ExitIdler interface { + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. + ConnectivityState connectivity.State + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error +} + +// ClientConnState describes the state of a ClientConn relevant to the +// balancer. +type ClientConnState struct { + ResolverState resolver.State + // The parsed load balancing configuration returned by the builder's + // ParseConfig method, if implemented. + BalancerConfig serviceconfig.LoadBalancingConfig +} + +// ErrBadResolverState may be returned by UpdateClientConnState to indicate a +// problem with the provided name resolver data. +var ErrBadResolverState = errors.New("bad resolver state") + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transient failure state. + numIdle uint64 // Number of addrConns in idle state. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else if at least one SubConn is TransientFailure, the aggregated state is Transient Failure; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else there are no subconns and the aggregated state is Transient Failure +// +// Shutdown is not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + case connectivity.Idle: + cse.numIdle += updateVal + } + } + + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + if cse.numTransientFailure > 0 { + return connectivity.TransientFailure + } + if cse.numIdle > 0 { + return connectivity.Idle + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go new file mode 100644 index 000000000..a67074a3a --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -0,0 +1,250 @@ +/* + * + * Copyright 2017 gRPC authors. 
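// Editor's note: a small standalone walkthrough (for illustration only) of the
// ConnectivityStateEvaluator shown above. A brand-new SubConn is recorded as a
// Shutdown -> Idle transition, and the aggregated state follows the precedence
// Ready > Connecting > TransientFailure > Idle implemented by RecordTransition.
package cseexample

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func walkthrough() {
	cse := &balancer.ConnectivityStateEvaluator{}
	fmt.Println(cse.RecordTransition(connectivity.Shutdown, connectivity.Idle))    // IDLE
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.Connecting))  // CONNECTING
	fmt.Println(cse.RecordTransition(connectivity.Connecting, connectivity.Ready)) // READY
}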
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package base + +import ( + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +var logger = grpclog.Component("balancer") + +type baseBuilder struct { + name string + pickerBuilder PickerBuilder + config Config +} + +func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + bal := &baseBalancer{ + cc: cc, + pickerBuilder: bb.pickerBuilder, + + subConns: resolver.NewAddressMap(), + scStates: make(map[balancer.SubConn]connectivity.State), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + config: bb.config, + } + // Initialize picker to a picker that always returns + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateState with this picker. + bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) + return bal +} + +func (bb *baseBuilder) Name() string { + return bb.name +} + +type baseBalancer struct { + cc balancer.ClientConn + pickerBuilder PickerBuilder + + csEvltr *balancer.ConnectivityStateEvaluator + state connectivity.State + + subConns *resolver.AddressMap + scStates map[balancer.SubConn]connectivity.State + picker balancer.Picker + config Config + + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure +} + +func (b *baseBalancer) ResolverError(err error) { + b.resolverErr = err + if b.subConns.Len() == 0 { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.picker, + }) +} + +func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + // TODO: handle s.ResolverState.ServiceConfig? + if logger.V(2) { + logger.Info("base.baseBalancer: got new ClientConn state: ", s) + } + // Successful resolution; clear resolver error and ensure we return nil. + b.resolverErr = nil + // addrsSet is the set converted from addrs, it's used for quick lookup of an address. + addrsSet := resolver.NewAddressMap() + for _, a := range s.ResolverState.Addresses { + addrsSet.Set(a, nil) + if _, ok := b.subConns.Get(a); !ok { + // a is a new address (not existing in b.subConns). 
+ sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + if err != nil { + logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + continue + } + b.subConns.Set(a, sc) + b.scStates[sc] = connectivity.Idle + b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) + sc.Connect() + } + } + for _, a := range b.subConns.Keys() { + sci, _ := b.subConns.Get(a) + sc := sci.(balancer.SubConn) + // a was removed by resolver. + if _, ok := addrsSet.Get(a); !ok { + b.cc.RemoveSubConn(sc) + b.subConns.Delete(a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in UpdateSubConnState. + } + } + // If resolver state contains no addresses, return an error so ClientConn + // will trigger re-resolve. Also record this as a resolver error, so when + // the overall state turns transient failure, the error message will have + // the zero address information. + if len(s.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + return nil +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error. Must only be called if b.state is TransientFailure. +func (b *baseBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. + if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker +// from it. The picker is +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. +func (b *baseBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = NewErrPicker(b.mergeErrors()) + return + } + readySCs := make(map[balancer.SubConn]SubConnInfo) + + // Collect all ready SCs from the full subConn map. + for _, addr := range b.subConns.Keys() { + sci, _ := b.subConns.Get(addr) + sc := sci.(balancer.SubConn) + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[sc] = SubConnInfo{Address: addr} + } + } + b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) +} + +func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + s := state.ConnectivityState + if logger.V(2) { + logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + } + oldS, ok := b.scStates[sc] + if !ok { + if logger.V(2) { + logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + } + return + } + if oldS == connectivity.TransientFailure && + (s == connectivity.Connecting || s == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + if s == connectivity.Idle { + sc.Connect() + } + return + } + b.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates.
Remove state for this sc here. + delete(b.scStates, sc) + case connectivity.TransientFailure: + // Save error to be reported via picker. + b.connErr = state.ConnectionError + } + + b.state = b.csEvltr.RecordTransition(oldS, s) + + // Regenerate picker when one of the following happens: + // - this sc entered or left ready + // - the aggregated state of balancer is TransientFailure + // (may need to update error message) + if (s == connectivity.Ready) != (oldS == connectivity.Ready) || + b.state == connectivity.TransientFailure { + b.regeneratePicker() + } + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) +} + +// Close is a nop because base balancer doesn't have internal state to clean up, +// and it doesn't need to call RemoveSubConn for the SubConns. +func (b *baseBalancer) Close() { +} + +// ExitIdle is a nop because the base balancer attempts to stay connected to +// all SubConns at all times. +func (b *baseBalancer) ExitIdle() { +} + +// NewErrPicker returns a Picker that always returns err on Pick(). +func NewErrPicker(err error) balancer.Picker { + return &errPicker{err: err} +} + +// NewErrPickerV2 is temporarily defined for backward compatibility reasons. +// +// Deprecated: use NewErrPicker instead. +var NewErrPickerV2 = NewErrPicker + +type errPicker struct { + err error // Pick() always returns this err. +} + +func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{}, p.err +} diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go new file mode 100644 index 000000000..e31d76e33 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -0,0 +1,71 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package base defines a balancer base that can be used to build balancers with +// different picking algorithms. +// +// The base balancer creates a new SubConn for each resolved address. The +// provided picker will only be notified about READY SubConns. +// +// This package is the base of round_robin balancer, its purpose is to be used +// to build round_robin like balancers with complex picking algorithms. +// Balancers with more complicated logic should try to implement a balancer +// builder from scratch. +// +// All APIs in this package are experimental. +package base + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// PickerBuilder creates balancer.Picker. +type PickerBuilder interface { + // Build returns a picker that will be used by gRPC to pick a SubConn. + Build(info PickerBuildInfo) balancer.Picker +} + +// PickerBuildInfo contains information needed by the picker builder to +// construct a picker. +type PickerBuildInfo struct { + // ReadySCs is a map from all ready SubConns to the Addresses used to + // create them. 
+ ReadySCs map[balancer.SubConn]SubConnInfo +} + +// SubConnInfo contains information about a SubConn created by the base +// balancer. +type SubConnInfo struct { + Address resolver.Address // the address used to create this SubConn +} + +// Config contains the config info about the base balancer builder. +type Config struct { + // HealthCheck indicates whether health checking should be enabled for this specific balancer. + HealthCheck bool +} + +// NewBalancerBuilder returns a base balancer builder configured by the provided config. +func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder { + return &baseBuilder{ + name: name, + pickerBuilder: pb, + config: config, + } +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go new file mode 100644 index 000000000..4ecfa1c21 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package state declares grpclb types to be set by resolvers wishing to pass +// information to grpclb via resolver.State Attributes. +package state + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.grpclb.state") + +// State contains gRPCLB-relevant data passed from the name resolver. +type State struct { + // BalancerAddresses contains the remote load balancer address(es). If + // set, overrides any resolver-provided addresses with Type of GRPCLB. + BalancerAddresses []resolver.Address +} + +// Set returns a copy of the provided state with attributes containing s. s's +// data should not be mutated after calling Set. +func Set(state resolver.State, s *State) resolver.State { + state.Attributes = state.Attributes.WithValue(key, s) + return state +} + +// Get returns the grpclb State in the resolver.State, or nil if not present. +// The returned data should not be mutated. +func Get(state resolver.State) *State { + s, _ := state.Attributes.Value(key).(*State) + return s +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go new file mode 100644 index 000000000..274eb2f85 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -0,0 +1,83 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
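// Editor's note: a hedged sketch of building a custom balancer on top of the
// base package shown above. The policy name "first_pick" and all identifiers
// here are invented for illustration; real deployments would select this
// policy via service config.
package firstpick

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

// Name is the hypothetical policy name used in service config.
const Name = "first_pick"

func init() {
	// Register must run at init time, per the balancer package docs.
	balancer.Register(base.NewBalancerBuilder(Name, &fpBuilder{}, base.Config{HealthCheck: true}))
}

type fpBuilder struct{}

// Build receives only READY SubConns, per the base package contract.
func (*fpBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	if len(info.ReadySCs) == 0 {
		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
	}
	var chosen balancer.SubConn
	for sc := range info.ReadySCs {
		chosen = sc // map iteration order is random; a real policy might sort by address
		break
	}
	return &fpPicker{sc: chosen}
}

type fpPicker struct{ sc balancer.SubConn }

// Pick never blocks, as the Picker contract above requires.
func (p *fpPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{SubConn: p.sc}, nil
}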
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package roundrobin defines a roundrobin balancer. The roundrobin balancer
+// is installed as one of the default balancers in gRPC, so users don't need
+// to install it explicitly.
+package roundrobin
+
+import (
+	"sync"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/base"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/grpcrand"
+)
+
+// Name is the name of the round_robin balancer.
+const Name = "round_robin"
+
+var logger = grpclog.Component("roundrobin")
+
+// newBuilder creates a new roundrobin balancer builder.
+func newBuilder() balancer.Builder {
+	return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
+}
+
+func init() {
+	balancer.Register(newBuilder())
+}
+
+type rrPickerBuilder struct{}
+
+func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
+	logger.Infof("roundrobinPicker: Build called with info: %v", info)
+	if len(info.ReadySCs) == 0 {
+		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
+	}
+	scs := make([]balancer.SubConn, 0, len(info.ReadySCs))
+	for sc := range info.ReadySCs {
+		scs = append(scs, sc)
+	}
+	return &rrPicker{
+		subConns: scs,
+		// Start at a random index, as the same RR balancer rebuilds a new
+		// picker when SubConn states change, and we don't want to apply excess
+		// load to the first server in the list.
+		next: grpcrand.Intn(len(scs)),
+	}
+}
+
+type rrPicker struct {
+	// subConns is the snapshot of the roundrobin balancer when this picker
+	// was created. The slice is immutable. Each Pick() does a round-robin
+	// selection over it and returns the selected SubConn.
+	subConns []balancer.SubConn
+
+	mu   sync.Mutex
+	next int
+}
+
+func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	p.mu.Lock()
+	sc := p.subConns[p.next]
+	p.next = (p.next + 1) % len(p.subConns)
+	p.mu.Unlock()
+	return balancer.PickResult{SubConn: sc}, nil
+}
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
new file mode 100644
index 000000000..f4ea61746
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
@@ -0,0 +1,292 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"fmt"
+	"sync"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/internal/buffer"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/resolver"
+)
+
+// scStateUpdate contains the subConn and the new state it changed to.
+type scStateUpdate struct {
+	sc    balancer.SubConn
+	state connectivity.State
+	err   error
+}
+
+// exitIdle contains no data and is just a signal sent on the updateCh in
+// ccBalancerWrapper to instruct the balancer to exit idle.
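+//
+// The signal travels through the same unbounded update channel as SubConn
+// state updates. A minimal sketch of that producer/consumer pattern
+// (illustrative only; the real plumbing is in exitIdle and watcher below):
+//
+//	ccb.updateCh.Put(exitIdle{}) // producer side; never blocks
+//	t := <-ccb.updateCh.Get()    // watcher goroutine receives the update
+//	ccb.updateCh.Load()          // advance the unbounded buffer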
+type exitIdle struct{}
+
+// ccBalancerWrapper is a wrapper on top of cc for balancers.
+// It implements the balancer.ClientConn interface.
+type ccBalancerWrapper struct {
+	cc          *ClientConn
+	balancerMu  sync.Mutex // synchronizes calls to the balancer
+	balancer    balancer.Balancer
+	hasExitIdle bool
+	updateCh    *buffer.Unbounded
+	closed      *grpcsync.Event
+	done        *grpcsync.Event
+
+	mu       sync.Mutex
+	subConns map[*acBalancerWrapper]struct{}
+}
+
+func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
+	ccb := &ccBalancerWrapper{
+		cc:       cc,
+		updateCh: buffer.NewUnbounded(),
+		closed:   grpcsync.NewEvent(),
+		done:     grpcsync.NewEvent(),
+		subConns: make(map[*acBalancerWrapper]struct{}),
+	}
+	go ccb.watcher()
+	ccb.balancer = b.Build(ccb, bopts)
+	_, ccb.hasExitIdle = ccb.balancer.(balancer.ExitIdler)
+	return ccb
+}
+
+// watcher calls balancer functions sequentially, so the balancer can be
+// implemented lock-free.
+func (ccb *ccBalancerWrapper) watcher() {
+	for {
+		select {
+		case t := <-ccb.updateCh.Get():
+			ccb.updateCh.Load()
+			if ccb.closed.HasFired() {
+				break
+			}
+			switch u := t.(type) {
+			case *scStateUpdate:
+				ccb.balancerMu.Lock()
+				ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err})
+				ccb.balancerMu.Unlock()
+			case *acBalancerWrapper:
+				ccb.mu.Lock()
+				if ccb.subConns != nil {
+					delete(ccb.subConns, u)
+					ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain)
+				}
+				ccb.mu.Unlock()
+			case exitIdle:
+				if ccb.cc.GetState() == connectivity.Idle {
+					if ei, ok := ccb.balancer.(balancer.ExitIdler); ok {
+						// We already checked that the balancer implements
+						// ExitIdle before pushing the event to updateCh, but
+						// check again here as defensive programming.
+						ccb.balancerMu.Lock()
+						ei.ExitIdle()
+						ccb.balancerMu.Unlock()
+					}
+				}
+			default:
+				logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t)
+			}
+		case <-ccb.closed.Done():
+		}
+
+		if ccb.closed.HasFired() {
+			ccb.balancerMu.Lock()
+			ccb.balancer.Close()
+			ccb.balancerMu.Unlock()
+			ccb.mu.Lock()
+			scs := ccb.subConns
+			ccb.subConns = nil
+			ccb.mu.Unlock()
+			ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
+			ccb.done.Fire()
+			// Fire done before removing the addr conns. We can safely unblock
+			// ccb.close and allow the removeAddrConns to happen
+			// asynchronously.
+			for acbw := range scs {
+				ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
+			}
+			return
+		}
+	}
+}
+
+func (ccb *ccBalancerWrapper) close() {
+	ccb.closed.Fire()
+	<-ccb.done.Done()
+}
+
+func (ccb *ccBalancerWrapper) exitIdle() bool {
+	if !ccb.hasExitIdle {
+		return false
+	}
+	ccb.updateCh.Put(exitIdle{})
+	return true
+}
+
+func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
+	// When updating addresses for a SubConn, if the address in use is not in
+	// the new addresses, the old ac will be tearDown() and a new ac will be
+	// created. tearDown() generates a state change with Shutdown state, and
+	// we don't want the balancer to receive this state change. So before
+	// tearDown() on the old ac, ac.acbw (the acBalancerWrapper) will be set
+	// to nil, and this function will be called with (nil, Shutdown). We don't
+	// need to call any balancer method in this case.
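+	//
+	// In outline, the teardown path that produces the (nil, Shutdown) update
+	// looks like this (an illustrative sketch, not literal code):
+	//
+	//	ac.acbw = nil    // detach the wrapper first
+	//	ac.tearDown(...) // resulting Shutdown state change is dropped here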
+	if sc == nil {
+		return
+	}
+	ccb.updateCh.Put(&scStateUpdate{
+		sc:    sc,
+		state: s,
+		err:   err,
+	})
+}
+
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
+	ccb.balancerMu.Lock()
+	defer ccb.balancerMu.Unlock()
+	return ccb.balancer.UpdateClientConnState(*ccs)
+}
+
+func (ccb *ccBalancerWrapper) resolverError(err error) {
+	ccb.balancerMu.Lock()
+	defer ccb.balancerMu.Unlock()
+	ccb.balancer.ResolverError(err)
+}
+
+func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	if len(addrs) == 0 {
+		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
+	}
+	ccb.mu.Lock()
+	defer ccb.mu.Unlock()
+	if ccb.subConns == nil {
+		return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
+	}
+	ac, err := ccb.cc.newAddrConn(addrs, opts)
+	if err != nil {
+		return nil, err
+	}
+	acbw := &acBalancerWrapper{ac: ac}
+	acbw.ac.mu.Lock()
+	ac.acbw = acbw
+	acbw.ac.mu.Unlock()
+	ccb.subConns[acbw] = struct{}{}
+	return acbw, nil
+}
+
+func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+	// RemoveSubConn() is handled in the watcher() goroutine, to avoid a
+	// deadlock during switchBalancer() if the old balancer calls
+	// RemoveSubConn() in its Close().
+	ccb.updateCh.Put(sc)
+}
+
+func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+	acbw, ok := sc.(*acBalancerWrapper)
+	if !ok {
+		return
+	}
+	acbw.UpdateAddresses(addrs)
+}
+
+func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
+	ccb.mu.Lock()
+	defer ccb.mu.Unlock()
+	if ccb.subConns == nil {
+		return
+	}
+	// Update the picker before updating the state. The ordering is not
+	// strictly required, but updating the state first could cause extra Pick
+	// calls in the common start-up case where an RPC waits for ready: the
+	// "connecting" picker would be invoked on the state update, and the
+	// "ready" picker only after the picker update arrives.
+	ccb.cc.blockingpicker.updatePicker(s.Picker)
+	ccb.cc.csMgr.updateState(s.ConnectivityState)
+}
+
+func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
+	ccb.cc.resolveNow(o)
+}
+
+func (ccb *ccBalancerWrapper) Target() string {
+	return ccb.cc.target
+}
+
+// acBalancerWrapper is a wrapper on top of ac for balancers.
+// It implements the balancer.SubConn interface.
+type acBalancerWrapper struct {
+	mu sync.Mutex
+	ac *addrConn
+}
+
+func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
+	acbw.mu.Lock()
+	defer acbw.mu.Unlock()
+	if len(addrs) == 0 {
+		acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
+		return
+	}
+	if !acbw.ac.tryUpdateAddrs(addrs) {
+		cc := acbw.ac.cc
+		opts := acbw.ac.scopts
+		acbw.ac.mu.Lock()
+		// Set the old ac.acbw to nil so the Shutdown state update will be
+		// ignored by the balancer.
+		//
+		// TODO(bar): the state transition could be wrong when tearing down
+		// the old ac and creating a new one; fix the transition.
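+		//
+		// In outline, the replacement below proceeds as follows
+		// (illustrative summary of the code that follows):
+		//
+		//	1. detach the wrapper and remove the old ac (errConnDrain)
+		//	2. create a new ac via cc.newAddrConn(addrs, opts) and re-attach
+		//	3. connect the new ac unless the old one was Idle (Shutdown
+		//	   returns early)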
+ acbw.ac.acbw = nil + acbw.ac.mu.Unlock() + acState := acbw.ac.getState() + acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) + + if acState == connectivity.Shutdown { + return + } + + newAC, err := cc.newAddrConn(addrs, opts) + if err != nil { + channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + return + } + acbw.ac = newAC + newAC.mu.Lock() + newAC.acbw = acbw + newAC.mu.Unlock() + if acState != connectivity.Idle { + go newAC.connect() + } + } +} + +func (acbw *acBalancerWrapper) Connect() { + acbw.mu.Lock() + defer acbw.mu.Unlock() + go acbw.ac.connect() +} + +func (acbw *acBalancerWrapper) getAddrConn() *addrConn { + acbw.mu.Lock() + defer acbw.mu.Unlock() + return acbw.ac +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go new file mode 100644 index 000000000..ed75290cd --- /dev/null +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -0,0 +1,1187 @@ +// Copyright 2018 The gRPC Authors +// All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/binlog/v1/binarylog.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.14.0 +// source: grpc/binlog/v1/binarylog.proto + +package grpc_binarylog_v1 + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// Enumerates the type of event +// Note the terminology is different from the RPC semantics +// definition, but the same meaning is expressed here. 
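+//
+// For a successful unary RPC, a logger typically emits entries in this order
+// (an illustrative sequence, not a normative guarantee):
+//
+//	EVENT_TYPE_CLIENT_HEADER
+//	EVENT_TYPE_CLIENT_MESSAGE
+//	EVENT_TYPE_CLIENT_HALF_CLOSE
+//	EVENT_TYPE_SERVER_HEADER
+//	EVENT_TYPE_SERVER_MESSAGE
+//	EVENT_TYPE_SERVER_TRAILER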
+type GrpcLogEntry_EventType int32 + +const ( + GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0 + // Header sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1 + // Header sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2 + // Message sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3 + // Message sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4 + // A signal that client is done sending + GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5 + // Trailer indicates the end of the RPC. + // On client side, this event means a trailer was either received + // from the network or the gRPC library locally generated a status + // to inform the application about a failure. + // On server side, this event means the server application requested + // to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after + // this due to races on server side. + GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6 + // A signal that the RPC is cancelled. On client side, this + // indicates the client application requests a cancellation. + // On server side, this indicates that cancellation was detected. + // Note: This marks the end of the RPC. Events may arrive after + // this due to races. For example, on client side a trailer + // may arrive even though the application requested to cancel the RPC. + GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 +) + +// Enum value maps for GrpcLogEntry_EventType. +var ( + GrpcLogEntry_EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNKNOWN", + 1: "EVENT_TYPE_CLIENT_HEADER", + 2: "EVENT_TYPE_SERVER_HEADER", + 3: "EVENT_TYPE_CLIENT_MESSAGE", + 4: "EVENT_TYPE_SERVER_MESSAGE", + 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", + 6: "EVENT_TYPE_SERVER_TRAILER", + 7: "EVENT_TYPE_CANCEL", + } + GrpcLogEntry_EventType_value = map[string]int32{ + "EVENT_TYPE_UNKNOWN": 0, + "EVENT_TYPE_CLIENT_HEADER": 1, + "EVENT_TYPE_SERVER_HEADER": 2, + "EVENT_TYPE_CLIENT_MESSAGE": 3, + "EVENT_TYPE_SERVER_MESSAGE": 4, + "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, + "EVENT_TYPE_SERVER_TRAILER": 6, + "EVENT_TYPE_CANCEL": 7, + } +) + +func (x GrpcLogEntry_EventType) Enum() *GrpcLogEntry_EventType { + p := new(GrpcLogEntry_EventType) + *p = x + return p +} + +func (x GrpcLogEntry_EventType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_EventType) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[0].Descriptor() +} + +func (GrpcLogEntry_EventType) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[0] +} + +func (x GrpcLogEntry_EventType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_EventType.Descriptor instead. +func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 0} +} + +// Enumerates the entity that generates the log entry +type GrpcLogEntry_Logger int32 + +const ( + GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0 + GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1 + GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2 +) + +// Enum value maps for GrpcLogEntry_Logger. 
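+// These generated maps back String() and name/value lookups, e.g.
+// (illustrative): GrpcLogEntry_LOGGER_CLIENT.String() == "LOGGER_CLIENT"
+// and GrpcLogEntry_Logger_value["LOGGER_SERVER"] == 2.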
+var ( + GrpcLogEntry_Logger_name = map[int32]string{ + 0: "LOGGER_UNKNOWN", + 1: "LOGGER_CLIENT", + 2: "LOGGER_SERVER", + } + GrpcLogEntry_Logger_value = map[string]int32{ + "LOGGER_UNKNOWN": 0, + "LOGGER_CLIENT": 1, + "LOGGER_SERVER": 2, + } +) + +func (x GrpcLogEntry_Logger) Enum() *GrpcLogEntry_Logger { + p := new(GrpcLogEntry_Logger) + *p = x + return p +} + +func (x GrpcLogEntry_Logger) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_Logger) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[1].Descriptor() +} + +func (GrpcLogEntry_Logger) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[1] +} + +func (x GrpcLogEntry_Logger) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_Logger.Descriptor instead. +func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 1} +} + +type Address_Type int32 + +const ( + Address_TYPE_UNKNOWN Address_Type = 0 + // address is in 1.2.3.4 form + Address_TYPE_IPV4 Address_Type = 1 + // address is in IPv6 canonical form (RFC5952 section 4) + // The scope is NOT included in the address string. + Address_TYPE_IPV6 Address_Type = 2 + // address is UDS string + Address_TYPE_UNIX Address_Type = 3 +) + +// Enum value maps for Address_Type. +var ( + Address_Type_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_IPV4", + 2: "TYPE_IPV6", + 3: "TYPE_UNIX", + } + Address_Type_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_IPV4": 1, + "TYPE_IPV6": 2, + "TYPE_UNIX": 3, + } +) + +func (x Address_Type) Enum() *Address_Type { + p := new(Address_Type) + *p = x + return p +} + +func (x Address_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Address_Type) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[2].Descriptor() +} + +func (Address_Type) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[2] +} + +func (x Address_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Address_Type.Descriptor instead. +func (Address_Type) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7, 0} +} + +// Log entry we store in binary logs +type GrpcLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The timestamp of the binary log message + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Uniquely identifies a call. The value must not be 0 in order to disambiguate + // from an unset value. + // Each call may have several log entries, they will all have the same call_id. + // Nothing is guaranteed about their value other than they are unique across + // different RPCs in the same gRPC process. + CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"` + // The entry sequence id for this call. The first GrpcLogEntry has a + // value of 1, to disambiguate from an unset value. The purpose of + // this field is to detect missing entries in environments where + // durability or ordering is not guaranteed. 
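+	// A log reader can use it to detect dropped entries, e.g.
+	// (illustrative): within one call_id, an entry whose
+	// GetSequenceIdWithinCall() != prev+1 means at least one entry was lost.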
+ SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` + Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` + Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // One of the above Logger enum + // The logger uses one of the following fields to record the payload, + // according to the type of the log entry. + // + // Types that are assignable to Payload: + // *GrpcLogEntry_ClientHeader + // *GrpcLogEntry_ServerHeader + // *GrpcLogEntry_Message + // *GrpcLogEntry_Trailer + Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"` + // true if payload does not represent the full message or metadata. + PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"` + // Peer address information, will only be recorded on the first + // incoming event. On client side, peer is logged on + // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in + // the case of trailers-only. On server side, peer is always + // logged on EVENT_TYPE_CLIENT_HEADER. + Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` +} + +func (x *GrpcLogEntry) Reset() { + *x = GrpcLogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcLogEntry) ProtoMessage() {} + +func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcLogEntry.ProtoReflect.Descriptor instead. 
+func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.Timestamp
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetCallId() uint64 {
+	if x != nil {
+		return x.CallId
+	}
+	return 0
+}
+
+func (x *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
+	if x != nil {
+		return x.SequenceIdWithinCall
+	}
+	return 0
+}
+
+func (x *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
+	if x != nil {
+		return x.Type
+	}
+	return GrpcLogEntry_EVENT_TYPE_UNKNOWN
+}
+
+func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
+	if x != nil {
+		return x.Logger
+	}
+	return GrpcLogEntry_LOGGER_UNKNOWN
+}
+
+func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
+	if m != nil {
+		return m.Payload
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetClientHeader() *ClientHeader {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
+		return x.ClientHeader
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetServerHeader() *ServerHeader {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
+		return x.ServerHeader
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetMessage() *Message {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok {
+		return x.Message
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetTrailer() *Trailer {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok {
+		return x.Trailer
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetPayloadTruncated() bool {
+	if x != nil {
+		return x.PayloadTruncated
+	}
+	return false
+}
+
+func (x *GrpcLogEntry) GetPeer() *Address {
+	if x != nil {
+		return x.Peer
+	}
+	return nil
+}
+
+type isGrpcLogEntry_Payload interface {
+	isGrpcLogEntry_Payload()
+}
+
+type GrpcLogEntry_ClientHeader struct {
+	ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"`
+}
+
+type GrpcLogEntry_ServerHeader struct {
+	ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"`
+}
+
+type GrpcLogEntry_Message struct {
+	// Used by EVENT_TYPE_CLIENT_MESSAGE, EVENT_TYPE_SERVER_MESSAGE
+	Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"`
+}
+
+type GrpcLogEntry_Trailer struct {
+	Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"`
+}
+
+func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
+
+type ClientHeader struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// This contains only the metadata from the application.
+	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+	// The name of the RPC method, which looks something like:
+	// /<service_name>/<method_name>
+	// Note the leading "/" character.
+	MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
+	// A single process may be used to run multiple virtual
+	// servers with different identities.
+	// The authority is the name of such a server identity.
+	// It is typically a portion of the URI in the form of
+	// <host> or <host>:<port>.
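+	// e.g. "example.com" or "example.com:8080" (illustrative values).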
+ Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` + // the RPC timeout + Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` +} + +func (x *ClientHeader) Reset() { + *x = ClientHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientHeader) ProtoMessage() {} + +func (x *ClientHeader) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientHeader.ProtoReflect.Descriptor instead. +func (*ClientHeader) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *ClientHeader) GetMethodName() string { + if x != nil { + return x.MethodName + } + return "" +} + +func (x *ClientHeader) GetAuthority() string { + if x != nil { + return x.Authority + } + return "" +} + +func (x *ClientHeader) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +type ServerHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *ServerHeader) Reset() { + *x = ServerHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerHeader) ProtoMessage() {} + +func (x *ServerHeader) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerHeader.ProtoReflect.Descriptor instead. +func (*ServerHeader) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +type Trailer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The gRPC status code. + StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + // An original status message before any transport specific + // encoding. + StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` + // The value of the 'grpc-status-details-bin' metadata key. 
If + // present, this is always an encoded 'google.rpc.Status' message. + StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` +} + +func (x *Trailer) Reset() { + *x = Trailer{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Trailer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Trailer) ProtoMessage() {} + +func (x *Trailer) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Trailer.ProtoReflect.Descriptor instead. +func (*Trailer) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{3} +} + +func (x *Trailer) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Trailer) GetStatusCode() uint32 { + if x != nil { + return x.StatusCode + } + return 0 +} + +func (x *Trailer) GetStatusMessage() string { + if x != nil { + return x.StatusMessage + } + return "" +} + +func (x *Trailer) GetStatusDetails() []byte { + if x != nil { + return x.StatusDetails + } + return nil +} + +// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE +type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Length of the message. It may not be the same as the length of the + // data field, as the logging payload can be truncated or omitted. + Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` + // May be truncated or omitted. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message.ProtoReflect.Descriptor instead. +func (*Message) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{4} +} + +func (x *Message) GetLength() uint32 { + if x != nil { + return x.Length + } + return 0 +} + +func (x *Message) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +// A list of metadata pairs, used in the payload of client header, +// server header, and server trailer. +// Implementations may omit some entries to honor the header limits +// of GRPC_BINARY_LOG_CONFIG. +// +// Header keys added by gRPC are omitted. 
To be more specific, +// implementations will not log the following entries, and this is +// not to be treated as a truncation: +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials +// +// Implementations must always log grpc-trace-bin if it is present. +// Practically speaking it will only be visible on server side because +// grpc-trace-bin is managed by low level client side mechanisms +// inaccessible from the application level. On server side, the +// header is just a normal metadata key. +// The pair will not count towards the size limit. +type Metadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *Metadata) Reset() { + *x = Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metadata) ProtoMessage() {} + +func (x *Metadata) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. +func (*Metadata) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{5} +} + +func (x *Metadata) GetEntry() []*MetadataEntry { + if x != nil { + return x.Entry + } + return nil +} + +// A metadata key value pair +type MetadataEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *MetadataEntry) Reset() { + *x = MetadataEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataEntry) ProtoMessage() {} + +func (x *MetadataEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataEntry.ProtoReflect.Descriptor instead. 
+func (*MetadataEntry) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{6} +} + +func (x *MetadataEntry) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *MetadataEntry) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +// Address information +type Address struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // only for TYPE_IPV4 and TYPE_IPV6 + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` +} + +func (x *Address) Reset() { + *x = Address{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Address) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Address) ProtoMessage() {} + +func (x *Address) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Address.ProtoReflect.Descriptor instead. +func (*Address) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7} +} + +func (x *Address) GetType() Address_Type { + if x != nil { + return x.Type + } + return Address_TYPE_UNKNOWN +} + +func (x *Address) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *Address) GetIpPort() uint32 { + if x != nil { + return x.IpPort + } + return 0 +} + +var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor + +var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, + 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 
0x68, 0x69, 0x6e, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65, + 0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12, + 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, + 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46, + 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, + 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69, + 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b, + 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70, + 0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45, + 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 
0x12, + 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, + 0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, + 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45, + 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, + 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, + 0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53, + 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, + 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72, + 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 
0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a, + 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07, + 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, + 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14, + 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69, + 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 
0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once + file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc +) + +func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { + file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() { + file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData) + }) + return file_grpc_binlog_v1_binarylog_proto_rawDescData +} + +var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ + (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType + (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger + (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type + (*GrpcLogEntry)(nil), // 3: grpc.binarylog.v1.GrpcLogEntry + (*ClientHeader)(nil), // 4: grpc.binarylog.v1.ClientHeader + (*ServerHeader)(nil), // 5: grpc.binarylog.v1.ServerHeader + (*Trailer)(nil), // 6: grpc.binarylog.v1.Trailer + (*Message)(nil), // 7: grpc.binarylog.v1.Message + (*Metadata)(nil), // 8: grpc.binarylog.v1.Metadata + (*MetadataEntry)(nil), // 9: grpc.binarylog.v1.MetadataEntry + (*Address)(nil), // 10: grpc.binarylog.v1.Address + (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 12: google.protobuf.Duration +} +var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{ + 11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp + 0, // 1: grpc.binarylog.v1.GrpcLogEntry.type:type_name -> grpc.binarylog.v1.GrpcLogEntry.EventType + 1, // 2: grpc.binarylog.v1.GrpcLogEntry.logger:type_name -> grpc.binarylog.v1.GrpcLogEntry.Logger + 4, // 3: grpc.binarylog.v1.GrpcLogEntry.client_header:type_name -> grpc.binarylog.v1.ClientHeader + 5, // 4: grpc.binarylog.v1.GrpcLogEntry.server_header:type_name -> grpc.binarylog.v1.ServerHeader + 7, // 5: grpc.binarylog.v1.GrpcLogEntry.message:type_name -> grpc.binarylog.v1.Message + 6, // 6: grpc.binarylog.v1.GrpcLogEntry.trailer:type_name -> grpc.binarylog.v1.Trailer + 10, // 7: grpc.binarylog.v1.GrpcLogEntry.peer:type_name -> grpc.binarylog.v1.Address + 8, // 8: grpc.binarylog.v1.ClientHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 12, // 9: grpc.binarylog.v1.ClientHeader.timeout:type_name -> google.protobuf.Duration + 8, // 10: grpc.binarylog.v1.ServerHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 8, // 11: grpc.binarylog.v1.Trailer.metadata:type_name -> grpc.binarylog.v1.Metadata + 9, // 12: grpc.binarylog.v1.Metadata.entry:type_name -> grpc.binarylog.v1.MetadataEntry + 2, // 13: grpc.binarylog.v1.Address.type:type_name -> grpc.binarylog.v1.Address.Type + 14, // [14:14] is the sub-list for method output_type + 14, // [14:14] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_grpc_binlog_v1_binarylog_proto_init() } +func file_grpc_binlog_v1_binarylog_proto_init() { + if File_grpc_binlog_v1_binarylog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*GrpcLogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Trailer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Address); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*GrpcLogEntry_ClientHeader)(nil), + (*GrpcLogEntry_ServerHeader)(nil), + (*GrpcLogEntry_Message)(nil), + (*GrpcLogEntry_Trailer)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc, + NumEnums: 3, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_grpc_binlog_v1_binarylog_proto_goTypes, + DependencyIndexes: file_grpc_binlog_v1_binarylog_proto_depIdxs, + EnumInfos: file_grpc_binlog_v1_binarylog_proto_enumTypes, + MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes, + }.Build() + File_grpc_binlog_v1_binarylog_proto = out.File + file_grpc_binlog_v1_binarylog_proto_rawDesc = nil + file_grpc_binlog_v1_binarylog_proto_goTypes = nil + file_grpc_binlog_v1_binarylog_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go new file mode 100644 index 000000000..9e20e4d38 --- /dev/null +++ b/vendor/google.golang.org/grpc/call.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// All errors returned by Invoke are compatible with the status package. +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.unaryInt != nil { + return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) + } + return invoke(ctx, method, args, reply, cc, opts...) +} + +func combine(o1 []CallOption, o2 []CallOption) []CallOption { + // we don't use append because o1 could have extra capacity whose + // elements would be overwritten, which could cause inadvertent + // sharing (and race conditions) between concurrent calls + if len(o1) == 0 { + return o2 + } else if len(o2) == 0 { + return o1 + } + ret := make([]CallOption, len(o1)+len(o2)) + copy(ret, o1) + copy(ret[len(o1):], o2) + return ret +} + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// DEPRECATED: Use ClientConn.Invoke instead. +func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return cc.Invoke(ctx, method, args, reply, opts...) +} + +var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} + +func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(req); err != nil { + return err + } + return cs.RecvMsg(reply) +} diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go new file mode 100644 index 000000000..f9af78913 --- /dev/null +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -0,0 +1,1731 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "math" + "net/url" + "reflect" + "strings" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" + + _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. + _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. + _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. +) + +const ( + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second + // must match grpclbName in grpclb/grpclb.go + grpclbName = "grpclb" +) + +var ( + // ErrClientConnClosing indicates that the operation is illegal because + // the ClientConn is closing. + // + // Deprecated: this error should not be relied upon by users; use the status + // code of Canceled instead. + ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing") + // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. + errConnDrain = errors.New("grpc: the connection is drained") + // errConnClosing indicates that the connection is closing. + errConnClosing = errors.New("grpc: the connection is closing") + // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default + // service config. + invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" +) + +// The following errors are returned from Dial and DialContext +var ( + // errNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicitly + // call WithInsecure DialOption to disable security. + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)") + // errTransportCredsAndBundle indicates that creds bundle is used together + // with other individual Transport Credentials. + errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") + // errNoTransportCredsInBundle indicated that the configured creds bundle + // returned a transport credentials which was nil. + errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials") + // errTransportCredentialsMissing indicates that users want to transmit + // security information (e.g., OAuth2 token) which requires secure + // connection on an insecure connection. + errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") +) + +const ( + defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultClientMaxSendMessageSize = math.MaxInt32 + // http2IOBufSize specifies the buffer size for sending frames. 
+ defaultWriteBufSize = 32 * 1024 + defaultReadBufSize = 32 * 1024 +) + +// Dial creates a client connection to the given target. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +type defaultConfigSelector struct { + sc *ServiceConfig +} + +func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) { + return &iresolver.RPCConfig{ + Context: rpcInfo.Context, + MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method), + }, nil +} + +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc := &ClientConn{ + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + blockingpicker: newPickerWrapper(), + czData: new(channelzData), + firstResolveEvent: grpcsync.NewEvent(), + } + cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + + for _, opt := range opts { + opt.apply(&cc.dopts) + } + + chainUnaryClientInterceptors(cc) + chainStreamClientInterceptors(cc) + + defer func() { + if err != nil { + cc.Close() + } + }() + + if channelz.IsOn() { + if cc.dopts.channelzParentID != 0 { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Channel Created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), + Severity: channelz.CtInfo, + }, + }) + } else { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) + channelz.Info(logger, cc.channelzID, "Channel Created") + } + cc.csMgr.channelzID = cc.channelzID + } + + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return nil, errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return nil, errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return nil, errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return nil, 
errTransportCredentialsMissing + } + } + } + + if cc.dopts.defaultServiceConfigRawJSON != nil { + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + if scpr.Err != nil { + return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) + } + cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) + } + cc.mkp = cc.dopts.copts.KeepaliveParams + + if cc.dopts.copts.UserAgent != "" { + cc.dopts.copts.UserAgent += " " + grpcUA + } else { + cc.dopts.copts.UserAgent = grpcUA + } + + if cc.dopts.timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) + defer cancel() + } + defer func() { + select { + case <-ctx.Done(): + switch { + case ctx.Err() == err: + conn = nil + case err == nil || !cc.dopts.returnLastError: + conn, err = nil, ctx.Err() + default: + conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err) + } + default: + } + }() + + scSet := false + if cc.dopts.scChan != nil { + // Try to get an initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) + scSet = true + } + default: + } + } + if cc.dopts.bs == nil { + cc.dopts.bs = backoff.DefaultExponential + } + + // Determine the resolver to use. + resolverBuilder, err := cc.parseTargetAndFindResolver() + if err != nil { + return nil, err + } + cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts) + if err != nil { + return nil, err + } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) + + if cc.dopts.scChan != nil && !scSet { + // Blocking wait for the initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } + if cc.dopts.scChan != nil { + go cc.scWatcher() + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + cc.balancerBuildOpts = balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + } + + // Build the resolver. + rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %v", err) + } + cc.mu.Lock() + cc.resolverWrapper = rWrapper + cc.mu.Unlock() + + // A blocking dial blocks until the clientConn is ready. + if cc.dopts.block { + for { + cc.Connect() + s := cc.GetState() + if s == connectivity.Ready { + break + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } + return nil, ctx.Err() + } + } + } + + return cc, nil +} + +// chainUnaryClientInterceptors chains all unary client interceptors into one. 
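+//
+// (Editor's sketch, not part of the upstream file: with the dial options
+//
+//	grpc.WithUnaryInterceptor(authInterceptor),
+//	grpc.WithChainUnaryInterceptor(logInterceptor, retryInterceptor),
+//
+// the resulting chain runs authInterceptor first, then logInterceptor, then
+// retryInterceptor, and finally the real invoker. The interceptor names here
+// are hypothetical.)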
+func chainUnaryClientInterceptors(cc *ClientConn) {
+	interceptors := cc.dopts.chainUnaryInts
+	// Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will
+	// be executed before any other chained interceptors.
+	if cc.dopts.unaryInt != nil {
+		interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...)
+	}
+	var chainedInt UnaryClientInterceptor
+	if len(interceptors) == 0 {
+		chainedInt = nil
+	} else if len(interceptors) == 1 {
+		chainedInt = interceptors[0]
+	} else {
+		chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
+			return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...)
+		}
+	}
+	cc.dopts.unaryInt = chainedInt
+}
+
+// getChainUnaryInvoker recursively generates the chained unary invoker.
+func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker {
+	if curr == len(interceptors)-1 {
+		return finalInvoker
+	}
+	return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+		return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...)
+	}
+}
+
+// chainStreamClientInterceptors chains all stream client interceptors into one.
+func chainStreamClientInterceptors(cc *ClientConn) {
+	interceptors := cc.dopts.chainStreamInts
+	// Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will
+	// be executed before any other chained interceptors.
+	if cc.dopts.streamInt != nil {
+		interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...)
+	}
+	var chainedInt StreamClientInterceptor
+	if len(interceptors) == 0 {
+		chainedInt = nil
+	} else if len(interceptors) == 1 {
+		chainedInt = interceptors[0]
+	} else {
+		chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) {
+			return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...)
+		}
+	}
+	cc.dopts.streamInt = chainedInt
+}
+
+// getChainStreamer recursively generates the chained client stream constructor.
+func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer {
+	if curr == len(interceptors)-1 {
+		return finalStreamer
+	}
+	return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
+		return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...)
+	}
+}
+
+// connectivityStateManager keeps the connectivity.State of ClientConn.
+// This struct will eventually be exported so the balancers can access it.
+type connectivityStateManager struct {
+	mu         sync.Mutex
+	state      connectivity.State
+	notifyChan chan struct{}
+	channelzID int64
+}
+
+// updateState updates the connectivity.State of ClientConn.
+// If there's a change it notifies goroutines waiting on state change to
+// happen.
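+//
+// (Editor's note: the wake-up uses Go's close-a-channel broadcast idiom —
+// closing notifyChan releases every goroutine blocked on it at once, e.g.:
+//
+//	ch := csm.getNotifyChan()
+//	select {
+//	case <-ch: // unblocked when updateState closes the channel
+//	case <-ctx.Done():
+//	}
+//
+// This sketch mirrors WaitForStateChange below and is illustrative only.)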
+func (csm *connectivityStateManager) updateState(state connectivity.State) { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.state == connectivity.Shutdown { + return + } + if csm.state == state { + return + } + csm.state = state + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. + close(csm.notifyChan) + csm.notifyChan = nil + } +} + +func (csm *connectivityStateManager) getState() connectivity.State { + csm.mu.Lock() + defer csm.mu.Unlock() + return csm.state +} + +func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.notifyChan == nil { + csm.notifyChan = make(chan struct{}) + } + return csm.notifyChan +} + +// ClientConnInterface defines the functions clients need to perform unary and +// streaming RPCs. It is implemented by *ClientConn, and is only intended to +// be referenced by generated code. +type ClientConnInterface interface { + // Invoke performs a unary RPC and returns after the response is received + // into reply. + Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + // NewStream begins a streaming RPC. + NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) +} + +// Assert *ClientConn implements ClientConnInterface. +var _ ClientConnInterface = (*ClientConn)(nil) + +// ClientConn represents a virtual connection to a conceptual endpoint, to +// perform RPCs. +// +// A ClientConn is free to have zero or more actual connections to the endpoint +// based on configuration, load, etc. It is also free to determine which actual +// endpoints to use and may change it every RPC, permitting client-side load +// balancing. +// +// A ClientConn encapsulates a range of functionality including name +// resolution, TCP connection establishment (with retries and backoff) and TLS +// handshakes. It also handles errors on established connections by +// re-resolving the name and reconnecting. +type ClientConn struct { + ctx context.Context + cancel context.CancelFunc + + target string + parsedTarget resolver.Target + authority string + dopts dialOptions + csMgr *connectivityStateManager + + balancerBuildOpts balancer.BuildOptions + blockingpicker *pickerWrapper + + safeConfigSelector iresolver.SafeConfigSelector + + mu sync.RWMutex + resolverWrapper *ccResolverWrapper + sc *ServiceConfig + conns map[*addrConn]struct{} + // Keepalive parameter can be updated if a GoAway is received. + mkp keepalive.ClientParameters + curBalancerName string + balancerWrapper *ccBalancerWrapper + retryThrottler atomic.Value + + firstResolveEvent *grpcsync.Event + + channelzID int64 // channelz unique identification number + czData *channelzData + + lceMu sync.Mutex // protects lastConnectionError + lastConnectionError error +} + +// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or +// ctx expires. A true value is returned in former case and false in latter. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
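+//
+// A hypothetical wait-until-ready loop built on this API (editor's sketch,
+// assuming a ctx and a dialed cc; not part of the upstream file):
+//
+//	for {
+//		s := cc.GetState()
+//		if s == connectivity.Ready {
+//			break
+//		}
+//		if !cc.WaitForStateChange(ctx, s) {
+//			return ctx.Err() // ctx expired before the state changed
+//		}
+//	}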
+func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { + ch := cc.csMgr.getNotifyChan() + if cc.csMgr.getState() != sourceState { + return true + } + select { + case <-ctx.Done(): + return false + case <-ch: + return true + } +} + +// GetState returns the connectivity.State of ClientConn. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func (cc *ClientConn) GetState() connectivity.State { + return cc.csMgr.getState() +} + +// Connect causes all subchannels in the ClientConn to attempt to connect if +// the channel is idle. Does not wait for the connection attempts to begin +// before returning. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func (cc *ClientConn) Connect() { + cc.mu.Lock() + defer cc.mu.Unlock() + if cc.balancerWrapper != nil && cc.balancerWrapper.exitIdle() { + return + } + for ac := range cc.conns { + go ac.connect() + } +} + +func (cc *ClientConn) scWatcher() { + for { + select { + case sc, ok := <-cc.dopts.scChan: + if !ok { + return + } + cc.mu.Lock() + // TODO: load balance policy runtime change is ignored. + // We may revisit this decision in the future. + cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) + cc.mu.Unlock() + case <-cc.ctx.Done(): + return + } + } +} + +// waitForResolvedAddrs blocks until the resolver has provided addresses or the +// context expires. Returns nil unless the context expires first; otherwise +// returns a status error based on the context. +func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { + // This is on the RPC path, so we use a fast path to avoid the + // more-expensive "select" below after the resolver has returned once. + if cc.firstResolveEvent.HasFired() { + return nil + } + select { + case <-cc.firstResolveEvent.Done(): + return nil + case <-ctx.Done(): + return status.FromContextError(ctx.Err()).Err() + case <-cc.ctx.Done(): + return ErrClientConnClosing + } +} + +var emptyServiceConfig *ServiceConfig + +func init() { + cfg := parseServiceConfig("{}") + if cfg.Err != nil { + panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) + } + emptyServiceConfig = cfg.Config.(*ServiceConfig) +} + +func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { + if cc.sc != nil { + cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs) + return + } + if cc.dopts.defaultServiceConfig != nil { + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs) + } else { + cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs) + } +} + +func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { + defer cc.firstResolveEvent.Fire() + cc.mu.Lock() + // Check if the ClientConn is already closed. Some fields (e.g. + // balancerWrapper) are set to nil when closing the ClientConn, and could + // cause nil pointer panic if we don't have this check. + if cc.conns == nil { + cc.mu.Unlock() + return nil + } + + if err != nil { + // May need to apply the initial service config in case the resolver + // doesn't support service configs, or doesn't provide a service config + // with the new addresses. 
+ cc.maybeApplyDefaultServiceConfig(nil) + + if cc.balancerWrapper != nil { + cc.balancerWrapper.resolverError(err) + } + + // No addresses are valid with err set; return early. + cc.mu.Unlock() + return balancer.ErrBadResolverState + } + + var ret error + if cc.dopts.disableServiceConfig { + channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) + cc.maybeApplyDefaultServiceConfig(s.Addresses) + } else if s.ServiceConfig == nil { + cc.maybeApplyDefaultServiceConfig(s.Addresses) + // TODO: do we need to apply a failing LB policy if there is no + // default, per the error handling design? + } else { + if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok { + configSelector := iresolver.GetConfigSelector(s) + if configSelector != nil { + if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 { + channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector") + } + } else { + configSelector = &defaultConfigSelector{sc} + } + cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) + } else { + ret = balancer.ErrBadResolverState + if cc.balancerWrapper == nil { + var err error + if s.ServiceConfig.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) + } else { + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) + } + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc}) + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) + cc.mu.Unlock() + return ret + } + } + } + + var balCfg serviceconfig.LoadBalancingConfig + if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + balCfg = cc.sc.lbConfig.cfg + } + + cbn := cc.curBalancerName + bw := cc.balancerWrapper + cc.mu.Unlock() + if cbn != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. + for i := 0; i < len(s.Addresses); { + if s.Addresses[i].Type == resolver.GRPCLB { + copy(s.Addresses[i:], s.Addresses[i+1:]) + s.Addresses = s.Addresses[:len(s.Addresses)-1] + continue + } + i++ + } + } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) + if ret == nil { + ret = uccsErr // prefer ErrBadResolver state since any other error is + // currently meaningless to the caller. + } + return ret +} + +// switchBalancer starts the switching from current balancer to the balancer +// with the given name. +// +// It will NOT send the current address list to the new balancer. If needed, +// caller of this function should send address list to the new balancer after +// this function returns. +// +// Caller must hold cc.mu. +func (cc *ClientConn) switchBalancer(name string) { + if strings.EqualFold(cc.curBalancerName, name) { + return + } + + channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) + if cc.dopts.balancerBuilder != nil { + channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") + return + } + if cc.balancerWrapper != nil { + // Don't hold cc.mu while closing the balancers. The balancers may call + // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex + // would cause a deadlock in that case. 
+ cc.mu.Unlock() + cc.balancerWrapper.close() + cc.mu.Lock() + } + + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) + channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) + } + + cc.curBalancerName = builder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) +} + +func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + // TODO(bar switching) send updates to all balancer wrappers when balancer + // gracefully switching is supported. + cc.balancerWrapper.handleSubConnStateChange(sc, s, err) + cc.mu.Unlock() +} + +// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// +// Caller needs to make sure len(addrs) > 0. +func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + ac := &addrConn{ + state: connectivity.Idle, + cc: cc, + addrs: addrs, + scopts: opts, + dopts: cc.dopts, + czData: new(channelzData), + resetBackoff: make(chan struct{}), + } + ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + // Track ac in cc. This needs to be done before any getTransport(...) is called. + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return nil, ErrClientConnClosing + } + if channelz.IsOn() { + ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel Created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), + Severity: channelz.CtInfo, + }, + }) + } + cc.conns[ac] = struct{}{} + cc.mu.Unlock() + return ac, nil +} + +// removeAddrConn removes the addrConn in the subConn from clientConn. +// It also tears down the ac with the given error. +func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + delete(cc.conns, ac) + cc.mu.Unlock() + ac.tearDown(err) +} + +func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { + return &channelz.ChannelInternalMetric{ + State: cc.GetState(), + Target: cc.target, + CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), + } +} + +// Target returns the target string of the ClientConn. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (cc *ClientConn) Target() string { + return cc.target +} + +func (cc *ClientConn) incrCallsStarted() { + atomic.AddInt64(&cc.czData.callsStarted, 1) + atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (cc *ClientConn) incrCallsSucceeded() { + atomic.AddInt64(&cc.czData.callsSucceeded, 1) +} + +func (cc *ClientConn) incrCallsFailed() { + atomic.AddInt64(&cc.czData.callsFailed, 1) +} + +// connect starts creating a transport. +// It does nothing if the ac is not IDLE. 
+// TODO(bar) Move this to the addrConn section. +func (ac *addrConn) connect() error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + if ac.state != connectivity.Idle { + ac.mu.Unlock() + return nil + } + // Update connectivity state within the lock to prevent subsequent or + // concurrent calls from resetting the transport more than once. + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.mu.Unlock() + + ac.resetTransport() + return nil +} + +// tryUpdateAddrs tries to update ac.addrs with the new addresses list. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens. +// +// If ac is TransientFailure, it updates ac.addrs and returns true. The updated +// addresses will be picked up by retry in the next iteration after backoff. +// +// If ac is Shutdown or Idle, it updates ac.addrs and returns true. +// +// If ac is Ready, it checks whether current connected address of ac is in the +// new addrs list. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. +func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { + ac.mu.Lock() + defer ac.mu.Unlock() + channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + if ac.state == connectivity.Shutdown || + ac.state == connectivity.TransientFailure || + ac.state == connectivity.Idle { + ac.addrs = addrs + return true + } + + if ac.state == connectivity.Connecting { + return false + } + + // ac.state is Ready, try to find the connected address. + var curAddrFound bool + for _, a := range addrs { + a.ServerName = ac.cc.getServerName(a) + if reflect.DeepEqual(ac.curAddr, a) { + curAddrFound = true + break + } + } + channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + if curAddrFound { + ac.addrs = addrs + } + + return curAddrFound +} + +// getServerName determines the serverName to be used in the connection +// handshake. The default value for the serverName is the authority on the +// ClientConn, which either comes from the user's dial target or through an +// authority override specified using the WithAuthority dial option. Name +// resolvers can specify a per-address override for the serverName through the +// resolver.Address.ServerName field which is used only if the WithAuthority +// dial option was not used. The rationale is that per-address authority +// overrides specified by the name resolver can represent a security risk, while +// an override specified by the user is more dependable since they probably know +// what they are doing. +func (cc *ClientConn) getServerName(addr resolver.Address) string { + if cc.dopts.authority != "" { + return cc.dopts.authority + } + if addr.ServerName != "" { + return addr.ServerName + } + return cc.authority +} + +func getMethodConfig(sc *ServiceConfig, method string) MethodConfig { + if sc == nil { + return MethodConfig{} + } + if m, ok := sc.Methods[method]; ok { + return m + } + i := strings.LastIndex(method, "/") + if m, ok := sc.Methods[method[:i+1]]; ok { + return m + } + return sc.Methods[""] +} + +// GetMethodConfig gets the method config of the input method. +// If there's an exact match for input method (i.e. /service/method), we return +// the corresponding MethodConfig. 
+// If there isn't an exact match for the input method, we look for the service's default +// config under the service (i.e /service/) and then for the default for all services (empty string). +// +// If there is a default MethodConfig for the service, we return it. +// Otherwise, we return an empty MethodConfig. +func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { + // TODO: Avoid the locking here. + cc.mu.RLock() + defer cc.mu.RUnlock() + return getMethodConfig(cc.sc, method) +} + +func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { + cc.mu.RLock() + defer cc.mu.RUnlock() + if cc.sc == nil { + return nil + } + return cc.sc.healthCheckConfig +} + +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { + t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + Ctx: ctx, + FullMethodName: method, + }) + if err != nil { + return nil, nil, toRPCErr(err) + } + return t, done, nil +} + +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { + if sc == nil { + // should never reach here. + return + } + cc.sc = sc + if configSelector != nil { + cc.safeConfigSelector.UpdateConfigSelector(configSelector) + } + + if cc.sc.retryThrottling != nil { + newThrottler := &retryThrottler{ + tokens: cc.sc.retryThrottling.MaxTokens, + max: cc.sc.retryThrottling.MaxTokens, + thresh: cc.sc.retryThrottling.MaxTokens / 2, + ratio: cc.sc.retryThrottling.TokenRatio, + } + cc.retryThrottler.Store(newThrottler) + } else { + cc.retryThrottler.Store((*retryThrottler)(nil)) + } + + if cc.dopts.balancerBuilder == nil { + // Only look at balancer types and switch balancer if balancer dial + // option is not set. + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break + } + } + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } + } + cc.switchBalancer(newBalancerName) + } else if cc.balancerWrapper == nil { + // Balancer dial option was set, and this is the first time handling + // resolved addresses. Build a balancer with dopts.balancerBuilder. + cc.curBalancerName = cc.dopts.balancerBuilder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + } +} + +func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { + cc.mu.RLock() + r := cc.resolverWrapper + cc.mu.RUnlock() + if r == nil { + return + } + go r.resolveNow(o) +} + +// ResetConnectBackoff wakes up all subchannels in transient failure and causes +// them to attempt another connection immediately. It also resets the backoff +// times used for subsequent attempts regardless of the current state. +// +// In general, this function should not be used. Typical service or network +// outages result in a reasonable client reconnection strategy by default. +// However, if a previously unavailable network becomes available, this may be +// used to trigger an immediate reconnect. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
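+//
+// (Editor's illustration, not upstream: a typical trigger is a platform
+// network-change signal, so subchannels in TRANSIENT_FAILURE stop waiting out
+// their backoff, e.g.:
+//
+//	notifyNetworkUp(func() {
+//		cc.ResetConnectBackoff()
+//	})
+//
+// where notifyNetworkUp is an assumed, hypothetical platform callback.)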
+func (cc *ClientConn) ResetConnectBackoff() { + cc.mu.Lock() + conns := cc.conns + cc.mu.Unlock() + for ac := range conns { + ac.resetConnectBackoff() + } +} + +// Close tears down the ClientConn and all underlying connections. +func (cc *ClientConn) Close() error { + defer cc.cancel() + + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + conns := cc.conns + cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) + + rWrapper := cc.resolverWrapper + cc.resolverWrapper = nil + bWrapper := cc.balancerWrapper + cc.balancerWrapper = nil + cc.mu.Unlock() + + cc.blockingpicker.close() + + if bWrapper != nil { + bWrapper.close() + } + if rWrapper != nil { + rWrapper.close() + } + + for ac := range conns { + ac.tearDown(ErrClientConnClosing) + } + if channelz.IsOn() { + ted := &channelz.TraceEventDesc{ + Desc: "Channel Deleted", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != 0 { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to + // the entity being deleted, and thus prevent it from being deleted right away. + channelz.RemoveEntry(cc.channelzID) + } + return nil +} + +// addrConn is a network connection to a given address. +type addrConn struct { + ctx context.Context + cancel context.CancelFunc + + cc *ClientConn + dopts dialOptions + acbw balancer.SubConn + scopts balancer.NewSubConnOptions + + // transport is set when there's a viable transport (note: ac state may not be READY as LB channel + // health checking may require server to report healthy to set ac to READY), and is reset + // to nil when the current transport should no longer be used to create a stream (e.g. after GoAway + // is received, transport is closed, ac has been torn down). + transport transport.ClientTransport // The current transport. + + mu sync.Mutex + curAddr resolver.Address // The current address. + addrs []resolver.Address // All addresses that the resolver resolved to. + + // Use updateConnectivityState for updating addrConn's connectivity state. + state connectivity.State + + backoffIdx int // Needs to be stateful for resetConnectBackoff. + resetBackoff chan struct{} + + channelzID int64 // channelz unique identification number. + czData *channelzData +} + +// Note: this requires a lock on ac.mu. +func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) { + if ac.state == s { + return + } + ac.state = s + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) +} + +// adjustParams updates parameters used to create transports upon +// receiving a GoAway. +func (ac *addrConn) adjustParams(r transport.GoAwayReason) { + switch r { + case transport.GoAwayTooManyPings: + v := 2 * ac.dopts.copts.KeepaliveParams.Time + ac.cc.mu.Lock() + if v > ac.cc.mkp.Time { + ac.cc.mkp.Time = v + } + ac.cc.mu.Unlock() + } +} + +func (ac *addrConn) resetTransport() { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + + addrs := ac.addrs + backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) + // This will be the duration that dial gets to finish. 
+	dialDuration := minConnectTimeout
+	if ac.dopts.minConnectTimeout != nil {
+		dialDuration = ac.dopts.minConnectTimeout()
+	}
+
+	if dialDuration < backoffFor {
+		// Give dial more time as we keep failing to connect.
+		dialDuration = backoffFor
+	}
+	// We can potentially spend all the time trying the first address, and
+	// if the server accepts the connection and then hangs, the following
+	// addresses will never be tried.
+	//
+	// The spec doesn't mention what should be done for multiple addresses.
+	// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
+	connectDeadline := time.Now().Add(dialDuration)
+
+	ac.updateConnectivityState(connectivity.Connecting, nil)
+	ac.mu.Unlock()
+
+	if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil {
+		ac.cc.resolveNow(resolver.ResolveNowOptions{})
+		// After exhausting all addresses, the addrConn enters
+		// TRANSIENT_FAILURE.
+		ac.mu.Lock()
+		if ac.state == connectivity.Shutdown {
+			ac.mu.Unlock()
+			return
+		}
+		ac.updateConnectivityState(connectivity.TransientFailure, err)
+
+		// Backoff.
+		b := ac.resetBackoff
+		ac.mu.Unlock()
+
+		timer := time.NewTimer(backoffFor)
+		select {
+		case <-timer.C:
+			ac.mu.Lock()
+			ac.backoffIdx++
+			ac.mu.Unlock()
+		case <-b:
+			timer.Stop()
+		case <-ac.ctx.Done():
+			timer.Stop()
+			return
+		}
+
+		ac.mu.Lock()
+		if ac.state != connectivity.Shutdown {
+			ac.updateConnectivityState(connectivity.Idle, err)
+		}
+		ac.mu.Unlock()
+		return
+	}
+	// Success; reset backoff.
+	ac.mu.Lock()
+	ac.backoffIdx = 0
+	ac.mu.Unlock()
+}
+
+// tryAllAddrs tries to create a connection to each of the addresses, stopping
+// at the first successful one. It returns an error if no address was
+// successfully connected, or updates ac appropriately with the new transport.
+func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error {
+	var firstConnErr error
+	for _, addr := range addrs {
+		ac.mu.Lock()
+		if ac.state == connectivity.Shutdown {
+			ac.mu.Unlock()
+			return errConnClosing
+		}
+
+		ac.cc.mu.RLock()
+		ac.dopts.copts.KeepaliveParams = ac.cc.mkp
+		ac.cc.mu.RUnlock()
+
+		copts := ac.dopts.copts
+		if ac.scopts.CredsBundle != nil {
+			copts.CredsBundle = ac.scopts.CredsBundle
+		}
+		ac.mu.Unlock()
+
+		channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
+
+		err := ac.createTransport(addr, copts, connectDeadline)
+		if err == nil {
+			return nil
+		}
+		if firstConnErr == nil {
+			firstConnErr = err
+		}
+		ac.cc.updateConnectionError(err)
+	}
+
+	// Couldn't connect to any address.
+	return firstConnErr
+}
+
+// createTransport creates a connection to addr. It returns an error if the
+// address was not successfully connected, or updates ac appropriately with the
+// new transport.
+func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
+	// TODO: Delete prefaceReceived and move the logic to wait for it into the
+	// transport.
+	prefaceReceived := grpcsync.NewEvent()
+	connClosed := grpcsync.NewEvent()
+
+	addr.ServerName = ac.cc.getServerName(addr)
+	hctx, hcancel := context.WithCancel(ac.ctx)
+	hcStarted := false // protected by ac.mu
+
+	onClose := func() {
+		ac.mu.Lock()
+		defer ac.mu.Unlock()
+		defer connClosed.Fire()
+		if !hcStarted || hctx.Err() != nil {
+			// We didn't start the health check or set the state to READY, so
+			// no need to do anything else here.
+ // + // OR, we have already cancelled the health check context, meaning + // we have already called onClose once for this transport. In this + // case it would be dangerous to clear the transport and update the + // state, since there may be a new transport in this addrConn. + return + } + hcancel() + ac.transport = nil + // Refresh the name resolver + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + if ac.state != connectivity.Shutdown { + ac.updateConnectivityState(connectivity.Idle, nil) + } + } + + onGoAway := func(r transport.GoAwayReason) { + ac.mu.Lock() + ac.adjustParams(r) + ac.mu.Unlock() + onClose() + } + + connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + defer cancel() + if channelz.IsOn() { + copts.ChannelzParentID = ac.channelzID + } + + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) + if err != nil { + // newTr is either nil, or closed. + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v", addr, err) + return err + } + + select { + case <-connectCtx.Done(): + // We didn't get the preface in time. + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. + newTr.Close(transport.ErrConnClosing) + if connectCtx.Err() == context.DeadlineExceeded { + err := errors.New("failed to receive server preface within timeout") + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err) + return err + } + return nil + case <-prefaceReceived.Done(): + // We got the preface - huzzah! things are good. + ac.mu.Lock() + defer ac.mu.Unlock() + if connClosed.HasFired() { + // onClose called first; go idle but do nothing else. + if ac.state != connectivity.Shutdown { + ac.updateConnectivityState(connectivity.Idle, nil) + } + return nil + } + if ac.state == connectivity.Shutdown { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // been set at that point. + // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + // + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. + go newTr.Close(transport.ErrConnClosing) + return nil + } + ac.curAddr = addr + ac.transport = newTr + hcStarted = true + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + return nil + case <-connClosed.Done(): + // The transport has already closed. If we received the preface, too, + // this is not an error. + select { + case <-prefaceReceived.Done(): + return nil + default: + return errors.New("connection closed before server preface received") + } + } +} + +// startHealthCheck starts the health checking stream (RPC) to watch the health +// stats of this connection if health checking is requested and configured. +// +// LB channel health checking is enabled when all requirements below are met: +// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption +// 2. internal.HealthCheckFunc is set by importing the grpc/health package +// 3. 
a service config with non-empty healthCheckConfig field is provided +// 4. the load balancer requests it +// +// It sets addrConn to READY if the health checking stream is not started. +// +// Caller must hold ac.mu. +func (ac *addrConn) startHealthCheck(ctx context.Context) { + var healthcheckManagingState bool + defer func() { + if !healthcheckManagingState { + ac.updateConnectivityState(connectivity.Ready, nil) + } + }() + + if ac.cc.dopts.disableHealthCheck { + return + } + healthCheckConfig := ac.cc.healthCheckConfig() + if healthCheckConfig == nil { + return + } + if !ac.scopts.HealthCheckEnabled { + return + } + healthCheckFunc := ac.cc.dopts.healthCheckFunc + if healthCheckFunc == nil { + // The health package is not imported to set health check function. + // + // TODO: add a link to the health check doc in the error message. + channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.") + return + } + + healthcheckManagingState = true + + // Set up the health check helper functions. + currentTr := ac.transport + newStream := func(method string) (interface{}, error) { + ac.mu.Lock() + if ac.transport != currentTr { + ac.mu.Unlock() + return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use") + } + ac.mu.Unlock() + return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac) + } + setConnectivityState := func(s connectivity.State, lastErr error) { + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.transport != currentTr { + return + } + ac.updateConnectivityState(s, lastErr) + } + // Start the health checking stream. + go func() { + err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + if err != nil { + if status.Code(err) == codes.Unimplemented { + channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") + } else { + channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) + } + } + }() +} + +func (ac *addrConn) resetConnectBackoff() { + ac.mu.Lock() + close(ac.resetBackoff) + ac.backoffIdx = 0 + ac.resetBackoff = make(chan struct{}) + ac.mu.Unlock() +} + +// getReadyTransport returns the transport if ac's state is READY or nil if not. +func (ac *addrConn) getReadyTransport() transport.ClientTransport { + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.state == connectivity.Ready { + return ac.transport + } + return nil +} + +// tearDown starts to tear down the addrConn. +// +// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct +// will leak. In most cases, call cc.removeAddrConn() instead. +func (ac *addrConn) tearDown(err error) { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + curTr := ac.transport + ac.transport = nil + // We have to set the state to Shutdown before anything else to prevent races + // between setting the state and logic that waits on context cancellation / etc. + ac.updateConnectivityState(connectivity.Shutdown, nil) + ac.cancel() + ac.curAddr = resolver.Address{} + if err == errConnDrain && curTr != nil { + // GracefulClose(...) may be executed multiple times when + // i) receiving multiple GoAway frames from the server; or + // ii) there are concurrent name resolver/Balancer triggered + // address removal and GoAway. + // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. 
+		ac.mu.Unlock()
+		curTr.GracefulClose()
+		ac.mu.Lock()
+	}
+	if channelz.IsOn() {
+		channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
+			Desc:     "Subchannel Deleted",
+			Severity: channelz.CtInfo,
+			Parent: &channelz.TraceEventDesc{
+				Desc:     fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID),
+				Severity: channelz.CtInfo,
+			},
+		})
+		// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
+		// the entity being deleted, and thus prevent it from being deleted right away.
+		channelz.RemoveEntry(ac.channelzID)
+	}
+	ac.mu.Unlock()
+}
+
+func (ac *addrConn) getState() connectivity.State {
+	ac.mu.Lock()
+	defer ac.mu.Unlock()
+	return ac.state
+}
+
+func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric {
+	ac.mu.Lock()
+	addr := ac.curAddr.Addr
+	ac.mu.Unlock()
+	return &channelz.ChannelInternalMetric{
+		State:                    ac.getState(),
+		Target:                   addr,
+		CallsStarted:             atomic.LoadInt64(&ac.czData.callsStarted),
+		CallsSucceeded:           atomic.LoadInt64(&ac.czData.callsSucceeded),
+		CallsFailed:              atomic.LoadInt64(&ac.czData.callsFailed),
+		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)),
+	}
+}
+
+func (ac *addrConn) incrCallsStarted() {
+	atomic.AddInt64(&ac.czData.callsStarted, 1)
+	atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano())
+}
+
+func (ac *addrConn) incrCallsSucceeded() {
+	atomic.AddInt64(&ac.czData.callsSucceeded, 1)
+}
+
+func (ac *addrConn) incrCallsFailed() {
+	atomic.AddInt64(&ac.czData.callsFailed, 1)
+}
+
+type retryThrottler struct {
+	max    float64
+	thresh float64
+	ratio  float64
+
+	mu     sync.Mutex
+	tokens float64 // TODO(dfawley): replace with atomic and remove lock.
+}
+
+// throttle subtracts a retry token from the pool and returns whether a retry
+// should be throttled (disallowed) based upon the retry throttling policy in
+// the service config.
+func (rt *retryThrottler) throttle() bool {
+	if rt == nil {
+		return false
+	}
+	rt.mu.Lock()
+	defer rt.mu.Unlock()
+	rt.tokens--
+	if rt.tokens < 0 {
+		rt.tokens = 0
+	}
+	return rt.tokens <= rt.thresh
+}
+
+func (rt *retryThrottler) successfulRPC() {
+	if rt == nil {
+		return
+	}
+	rt.mu.Lock()
+	defer rt.mu.Unlock()
+	rt.tokens += rt.ratio
+	if rt.tokens > rt.max {
+		rt.tokens = rt.max
+	}
+}
+
+type channelzChannel struct {
+	cc *ClientConn
+}
+
+func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
+	return c.cc.channelzMetric()
+}
+
+// ErrClientConnTimeout indicates that the ClientConn cannot establish the
+// underlying connections within the specified timeout.
+//
+// Deprecated: This error is never returned by grpc and should not be
+// referenced by users.
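+//
+// (Editor's note, illustrative only: dial timeouts now surface as context
+// errors from a blocking dial rather than this sentinel, e.g.:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	conn, err := grpc.DialContext(ctx, target,
+//		grpc.WithTransportCredentials(insecure.NewCredentials()),
+//		grpc.WithBlock())
+//	// on timeout, err is ctx.Err() (context.DeadlineExceeded), not this sentinel
+//
+// where target is assumed and the credentials choice is just for the sketch.)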
+var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + +func (cc *ClientConn) getResolver(scheme string) resolver.Builder { + for _, rb := range cc.dopts.resolvers { + if scheme == rb.Scheme() { + return rb + } + } + return resolver.Get(scheme) +} + +func (cc *ClientConn) updateConnectionError(err error) { + cc.lceMu.Lock() + cc.lastConnectionError = err + cc.lceMu.Unlock() +} + +func (cc *ClientConn) connectionError() error { + cc.lceMu.Lock() + defer cc.lceMu.Unlock() + return cc.lastConnectionError +} + +func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { + channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) + + var rb resolver.Builder + parsedTarget, err := parseTarget(cc.target) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) + } else { + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.Scheme) + if rb != nil { + cc.parsedTarget = parsedTarget + return rb, nil + } + } + + // We are here because the user's dial target did not contain a scheme or + // specified an unregistered scheme. We should fallback to the default + // scheme, except when a custom dialer is specified in which case, we should + // always use passthrough scheme. + defScheme := resolver.GetDefaultScheme() + channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme) + canonicalTarget := defScheme + ":///" + cc.target + + parsedTarget, err = parseTarget(canonicalTarget) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) + return nil, err + } + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.Scheme) + if rb == nil { + return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme) + } + cc.parsedTarget = parsedTarget + return rb, nil +} + +// parseTarget uses RFC 3986 semantics to parse the given target into a +// resolver.Target struct containing scheme, authority and endpoint. Query +// params are stripped from the endpoint. +func parseTarget(target string) (resolver.Target, error) { + u, err := url.Parse(target) + if err != nil { + return resolver.Target{}, err + } + // For targets of the form "[scheme]://[authority]/endpoint, the endpoint + // value returned from url.Parse() contains a leading "/". Although this is + // in accordance with RFC 3986, we do not want to break existing resolver + // implementations which expect the endpoint without the leading "/". So, we + // end up stripping the leading "/" here. But this will result in an + // incorrect parsing for something like "unix:///path/to/socket". Since we + // own the "unix" resolver, we can workaround in the unix resolver by using + // the `URL` field instead of the `Endpoint` field. + endpoint := u.Path + if endpoint == "" { + endpoint = u.Opaque + } + endpoint = strings.TrimPrefix(endpoint, "/") + return resolver.Target{ + Scheme: u.Scheme, + Authority: u.Host, + Endpoint: endpoint, + URL: *u, + }, nil +} + +// Determine channel authority. 
The order of precedence is as follows: +// - user specified authority override using `WithAuthority` dial option +// - creds' notion of server name for the authentication handshake +// - endpoint from dial target of the form "scheme://[authority]/endpoint" +func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) { + // Historically, we had two options for users to specify the serverName or + // authority for a channel. One was through the transport credentials + // (either in its constructor, or through the OverrideServerName() method). + // The other option (for cases where WithInsecure() dial option was used) + // was to use the WithAuthority() dial option. + // + // A few things have changed since: + // - `insecure` package with an implementation of the `TransportCredentials` + // interface for the insecure case + // - WithAuthority() dial option support for secure credentials + authorityFromCreds := "" + if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" { + authorityFromCreds = creds.Info().ServerName + } + authorityFromDialOption := dopts.authority + if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { + return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + } + + switch { + case authorityFromDialOption != "": + return authorityFromDialOption, nil + case authorityFromCreds != "": + return authorityFromCreds, nil + case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): + // TODO: remove when the unix resolver implements optional interface to + // return channel authority. + return "localhost", nil + case strings.HasPrefix(endpoint, ":"): + return "localhost" + endpoint, nil + default: + // TODO: Define an optional interface on the resolver builder to return + // the channel authority given the user's dial target. For resolvers + // which don't implement this interface, we will use the endpoint from + // "scheme://authority/endpoint" as the default authority. + return endpoint, nil + } +} diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go new file mode 100644 index 000000000..129776547 --- /dev/null +++ b/vendor/google.golang.org/grpc/codec.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "google.golang.org/grpc/encoding" + _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" +) + +// baseCodec contains the functionality of both Codec and encoding.Codec, but +// omits the name/string, which vary between the two and are not needed for +// anything besides the registry in the encoding package. 
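+//
+// (Editor's sketch, not upstream: a user-defined codec satisfies
+// encoding.Codec and is registered once at init; the jsonCodec name below is
+// hypothetical and encoding/json is assumed imported:
+//
+//	type jsonCodec struct{}
+//
+//	func (jsonCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
+//	func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
+//	func (jsonCodec) Name() string                               { return "json" }
+//
+//	func init() { encoding.RegisterCodec(jsonCodec{}) }
+//
+// baseCodec deliberately omits the Name/String method so that both the
+// deprecated Codec and encoding.Codec satisfy it.)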
+type baseCodec interface {
+	Marshal(v interface{}) ([]byte, error)
+	Unmarshal(data []byte, v interface{}) error
+}
+
+var _ baseCodec = Codec(nil)
+var _ baseCodec = encoding.Codec(nil)
+
+// Codec defines the interface gRPC uses to encode and decode messages.
+// Note that implementations of this interface must be thread safe;
+// a Codec's methods can be called from concurrent goroutines.
+//
+// Deprecated: use encoding.Codec instead.
+type Codec interface {
+	// Marshal returns the wire format of v.
+	Marshal(v interface{}) ([]byte, error)
+	// Unmarshal parses the wire format into v.
+	Unmarshal(data []byte, v interface{}) error
+	// String returns the name of the Codec implementation. This is unused by
+	// gRPC.
+	String() string
+}
diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh
new file mode 100644
index 000000000..4cdc6ba7c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/codegen.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+# This script serves as an example to demonstrate how to generate the gRPC-Go
+# interface and the related messages from a .proto file.
+#
+# It assumes the installation of i) the Google protocol buffer compiler at
+# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen
+# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have
+# not installed them, please do so first.
+#
+# We recommend running this script at $GOPATH/src.
+#
+# If this is not what you need, feel free to make your own scripts. Again, this
+# script is for demonstration purposes.
+#
+proto="$1"
+protoc --go_out=plugins=grpc:. "$proto"
diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go
new file mode 100644
index 000000000..0b206a578
--- /dev/null
+++ b/vendor/google.golang.org/grpc/codes/code_string.go
@@ -0,0 +1,62 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package codes + +import "strconv" + +func (c Code) String() string { + switch c { + case OK: + return "OK" + case Canceled: + return "Canceled" + case Unknown: + return "Unknown" + case InvalidArgument: + return "InvalidArgument" + case DeadlineExceeded: + return "DeadlineExceeded" + case NotFound: + return "NotFound" + case AlreadyExists: + return "AlreadyExists" + case PermissionDenied: + return "PermissionDenied" + case ResourceExhausted: + return "ResourceExhausted" + case FailedPrecondition: + return "FailedPrecondition" + case Aborted: + return "Aborted" + case OutOfRange: + return "OutOfRange" + case Unimplemented: + return "Unimplemented" + case Internal: + return "Internal" + case Unavailable: + return "Unavailable" + case DataLoss: + return "DataLoss" + case Unauthenticated: + return "Unauthenticated" + default: + return "Code(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go new file mode 100644 index 000000000..11b106182 --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,244 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package codes defines the canonical error codes used by gRPC. It is +// consistent across various languages. +package codes // import "google.golang.org/grpc/codes" + +import ( + "fmt" + "strconv" +) + +// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +type Code uint32 + +const ( + // OK is returned on success. + OK Code = 0 + + // Canceled indicates the operation was canceled (typically by the caller). + // + // The gRPC framework will generate this error code when cancellation + // is requested. + Canceled Code = 1 + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + // + // The gRPC framework will generate this error code in the above two + // mentioned cases. + Unknown Code = 2 + + // InvalidArgument indicates client specified an invalid argument. + // Note that this differs from FailedPrecondition. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + // + // This error code will not be generated by the gRPC framework. + InvalidArgument Code = 3 + + // DeadlineExceeded means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + // + // The gRPC framework will generate this error code when the deadline is + // exceeded. 
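+	//
+	// For example (an illustrative sketch; client, SomeMethod and req are
+	// hypothetical), this code is surfaced through the status package when
+	// a call's context expires:
+	//
+	//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	//	defer cancel()
+	//	_, err := client.SomeMethod(ctx, req)
+	//	if status.Code(err) == codes.DeadlineExceeded {
+	//		// the deadline expired before a response arrived
+	//	}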
+ DeadlineExceeded Code = 4 + + // NotFound means some requested entity (e.g., file or directory) was + // not found. + // + // This error code will not be generated by the gRPC framework. + NotFound Code = 5 + + // AlreadyExists means an attempt to create an entity failed because one + // already exists. + // + // This error code will not be generated by the gRPC framework. + AlreadyExists Code = 6 + + // PermissionDenied indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use ResourceExhausted + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + // + // This error code will not be generated by the gRPC core framework, + // but expect authentication middleware to use it. + PermissionDenied Code = 7 + + // ResourceExhausted indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + // + // This error code will be generated by the gRPC framework in + // out-of-memory and server overload situations, or when a message is + // larger than the configured maximum size. + ResourceExhausted Code = 8 + + // FailedPrecondition indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FailedPrecondition, Aborted, and Unavailable: + // (a) Use Unavailable if the client can retry just the failing call. + // (b) Use Aborted if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FailedPrecondition if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FailedPrecondition + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FailedPrecondition if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + // + // This error code will not be generated by the gRPC framework. + FailedPrecondition Code = 9 + + // Aborted indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + // + // This error code will not be generated by the gRPC framework. + Aborted Code = 10 + + // OutOfRange means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike InvalidArgument, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate InvalidArgument if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OutOfRange if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FailedPrecondition and + // OutOfRange. 
We recommend using OutOfRange (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OutOfRange error to detect when + // they are done. + // + // This error code will not be generated by the gRPC framework. + OutOfRange Code = 11 + + // Unimplemented indicates operation is not implemented or not + // supported/enabled in this service. + // + // This error code will be generated by the gRPC framework. Most + // commonly, you will see this error code when a method implementation + // is missing on the server. It can also be generated for unknown + // compression algorithms or a disagreement as to whether an RPC should + // be streaming. + Unimplemented Code = 12 + + // Internal errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + // + // This error code will be generated by the gRPC framework in several + // internal error conditions. + Internal Code = 13 + + // Unavailable indicates the service is currently unavailable. + // This is a most likely a transient condition and may be corrected + // by retrying with a backoff. Note that it is not always safe to retry + // non-idempotent operations. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + // + // This error code will be generated by the gRPC framework during + // abrupt shutdown of a server process or network connection. + Unavailable Code = 14 + + // DataLoss indicates unrecoverable data loss or corruption. + // + // This error code will not be generated by the gRPC framework. + DataLoss Code = 15 + + // Unauthenticated indicates the request does not have valid + // authentication credentials for the operation. + // + // The gRPC framework will generate this error code when the + // authentication metadata is invalid or a Credentials callback fails, + // but also expect authentication middleware to generate it. + Unauthenticated Code = 16 + + _maxCode = 17 +) + +var strToCode = map[string]Code{ + `"OK"`: OK, + `"CANCELLED"`:/* [sic] */ Canceled, + `"UNKNOWN"`: Unknown, + `"INVALID_ARGUMENT"`: InvalidArgument, + `"DEADLINE_EXCEEDED"`: DeadlineExceeded, + `"NOT_FOUND"`: NotFound, + `"ALREADY_EXISTS"`: AlreadyExists, + `"PERMISSION_DENIED"`: PermissionDenied, + `"RESOURCE_EXHAUSTED"`: ResourceExhausted, + `"FAILED_PRECONDITION"`: FailedPrecondition, + `"ABORTED"`: Aborted, + `"OUT_OF_RANGE"`: OutOfRange, + `"UNIMPLEMENTED"`: Unimplemented, + `"INTERNAL"`: Internal, + `"UNAVAILABLE"`: Unavailable, + `"DATA_LOSS"`: DataLoss, + `"UNAUTHENTICATED"`: Unauthenticated, +} + +// UnmarshalJSON unmarshals b into the Code. +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. 
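+	// Illustrative examples (not from the upstream sources): both the
+	// numeric and the quoted-string spellings of a code are accepted:
+	//
+	//	var c Code
+	//	_ = c.UnmarshalJSON([]byte("3"))                  // c == InvalidArgument
+	//	_ = c.UnmarshalJSON([]byte(`"INVALID_ARGUMENT"`)) // same result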
+ if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= _maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) + return nil + } + + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) +} diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go new file mode 100644 index 000000000..4a8992642 --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -0,0 +1,94 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package connectivity defines connectivity semantics. +// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. +package connectivity + +import ( + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("core") + +// State indicates the state of connectivity. +// It can be the state of a ClientConn or SubConn. +type State int + +func (s State) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + logger.Errorf("unknown connectivity state: %d", s) + return "INVALID_STATE" + } +} + +const ( + // Idle indicates the ClientConn is idle. + Idle State = iota + // Connecting indicates the ClientConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. + Shutdown +) + +// ServingMode indicates the current mode of operation of the server. +// +// Only xDS enabled gRPC servers currently report their serving mode. +type ServingMode int + +const ( + // ServingModeStarting indicates that the server is starting up. + ServingModeStarting ServingMode = iota + // ServingModeServing indicates that the server contains all required + // configuration and is serving RPCs. + ServingModeServing + // ServingModeNotServing indicates that the server is not accepting new + // connections. Existing connections will be closed gracefully, allowing + // in-progress RPCs to complete. A server enters this mode when it does not + // contain the required configuration to serve RPCs. 
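+	//
+	// Sketch (assumption: the google.golang.org/grpc/xds package's
+	// ServingModeCallback server option, which reports these values for
+	// xDS-enabled servers):
+	//
+	//	opt := xds.ServingModeCallback(func(addr net.Addr, args xds.ServingModeChangeArgs) {
+	//		log.Printf("listener %v is now in mode %v (err: %v)", addr, args.Mode, args.Err)
+	//	})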
+ ServingModeNotServing +) + +func (s ServingMode) String() string { + switch s { + case ServingModeStarting: + return "STARTING" + case ServingModeServing: + return "SERVING" + case ServingModeNotServing: + return "NOT_SERVING" + default: + logger.Errorf("unknown serving mode: %d", s) + return "INVALID_MODE" + } +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go new file mode 100644 index 000000000..96ff1877e --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -0,0 +1,291 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials implements various credentials supported by gRPC library, +// which encapsulate all the state needed by a client to authenticate with a +// server and make various assertions, e.g., about the client's identity, role, +// or whether it is authorized to make a particular call. +package credentials // import "google.golang.org/grpc/credentials" + +import ( + "context" + "errors" + "fmt" + "net" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/attributes" + icredentials "google.golang.org/grpc/internal/credentials" +) + +// PerRPCCredentials defines the common interface for the credentials which need to +// attach security information to every RPC (e.g., oauth2). +type PerRPCCredentials interface { + // GetRequestMetadata gets the current request metadata, refreshing + // tokens if required. This should be called by the transport layer on + // each request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status + // for the RPC. uri is the URI of the entry point for the request. + // When supported by the underlying implementation, ctx can be used for + // timeout and cancellation. Additionally, RequestInfo data will be + // available via ctx to this call. + // TODO(zhaoq): Define the set of the qualified keys instead of leaving + // it as an arbitrary string. + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentials requires + // transport security. + RequireTransportSecurity() bool +} + +// SecurityLevel defines the protection level on an established connection. +// +// This API is experimental. +type SecurityLevel int + +const ( + // InvalidSecurityLevel indicates an invalid security level. + // The zero SecurityLevel value is invalid for backward compatibility. + InvalidSecurityLevel SecurityLevel = iota + // NoSecurity indicates a connection is insecure. + NoSecurity + // IntegrityOnly indicates a connection only provides integrity protection. + IntegrityOnly + // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. + PrivacyAndIntegrity +) + +// String returns SecurityLevel in a string format. 
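+//
+// Security levels are ordered: NoSecurity < IntegrityOnly <
+// PrivacyAndIntegrity. For example (sketch; authInfo comes from the peer of
+// an RPC), CheckSecurityLevel in this package relies on that ordering:
+//
+//	if err := CheckSecurityLevel(authInfo, PrivacyAndIntegrity); err != nil {
+//		// the connection provides a weaker security level than required
+//	}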
+func (s SecurityLevel) String() string { + switch s { + case NoSecurity: + return "NoSecurity" + case IntegrityOnly: + return "IntegrityOnly" + case PrivacyAndIntegrity: + return "PrivacyAndIntegrity" + } + return fmt.Sprintf("invalid SecurityLevel: %v", int(s)) +} + +// CommonAuthInfo contains authenticated information common to AuthInfo implementations. +// It should be embedded in a struct implementing AuthInfo to provide additional information +// about the credentials. +// +// This API is experimental. +type CommonAuthInfo struct { + SecurityLevel SecurityLevel +} + +// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. +func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { + return c +} + +// ProtocolInfo provides information regarding the gRPC wire protocol version, +// security protocol, security protocol version in use, server name, etc. +type ProtocolInfo struct { + // ProtocolVersion is the gRPC wire protocol version. + ProtocolVersion string + // SecurityProtocol is the security protocol in use. + SecurityProtocol string + // SecurityVersion is the security protocol version. It is a static version string from the + // credentials, not a value that reflects per-connection protocol negotiation. To retrieve + // details about the credentials used for a connection, use the Peer's AuthInfo field instead. + // + // Deprecated: please use Peer.AuthInfo. + SecurityVersion string + // ServerName is the user-configured server name. + ServerName string +} + +// AuthInfo defines the common interface for the auth information the users are interested in. +// A struct that implements AuthInfo should embed CommonAuthInfo by including additional +// information about the credentials in it. +type AuthInfo interface { + AuthType() string +} + +// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC +// and the caller should not close rawConn. +var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") + +// TransportCredentials defines the common interface for all the live gRPC wire +// protocols and supported transport security protocols (e.g., TLS, SSL). +type TransportCredentials interface { + // ClientHandshake does the authentication handshake specified by the + // corresponding authentication protocol on rawConn for clients. It returns + // the authenticated connection and the corresponding auth information + // about the connection. The auth information should embed CommonAuthInfo + // to return additional information about the credentials. Implementations + // must use the provided context to implement timely cancellation. gRPC + // will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the + // returned error is a wrapper error, implementations should make sure that + // the error implements Temporary() to have the correct retry behaviors. + // Additionally, ClientHandshakeInfo data will be available via the context + // passed to this call. + // + // The second argument to this method is the `:authority` header value used + // while creating new streams on this connection after authentication + // succeeds. Implementations must use this as the server name during the + // authentication handshake. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. 
It returns + // the authenticated connection and the corresponding auth information about + // the connection. The auth information should embed CommonAuthInfo to return additional information + // about the credentials. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) + // Info provides the ProtocolInfo of this TransportCredentials. + Info() ProtocolInfo + // Clone makes a copy of this TransportCredentials. + Clone() TransportCredentials + // OverrideServerName specifies the value used for the following: + // - verifying the hostname on the returned certificates + // - as SNI in the client's handshake to support virtual hosting + // - as the value for `:authority` header at stream creation time + // + // Deprecated: use grpc.WithAuthority instead. Will be supported + // throughout 1.x. + OverrideServerName(string) error +} + +// Bundle is a combination of TransportCredentials and PerRPCCredentials. +// +// It also contains a mode switching method, so it can be used as a combination +// of different credential policies. +// +// Bundle cannot be used together with individual TransportCredentials. +// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials. +// +// This API is experimental. +type Bundle interface { + // TransportCredentials returns the transport credentials from the Bundle. + // + // Implementations must return non-nil transport credentials. If transport + // security is not needed by the Bundle, implementations may choose to + // return insecure.NewCredentials(). + TransportCredentials() TransportCredentials + + // PerRPCCredentials returns the per-RPC credentials from the Bundle. + // + // May be nil if per-RPC credentials are not needed. + PerRPCCredentials() PerRPCCredentials + + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the + // existing Bundle may cause races. + // + // NewWithMode returns nil if the requested mode is not supported. + NewWithMode(mode string) (Bundle, error) +} + +// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls. +// +// This API is experimental. +type RequestInfo struct { + // The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method") + Method string + // AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake) + AuthInfo AuthInfo +} + +// RequestInfoFromContext extracts the RequestInfo from the context if it exists. +// +// This API is experimental. +func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { + ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo) + return ri, ok +} + +// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes +// it possible to pass arbitrary data to the handshaker from gRPC, resolver, +// balancer etc. Individual credential implementations control the actual +// format of the data that they are willing to receive. +// +// This API is experimental. +type ClientHandshakeInfo struct { + // Attributes contains the attributes for the address. It could be provided + // by the gRPC, resolver, balancer etc. + Attributes *attributes.Attributes +} + +// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored +// in ctx. +// +// This API is experimental. 
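+//
+// For example (sketch; myCreds is a hypothetical TransportCredentials
+// implementation), a handshaker can read resolver-provided address
+// attributes during ClientHandshake:
+//
+//	func (c *myCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, AuthInfo, error) {
+//		chi := ClientHandshakeInfoFromContext(ctx)
+//		_ = chi.Attributes // attributes attached to the address being dialed
+//		// ... perform the real handshake on rawConn ...
+//	}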
+func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
+	chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo)
+	return chi
+}
+
+// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
+// It returns success if 1) the condition is satisfied, 2) the AuthInfo struct does not implement the
+// GetCommonAuthInfo() method, or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. Cases 2) and 3)
+// succeed for the sake of backward compatibility.
+//
+// This API is experimental.
+func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error {
+	type internalInfo interface {
+		GetCommonAuthInfo() CommonAuthInfo
+	}
+	if ai == nil {
+		return errors.New("AuthInfo is nil")
+	}
+	if ci, ok := ai.(internalInfo); ok {
+		// CommonAuthInfo.SecurityLevel has an invalid value.
+		if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel {
+			return nil
+		}
+		if ci.GetCommonAuthInfo().SecurityLevel < level {
+			return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel)
+		}
+	}
+	// The condition is satisfied or the AuthInfo struct does not implement the GetCommonAuthInfo() method.
+	return nil
+}
+
+// ChannelzSecurityInfo defines the interface that security protocols should implement
+// in order to provide security info to channelz.
+//
+// This API is experimental.
+type ChannelzSecurityInfo interface {
+	GetSecurityValue() ChannelzSecurityValue
+}
+
+// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
+// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
+// and *OtherChannelzSecurityValue.
+//
+// This API is experimental.
+type ChannelzSecurityValue interface {
+	isChannelzSecurityValue()
+}
+
+// OtherChannelzSecurityValue defines the struct that non-TLS protocols should return
+// from GetSecurityValue(), which contains protocol-specific security info. Note that
+// the Value field will be sent to users of channelz requesting channel info, so
+// sensitive info should be avoided.
+//
+// This API is experimental.
+type OtherChannelzSecurityValue struct {
+	ChannelzSecurityValue
+	Name  string
+	Value proto.Message
+}
diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
new file mode 100644
index 000000000..82bee1443
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
@@ -0,0 +1,98 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package insecure provides an implementation of the
+// credentials.TransportCredentials interface which disables transport security.
+package insecure
+
+import (
+	"context"
+	"net"
+
+	"google.golang.org/grpc/credentials"
+)
+
+// NewCredentials returns credentials that disable transport security.
+//
+// Note that using these credentials with per-RPC credentials that require
+// transport security is incompatible and will cause grpc.Dial() to fail.
+func NewCredentials() credentials.TransportCredentials {
+	return insecureTC{}
+}
+
+// insecureTC implements the insecure transport credentials. The handshake
+// methods simply return the passed-in net.Conn and set the security level to
+// NoSecurity.
+type insecureTC struct{}
+
+func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
+}
+
+func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
+}
+
+func (insecureTC) Info() credentials.ProtocolInfo {
+	return credentials.ProtocolInfo{SecurityProtocol: "insecure"}
+}
+
+func (insecureTC) Clone() credentials.TransportCredentials {
+	return insecureTC{}
+}
+
+func (insecureTC) OverrideServerName(string) error {
+	return nil
+}
+
+// info contains the auth information for an insecure connection.
+// It implements the AuthInfo interface.
+type info struct {
+	credentials.CommonAuthInfo
+}
+
+// AuthType returns the type of info as a string.
+func (info) AuthType() string {
+	return "insecure"
+}
+
+// insecureBundle implements an insecure bundle.
+// An insecure bundle provides a thin wrapper around insecureTC to support
+// the credentials.Bundle interface.
+type insecureBundle struct{}
+
+// NewBundle returns a bundle with disabled transport security and no per-RPC credentials.
+func NewBundle() credentials.Bundle {
+	return insecureBundle{}
+}
+
+// NewWithMode returns a new insecure Bundle. The mode is ignored.
+func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) {
+	return insecureBundle{}, nil
+}
+
+// PerRPCCredentials returns a nil implementation, as the insecure
+// bundle does not support per-RPC credentials.
+func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials {
+	return nil
+}
+
+// TransportCredentials returns the underlying insecure transport credentials.
+func (insecureBundle) TransportCredentials() credentials.TransportCredentials {
+	return NewCredentials()
+}
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
new file mode 100644
index 000000000..784822d05
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -0,0 +1,236 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/url"
+
+	credinternal "google.golang.org/grpc/internal/credentials"
+)
+
+// TLSInfo contains the auth information for a TLS authenticated connection.
+// It implements the AuthInfo interface.
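+//
+// For example (sketch, written from a server handler's point of view), the
+// client's TLS state can be inspected via the peer attached to the RPC
+// context (package google.golang.org/grpc/peer):
+//
+//	if p, ok := peer.FromContext(ctx); ok {
+//		if tlsInfo, ok := p.AuthInfo.(credentials.TLSInfo); ok {
+//			_ = tlsInfo.State.PeerCertificates // verified client chain, if any
+//		}
+//	}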
+type TLSInfo struct { + State tls.ConnectionState + CommonAuthInfo + // This API is experimental. + SPIFFEID *url.URL +} + +// AuthType returns the type of TLSInfo as a string. +func (t TLSInfo) AuthType() string { + return "tls" +} + +// GetSecurityValue returns security info requested by channelz. +func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { + v := &TLSChannelzSecurityValue{ + StandardName: cipherSuiteLookup[t.State.CipherSuite], + } + // Currently there's no way to get LocalCertificate info from tls package. + if len(t.State.PeerCertificates) > 0 { + v.RemoteCertificate = t.State.PeerCertificates[0].Raw + } + return v +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. +type tlsCreds struct { + // TLS configuration + config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.config.ServerName, + } +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := credinternal.CloneTLSConfig(c.config) + if cfg.ServerName == "" { + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. + serverName = authority + } + cfg.ServerName = serverName + } + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + close(errChannel) + }() + select { + case err := <-errChannel: + if err != nil { + conn.Close() + return nil, nil, err + } + case <-ctx.Done(): + conn.Close() + return nil, nil, ctx.Err() + } + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, c.config) + if err := conn.Handshake(); err != nil { + conn.Close() + return nil, nil, err + } + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + +// NewTLS uses c to construct a TransportCredentials based on TLS. +func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{credinternal.CloneTLSConfig(c)} + tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the provided root +// certificate authority certificate(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. 
If set to a non-empty string,
+// it will override the virtual host name of authority (e.g. :authority header
+// field) in requests.
+func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
+	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
+}
+
+// NewClientTLSFromFile constructs TLS credentials from the provided root
+// certificate authority certificate file(s) to validate server connections. If
+// certificates to establish the identity of the client need to be included in
+// the credentials (e.g., for mTLS), use NewTLS instead, where a complete
+// tls.Config can be specified.
+// serverNameOverride is for testing only. If set to a non-empty string,
+// it will override the virtual host name of authority (e.g. :authority header
+// field) in requests.
+func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
+	b, err := ioutil.ReadFile(certFile)
+	if err != nil {
+		return nil, err
+	}
+	cp := x509.NewCertPool()
+	if !cp.AppendCertsFromPEM(b) {
+		return nil, fmt.Errorf("credentials: failed to append certificates")
+	}
+	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
+}
+
+// NewServerTLSFromCert constructs TLS credentials from the input certificate for the server.
+func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
+	return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
+}
+
+// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
+// file for the server.
+func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
+	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+	if err != nil {
+		return nil, err
+	}
+	return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
+}
+
+// TLSChannelzSecurityValue defines the struct that the TLS protocol should return
+// from GetSecurityValue(), containing security info such as the cipher and
+// certificate used.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type TLSChannelzSecurityValue struct { + ChannelzSecurityValue + StandardName string + LocalCertificate []byte + RemoteCertificate []byte +} + +var cipherSuiteLookup = map[uint16]string{ + tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", + tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", + tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", + tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go new file mode 100644 index 000000000..c4bf09f9e --- /dev/null +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -0,0 +1,617 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "net" + "time" + + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" +) + +// dialOptions configure a Dial call. 
dialOptions are set by the DialOption +// values passed to Dial. +type dialOptions struct { + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + + chainUnaryInts []UnaryClientInterceptor + chainStreamInts []StreamClientInterceptor + + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption + // This is used by WithBalancerName dial option. + balancerBuilder balancer.Builder + channelzParentID int64 + disableServiceConfig bool + disableRetry bool + disableHealthCheck bool + healthCheckFunc internal.HealthChecker + minConnectTimeout func() time.Duration + defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. + defaultServiceConfigRawJSON *string + resolvers []resolver.Builder +} + +// DialOption configures how we set up the connection. +type DialOption interface { + apply(*dialOptions) +} + +// EmptyDialOption does not alter the dial configuration. It can be embedded in +// another structure to build custom dial options. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type EmptyDialOption struct{} + +func (EmptyDialOption) apply(*dialOptions) {} + +// funcDialOption wraps a function that modifies dialOptions into an +// implementation of the DialOption interface. +type funcDialOption struct { + f func(*dialOptions) +} + +func (fdo *funcDialOption) apply(do *dialOptions) { + fdo.f(do) +} + +func newFuncDialOption(f func(*dialOptions)) *funcDialOption { + return &funcDialOption{ + f: f, + } +} + +// WithWriteBufferSize determines how much data can be batched before doing a +// write on the wire. The corresponding memory allocation for this buffer will +// be twice the size to keep syscalls low. The default value for this buffer is +// 32KB. +// +// Zero will disable the write buffer such that each write will be on underlying +// connection. Note: A Send call may not directly translate to a write. +func WithWriteBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.WriteBufferSize = s + }) +} + +// WithReadBufferSize lets you set the size of read buffer, this determines how +// much data can be read at most for each read syscall. +// +// The default value for this buffer is 32KB. Zero will disable read buffer for +// a connection so data framer can access the underlying conn directly. +func WithReadBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.ReadBufferSize = s + }) +} + +// WithInitialWindowSize returns a DialOption which sets the value for initial +// window size on a stream. The lower bound for window size is 64K and any value +// smaller than that will be ignored. +func WithInitialWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialWindowSize = s + }) +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for +// initial window size on a connection. The lower bound for window size is 64K +// and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + }) +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the +// client can receive. 
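+//
+// For example (sketch), the equivalent non-deprecated form raises the
+// per-call receive limit to 16 MiB:
+//
+//	conn, err := grpc.Dial(target,
+//		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(16*1024*1024)))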
+// +// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. Will +// be supported throughout 1.x. +func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default +// CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.callOptions = append(o.callOptions, cos...) + }) +} + +// WithCodec returns a DialOption which sets a codec for message marshaling and +// unmarshaling. +// +// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. Will be +// supported throughout 1.x. +func WithCodec(c Codec) DialOption { + return WithDefaultCallOptions(CallCustomCodec(c)) +} + +// WithCompressor returns a DialOption which sets a Compressor to use for +// message compression. It has lower priority than the compressor set by the +// UseCompressor CallOption. +// +// Deprecated: use UseCompressor instead. Will be supported throughout 1.x. +func WithCompressor(cp Compressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.cp = cp + }) +} + +// WithDecompressor returns a DialOption which sets a Decompressor to use for +// incoming message decompression. If incoming response messages are encoded +// using the decompressor's Type(), it will be used. Otherwise, the message +// encoding will be used to look up the compressor registered via +// encoding.RegisterCompressor, which will then be used to decompress the +// message. If no compressor is registered for the encoding, an Unimplemented +// status error will be returned. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func WithDecompressor(dc Decompressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.dc = dc + }) +} + +// WithBalancerName sets the balancer that the ClientConn will be initialized +// with. Balancer registered with balancerName will be used. This function +// panics if no balancer was registered by balancerName. +// +// The balancer cannot be overridden by balancer option specified by service +// config. +// +// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig +// instead. Will be removed in a future 1.x release. +func WithBalancerName(balancerName string) DialOption { + builder := balancer.Get(balancerName) + if builder == nil { + panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) + } + return newFuncDialOption(func(o *dialOptions) { + o.balancerBuilder = builder + }) +} + +// WithServiceConfig returns a DialOption which has a channel to read the +// service configuration. +// +// Deprecated: service config should be received through name resolver or via +// WithDefaultServiceConfig, as specified at +// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be +// removed in a future 1.x release. +func WithServiceConfig(c <-chan ServiceConfig) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.scChan = c + }) +} + +// WithConnectParams configures the ClientConn to use the provided ConnectParams +// for creating and maintaining connections to servers. +// +// The backoff configuration specified as part of the ConnectParams overrides +// all defaults specified in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. 
Consider +// using the backoff.DefaultConfig as a base, in cases where you want to +// override only a subset of the backoff configuration. +func WithConnectParams(p ConnectParams) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = internalbackoff.Exponential{Config: p.Backoff} + o.minConnectTimeout = func() time.Duration { + return p.MinConnectTimeout + } + }) +} + +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. +func WithBackoffMaxDelay(md time.Duration) DialOption { + return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. +func WithBackoffConfig(b BackoffConfig) DialOption { + bc := backoff.DefaultConfig + bc.MaxDelay = b.MaxDelay + return withBackoff(internalbackoff.Exponential{Config: bc}) +} + +// withBackoff sets the backoff strategy used for connectRetryNum after a failed +// connection attempt. +// +// This can be exported if arbitrary backoff strategies are allowed by gRPC. +func withBackoff(bs internalbackoff.Strategy) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = bs + }) +} + +// WithBlock returns a DialOption which makes callers of Dial block until the +// underlying connection is up. Without this, Dial returns immediately and +// connecting the server happens in background. +func WithBlock() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + }) +} + +// WithReturnConnectionError returns a DialOption which makes the client connection +// return a string containing both the last connection error that occurred and +// the context.DeadlineExceeded error. +// Implies WithBlock() +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithReturnConnectionError() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + o.returnLastError = true + }) +} + +// WithInsecure returns a DialOption which disables transport security for this +// ClientConn. Under the hood, it uses insecure.NewCredentials(). +// +// Note that using this DialOption with per-RPC credentials (through +// WithCredentialsBundle or WithPerRPCCredentials) which require transport +// security is incompatible and will cause grpc.Dial() to fail. +// +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() instead. +// Will be supported throughout 1.x. +func WithInsecure() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.TransportCredentials = insecure.NewCredentials() + }) +} + +// WithNoProxy returns a DialOption which disables the use of proxies for this +// ClientConn. This is ignored if WithDialer or WithContextDialer are used. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithNoProxy() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UseProxy = false + }) +} + +// WithTransportCredentials returns a DialOption which configures a connection +// level security credentials (e.g., TLS/SSL). This should not be used together +// with WithCredentialsBundle. 
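+//
+// For example (sketch; caPath is a hypothetical CA bundle file):
+//
+//	creds, err := credentials.NewClientTLSFromFile(caPath, "")
+//	if err != nil {
+//		log.Fatalf("loading TLS credentials: %v", err)
+//	}
+//	conn, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))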
+func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.TransportCredentials = creds + }) +} + +// WithPerRPCCredentials returns a DialOption which sets credentials and places +// auth state on each outbound RPC. +func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) + }) +} + +// WithCredentialsBundle returns a DialOption to set a credentials bundle for +// the ClientConn.WithCreds. This should not be used together with +// WithTransportCredentials. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithCredentialsBundle(b credentials.Bundle) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.CredsBundle = b + }) +} + +// WithTimeout returns a DialOption that configures a timeout for dialing a +// ClientConn initially. This is valid if and only if WithBlock() is present. +// +// Deprecated: use DialContext instead of Dial and context.WithTimeout +// instead. Will be supported throughout 1.x. +func WithTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.timeout = d + }) +} + +// WithContextDialer returns a DialOption that sets a dialer to create +// connections. If FailOnNonTempDialError() is set to true, and an error is +// returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.Dialer = f + }) +} + +func init() { + internal.WithHealthCheckFunc = withHealthCheckFunc +} + +// WithDialer returns a DialOption that specifies a function to use for dialing +// network addresses. If FailOnNonTempDialError() is set to true, and an error +// is returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +// +// Deprecated: use WithContextDialer instead. Will be supported throughout +// 1.x. +func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { + return WithContextDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + if deadline, ok := ctx.Deadline(); ok { + return f(addr, time.Until(deadline)) + } + return f(addr, 0) + }) +} + +// WithStatsHandler returns a DialOption that specifies the stats handler for +// all the RPCs and underlying network connections in this ClientConn. +func WithStatsHandler(h stats.Handler) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.StatsHandler = h + }) +} + +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on +// non-temporary dial errors. If f is true, and dialer returns a non-temporary +// error, gRPC will fail the connection to the network address and won't try to +// reconnect. The default value of FailOnNonTempDialError is false. +// +// FailOnNonTempDialError only affects the initial dial, and does not do +// anything useful unless you are also using WithBlock(). +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
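+//
+// For example (sketch), combined with WithBlock so that the initial dial
+// error is returned to the caller instead of being retried:
+//
+//	conn, err := grpc.Dial(target,
+//		grpc.WithBlock(),
+//		grpc.FailOnNonTempDialError(true),
+//		grpc.WithTransportCredentials(insecure.NewCredentials()))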
+func FailOnNonTempDialError(f bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.FailOnNonTempDialError = f + }) +} + +// WithUserAgent returns a DialOption that specifies a user agent string for all +// the RPCs. +func WithUserAgent(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UserAgent = s + }) +} + +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters +// for the client transport. +func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { + if kp.Time < internal.KeepaliveMinPingTime { + logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) + kp.Time = internal.KeepaliveMinPingTime + } + return newFuncDialOption(func(o *dialOptions) { + o.copts.KeepaliveParams = kp + }) +} + +// WithUnaryInterceptor returns a DialOption that specifies the interceptor for +// unary RPCs. +func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.unaryInt = f + }) +} + +// WithChainUnaryInterceptor returns a DialOption that specifies the chained +// interceptor for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithUnaryInterceptor will always be prepended to the chain. +func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + +// WithStreamInterceptor returns a DialOption that specifies the interceptor for +// streaming RPCs. +func WithStreamInterceptor(f StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.streamInt = f + }) +} + +// WithChainStreamInterceptor returns a DialOption that specifies the chained +// interceptor for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithStreamInterceptor will always be prepended to the chain. +func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + +// WithAuthority returns a DialOption that specifies the value to be used as the +// :authority pseudo-header and as the server name in authentication handshake. +func WithAuthority(a string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.authority = a + }) +} + +// WithChannelzParentID returns a DialOption that specifies the channelz ID of +// current ClientConn's parent. This function is used in nested channel creation +// (e.g. grpclb dial). +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithChannelzParentID(id int64) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.channelzParentID = id + }) +} + +// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any +// service config provided by the resolver and provides a hint to the resolver +// to not fetch service configs. +// +// Note that this dial option only disables service config from resolver. 
If a +// default service config is provided, gRPC will use the default service config. +func WithDisableServiceConfig() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableServiceConfig = true + }) +} + +// WithDefaultServiceConfig returns a DialOption that configures the default +// service config, which will be used in cases where: +// +// 1. WithDisableServiceConfig is also used, or +// +// 2. The name resolver does not provide a service config or provides an +// invalid service config. +// +// The parameter s is the JSON representation of the default service config. +// For more information about service configs, see: +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +// For a simple example of usage, see: +// examples/features/load_balancing/client/main.go +func WithDefaultServiceConfig(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.defaultServiceConfigRawJSON = &s + }) +} + +// WithDisableRetry returns a DialOption that disables retries, even if the +// service config enables them. This does not impact transparent retries, which +// will happen automatically if no data is written to the wire or if the RPC is +// unprocessed by the remote server. +// +// Retry support is currently enabled by default, but may be disabled by +// setting the environment variable "GRPC_GO_RETRY" to "off". +func WithDisableRetry() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableRetry = true + }) +} + +// WithMaxHeaderListSize returns a DialOption that specifies the maximum +// (uncompressed) size of header list that the client is prepared to accept. +func WithMaxHeaderListSize(s uint32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.MaxHeaderListSize = &s + }) +} + +// WithDisableHealthCheck disables the LB channel health checking for all +// SubConns of this ClientConn. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithDisableHealthCheck() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableHealthCheck = true + }) +} + +// withHealthCheckFunc replaces the default health check function with the +// provided one. It makes it easy for tests to change the health check function. +// +// For testing purposes only. +func withHealthCheckFunc(f internal.HealthChecker) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.healthCheckFunc = f + }) +} + +func defaultDialOptions() dialOptions { + return dialOptions{ + healthCheckFunc: internal.HealthCheckFunc, + copts: transport.ConnectOptions{ + WriteBufferSize: defaultWriteBufSize, + ReadBufferSize: defaultReadBufSize, + UseProxy: true, + }, + } +} + +// withMinConnectDeadline specifies the function that clientconn uses to +// get minConnectDeadline. This can be used to make connection attempts happen +// faster/slower. +// +// For testing purposes only. +func withMinConnectDeadline(f func() time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.minConnectTimeout = f + }) +} + +// WithResolvers allows a list of resolver implementations to be registered +// locally with the ClientConn without needing to be globally registered via +// resolver.Register. They will be matched against the scheme used for the +// current Dial only, and will take precedence over the global registry. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release.
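+//
+// A minimal sketch (assumes exampleBuilder implements resolver.Builder for
+// the "example" scheme; both names are illustrative):
+//
+//	conn, err := grpc.Dial("example:///my-service",
+//		grpc.WithResolvers(&exampleBuilder{}),
+//	)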
+func WithResolvers(rs ...resolver.Builder) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolvers = append(o.resolvers, rs...) + }) +} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go new file mode 100644 index 000000000..0022859ad --- /dev/null +++ b/vendor/google.golang.org/grpc/doc.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate ./regenerate.sh + +/* +Package grpc implements an RPC system called gRPC. + +See grpc.io for more information about gRPC. +*/ +package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go new file mode 100644 index 000000000..6d84f74c7 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package encoding defines the interface for the compressor and codec, and +// functions to register and retrieve compressors and codecs. +// +// Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package encoding + +import ( + "io" + "strings" +) + +// Identity specifies the optional encoding for uncompressed streams. +// It is intended for grpc internal use only. +const Identity = "identity" + +// Compressor is used for compressing and decompressing when sending or +// receiving messages. +type Compressor interface { + // Compress writes the data written to wc to w after compressing it. If an + // error occurs while initializing the compressor, that error is returned + // instead. + Compress(w io.Writer) (io.WriteCloser, error) + // Decompress reads data from r, decompresses it, and provides the + // uncompressed data via the returned io.Reader. If an error occurs while + // initializing the decompressor, that error is returned instead. + Decompress(r io.Reader) (io.Reader, error) + // Name is the name of the compression codec and is used to set the content + // coding header. The result must be static; the result cannot change + // between calls. + Name() string + // If a Compressor implements + // DecompressedSize(compressedBytes []byte) int, gRPC will call it + // to determine the size of the buffer allocated for the result of decompression. + // Return -1 to indicate unknown size. 
+ // +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +} + +var registeredCompressor = make(map[string]Compressor) + +// RegisterCompressor registers the compressor with gRPC by its name. It can +// be activated when sending an RPC via grpc.UseCompressor(). It will be +// automatically accessed when receiving a message based on the content coding +// header. Servers also use it to send a response with the same encoding as +// the request. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Compressors are +// registered with the same name, the one registered last will take effect. +func RegisterCompressor(c Compressor) { + registeredCompressor[c.Name()] = c +} + +// GetCompressor returns the Compressor for the given compressor name. +func GetCompressor(name string) Compressor { + return registeredCompressor[name] +} + +// Codec defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a Codec's +// methods can be called from concurrent goroutines. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +var registeredCodecs = make(map[string]Codec) + +// RegisterCodec registers the provided Codec for use with all gRPC clients and +// servers. +// +// The Codec will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the Codec. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodec will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Codecs are +// registered with the same name, the one registered last will take effect. +func RegisterCodec(codec Codec) { + if codec == nil { + panic("cannot register a nil Codec") + } + if codec.Name() == "" { + panic("cannot register Codec with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. +func GetCodec(contentSubtype string) Codec { + return registeredCodecs[contentSubtype] +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go new file mode 100644 index 000000000..3009b35af --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -0,0 +1,58 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package proto defines the protobuf codec. Importing this package will +// register the codec. +package proto + +import ( + "fmt" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/encoding" +) + +// Name is the name registered for the proto codec. +const Name = "proto" + +func init() { + encoding.RegisterCodec(codec{}) +} + +// codec is a Codec implementation with protobuf. It is the default codec for gRPC. +type codec struct{} + +func (codec) Marshal(v interface{}) ([]byte, error) { + vv, ok := v.(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) + } + return proto.Marshal(vv) +} + +func (codec) Unmarshal(data []byte, v interface{}) error { + vv, ok := v.(proto.Message) + if !ok { + return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) + } + return proto.Unmarshal(data, vv) +} + +func (codec) Name() string { + return Name +} diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go new file mode 100644 index 000000000..8358dd6e2 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -0,0 +1,117 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "fmt" + + "google.golang.org/grpc/internal/grpclog" +) + +// componentData records the settings for a component. +type componentData struct { + name string +} + +var cache = map[string]*componentData{} + +func (c *componentData) InfoDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.InfoDepth(depth+1, args...) +} + +func (c *componentData) WarningDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.WarningDepth(depth+1, args...) +} + +func (c *componentData) ErrorDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.ErrorDepth(depth+1, args...) +} + +func (c *componentData) FatalDepth(depth int, args ...interface{}) { + args = append([]interface{}{"[" + string(c.name) + "]"}, args...) + grpclog.FatalDepth(depth+1, args...) +} + +func (c *componentData) Info(args ...interface{}) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warning(args ...interface{}) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Error(args ...interface{}) { + c.ErrorDepth(1, args...)
+} + +func (c *componentData) Fatal(args ...interface{}) { + c.FatalDepth(1, args...) +} + +func (c *componentData) Infof(format string, args ...interface{}) { + c.InfoDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Warningf(format string, args ...interface{}) { + c.WarningDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Errorf(format string, args ...interface{}) { + c.ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Fatalf(format string, args ...interface{}) { + c.FatalDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Infoln(args ...interface{}) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warningln(args ...interface{}) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Errorln(args ...interface{}) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatalln(args ...interface{}) { + c.FatalDepth(1, args...) +} + +func (c *componentData) V(l int) bool { + return V(l) +} + +// Component creates a new component and returns it for logging. If a component +// with the name already exists, nothing will be created and it will be +// returned. SetLoggerV2 will panic if it is called with a logger created by +// Component. +func Component(componentName string) DepthLoggerV2 { + if cData, ok := cache[componentName]; ok { + return cData + } + c := &componentData{componentName} + cache[componentName] = c + return c +} diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go new file mode 100644 index 000000000..c8bb2be34 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog defines logging for grpc. +// +// All logs in transport and grpclb packages only go to verbose level 2. +// All logs in other packages in grpc are logged regardless of the verbosity level. +// +// In the default logger, +// the severity level can be set by the environment variable GRPC_GO_LOG_SEVERITY_LEVEL, +// and the verbosity level by GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog // import "google.golang.org/grpc/grpclog" + +import ( + "os" + + "google.golang.org/grpc/internal/grpclog" +) + +func init() { + SetLoggerV2(newLoggerV2()) +} + +// V reports whether verbosity level l is at least the requested verbose level. +func V(l int) bool { + return grpclog.Logger.V(l) +} + +// Info logs to the INFO log. +func Info(args ...interface{}) { + grpclog.Logger.Info(args...) +} + +// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. +func Infof(format string, args ...interface{}) { + grpclog.Logger.Infof(format, args...) +} + +// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. +func Infoln(args ...interface{}) { + grpclog.Logger.Infoln(args...) +} + +// Warning logs to the WARNING log. +func Warning(args ...interface{}) { + grpclog.Logger.Warning(args...)
+} + +// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. +func Warningf(format string, args ...interface{}) { + grpclog.Logger.Warningf(format, args...) +} + +// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. +func Warningln(args ...interface{}) { + grpclog.Logger.Warningln(args...) +} + +// Error logs to the ERROR log. +func Error(args ...interface{}) { + grpclog.Logger.Error(args...) +} + +// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. +func Errorf(format string, args ...interface{}) { + grpclog.Logger.Errorf(format, args...) +} + +// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. +func Errorln(args ...interface{}) { + grpclog.Logger.Errorln(args...) +} + +// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. +// It calls os.Exit() with exit code 1. +func Fatal(args ...interface{}) { + grpclog.Logger.Fatal(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. +// It calls os.Exit() with exit code 1. +func Fatalf(format string, args ...interface{}) { + grpclog.Logger.Fatalf(format, args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. +// It calls os.Exit() with exit code 1. +func Fatalln(args ...interface{}) { + grpclog.Logger.Fatalln(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Print prints to the logger. Arguments are handled in the manner of fmt.Print. +// +// Deprecated: use Info. +func Print(args ...interface{}) { + grpclog.Logger.Info(args...) +} + +// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. +// +// Deprecated: use Infof. +func Printf(format string, args ...interface{}) { + grpclog.Logger.Infof(format, args...) +} + +// Println prints to the logger. Arguments are handled in the manner of fmt.Println. +// +// Deprecated: use Infoln. +func Println(args ...interface{}) { + grpclog.Logger.Infoln(args...) +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go new file mode 100644 index 000000000..ef06a4822 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import "google.golang.org/grpc/internal/grpclog" + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. +type Logger interface { + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatalln(args ...interface{}) + Print(args ...interface{}) + Printf(format string, args ...interface{}) + Println(args ...interface{}) +} + +// SetLogger sets the logger that is used in grpc. Call only from +// init() functions.
+// +// Deprecated: use SetLoggerV2. +func SetLogger(l Logger) { + grpclog.Logger = &loggerWrapper{Logger: l} +} + +// loggerWrapper wraps Logger into a LoggerV2. +type loggerWrapper struct { + Logger +} + +func (g *loggerWrapper) Info(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Infoln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Infof(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Warning(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Warningln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Warningf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Error(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Errorln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Errorf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) V(l int) bool { + // Returns true for all verbose levels. + return true +} diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go new file mode 100644 index 000000000..7c1f66409 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -0,0 +1,259 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "strconv" + "strings" + + "google.golang.org/grpc/internal/grpclog" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1).
+ // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// SetLoggerV2 sets logger that is used in grpc to a V2 logger. +// Not mutex-protected, should be called before any gRPC functions. +func SetLoggerV2(l LoggerV2) { + if _, ok := l.(*componentData); ok { + panic("cannot use component logger as grpclog logger") + } + grpclog.Logger = l + grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int + jsonFormat bool +} + +// NewLoggerV2 creates a loggerV2 with the provided writers. +// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). +// Error logs will be written to errorW, warningW and infoW. +// Warning logs will be written to warningW and infoW. +// Info logs will be written to infoW. +func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) +} + +// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and +// verbosity level. +func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) +} + +type loggerV2Config struct { + verbose int + jsonFormat bool +} + +func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { + var m []*log.Logger + flag := log.LstdFlags + if c.jsonFormat { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} +} + +// newLoggerV2 creates a loggerV2 to be used as default logger. +// All logs are written to stderr. +func newLoggerV2() LoggerV2 { + errorW := ioutil.Discard + warningW := ioutil.Discard + infoW := ioutil.Discard + + logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") + switch logLevel { + case "", "ERROR", "error": // If env is unset, set level to ERROR. 
+ errorW = os.Stderr + case "WARNING", "warning": + warningW = os.Stderr + case "INFO", "info": + infoW = os.Stderr + } + + var v int + vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") + if vl, err := strconv.Atoi(vLevel); err == nil { + v = vl + } + + jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") + + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ + verbose: v, + jsonFormat: jsonFormat, + }) +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) +} + +func (g *loggerT) Info(args ...interface{}) { + g.output(infoLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Infoln(args ...interface{}) { + g.output(infoLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Infof(format string, args ...interface{}) { + g.output(infoLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Warning(args ...interface{}) { + g.output(warningLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Warningln(args ...interface{}) { + g.output(warningLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Warningf(format string, args ...interface{}) { + g.output(warningLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Error(args ...interface{}) { + g.output(errorLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Errorln(args ...interface{}) { + g.output(errorLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Errorf(format string, args ...interface{}) { + g.output(errorLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Fatal(args ...interface{}) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalln(args ...interface{}) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...interface{}) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type DepthLoggerV2 interface { + LoggerV2 + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. + InfoDepth(depth int, args ...interface{}) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. + WarningDepth(depth int, args ...interface{}) + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. + ErrorDepth(depth int, args ...interface{}) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. + FatalDepth(depth int, args ...interface{}) +} diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go new file mode 100644 index 000000000..bb96ef57b --- /dev/null +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -0,0 +1,104 @@ +/* + * + * Copyright 2016 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. +type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error + +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. +// Unary interceptors can be specified as a DialOption, using +// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a +// ClientConn. When unary interceptors are set on a ClientConn, gRPC +// delegates all unary RPC invocations to the interceptor, and it is the +// responsibility of the interceptor to call invoker to complete the processing +// of the RPC. +// +// method is the RPC name. req and reply are the corresponding request and +// response messages. cc is the ClientConn on which the RPC was invoked. invoker +// is the handler to complete the RPC and it is the responsibility of the +// interceptor to call it. opts contain all applicable call options, including +// defaults from the ClientConn as well as per-call options. +// +// The returned error must be compatible with the status package. +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error + +// Streamer is called by StreamClientInterceptor to create a ClientStream. +type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) + +// StreamClientInterceptor intercepts the creation of a ClientStream. Stream +// interceptors can be specified as a DialOption, using WithStreamInterceptor() +// or WithChainStreamInterceptor(), when creating a ClientConn. When stream +// interceptors are set on the ClientConn, gRPC delegates all stream creations +// to the interceptor, and it is the responsibility of the interceptor to call +// streamer. +// +// desc contains a description of the stream. cc is the ClientConn on which the +// RPC was invoked. streamer is the handler to create a ClientStream and it is +// the responsibility of the interceptor to call it. opts contain all applicable +// call options, including defaults from the ClientConn as well as per-call +// options. +// +// StreamClientInterceptor may return a custom ClientStream to intercept all I/O +// operations. The returned error must be compatible with the status package. +type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) + +// UnaryServerInfo contains information about a unary RPC on the +// server side. All per-rpc information may be mutated by the interceptor. +type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. + Server interface{} + // FullMethod is the full RPC method string, i.e., /package.service/method.
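+ // For example: "/helloworld.Greeter/SayHello" (an illustrative method).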
+ FullMethod string +} + +// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal +// execution of a unary RPC. +// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. +type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) + +// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info +// contains all the information of this RPC the interceptor can operate on; handler is the wrapper +// of the service method implementation. It is the responsibility of the interceptor to invoke handler +// to complete the RPC. +type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) + +// StreamServerInfo contains information about a streaming RPC on the +// server side. All per-rpc information may be mutated by the interceptor. +type StreamServerInfo struct { + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server. +// info contains all the information of this RPC the interceptor can operate on; handler is the +// service method implementation. It is the responsibility of the interceptor to invoke handler to +// complete the RPC. +type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go new file mode 100644 index 000000000..5fc0ee3da --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -0,0 +1,73 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff implements the backoff strategy for gRPC. +// +// This is kept in internal until the gRPC project decides whether or not to +// allow alternative backoff strategies. +package backoff + +import ( + "time" + + grpcbackoff "google.golang.org/grpc/backoff" + "google.golang.org/grpc/internal/grpcrand" +) + +// Strategy defines the methodology for backing off after a grpc connection +// failure. +type Strategy interface { + // Backoff returns the amount of time to wait before the next retry given + // the number of consecutive failures.
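+ // For example, under grpcbackoff.DefaultConfig (1s base delay, 1.6x
+ // multiplier), Backoff(3) is roughly 1s * 1.6^3 ≈ 4.1s before jitter.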
+ Backoff(retries int) time.Duration +} + +// DefaultExponential is an exponential backoff implementation using the +// default values for all the configurable knobs defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig} + +// Exponential implements the exponential backoff algorithm as defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +type Exponential struct { + // Config contains all options to configure the backoff algorithm. + Config grpcbackoff.Config +} + +// Backoff returns the amount of time to wait before the next retry given the +// number of retries. +func (bc Exponential) Backoff(retries int) time.Duration { + if retries == 0 { + return bc.Config.BaseDelay + } + backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay) + for backoff < max && retries > 0 { + backoff *= bc.Config.Multiplier + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go new file mode 100644 index 000000000..3a905d966 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go @@ -0,0 +1,46 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package balancerload defines APIs to parse server loads in trailers. The +// parsed loads are sent to balancers in DoneInfo. +package balancerload + +import ( + "google.golang.org/grpc/metadata" +) + +// Parser converts loads from metadata into a concrete type. +type Parser interface { + // Parse parses loads from metadata. + Parse(md metadata.MD) interface{} +} + +var parser Parser + +// SetParser sets the load parser. +// +// Not mutex-protected, should be called before any gRPC functions. +func SetParser(lr Parser) { + parser = lr +} + +// Parse calls parser.Parse(). +func Parse(md metadata.MD) interface{} { + if parser == nil { + return nil + } + return parser.Parse(md) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go new file mode 100644 index 000000000..5cc3aeddb --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -0,0 +1,170 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package binarylog implements binary logging as defined in +// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md. +package binarylog + +import ( + "fmt" + "os" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcutil" +) + +// Logger is the global binary logger. It can be used to get a binary logger for +// each method. +type Logger interface { + getMethodLogger(methodName string) *MethodLogger +} + +// binLogger is the global binary logger for the binary. It should be +// built at init time from the configuration (environment variable or flags). +// +// It is used to get a methodLogger for each individual method. +var binLogger Logger + +var grpclogLogger = grpclog.Component("binarylog") + +// SetLogger sets the binary logger. +// +// Only call this at init time. +func SetLogger(l Logger) { + binLogger = l +} + +// GetMethodLogger returns the methodLogger for the given methodName. +// +// methodName should be in the format of "/service/method". +// +// Each methodLogger returned by this method is a new instance. This is to +// generate sequence id within the call. +func GetMethodLogger(methodName string) *MethodLogger { + if binLogger == nil { + return nil + } + return binLogger.getMethodLogger(methodName) +} + +func init() { + const envStr = "GRPC_BINARY_LOG_FILTER" + configStr := os.Getenv(envStr) + binLogger = NewLoggerFromConfigString(configStr) +} + +type methodLoggerConfig struct { + // Max length of header and message. + hdr, msg uint64 +} + +type logger struct { + all *methodLoggerConfig + services map[string]*methodLoggerConfig + methods map[string]*methodLoggerConfig + + blacklist map[string]struct{} +} + +// newEmptyLogger creates an empty logger. The map fields need to be filled in +// using the set* functions. +func newEmptyLogger() *logger { + return &logger{} +} + +// Set method logger for "*". +func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { + if l.all != nil { + return fmt.Errorf("conflicting global rules found") + } + l.all = ml + return nil +} + +// Set method logger for "service/*". +// +// A new methodLogger with the same service overrides the old one. +func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { + if _, ok := l.services[service]; ok { + return fmt.Errorf("conflicting service rules for service %v found", service) + } + if l.services == nil { + l.services = make(map[string]*methodLoggerConfig) + } + l.services[service] = ml + return nil +} + +// Set method logger for "service/method". +// +// A new methodLogger with the same method overrides the old one.
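+//
+// For example (illustrative names), the config fragment "Foo/Bar{h:256}"
+// results in setMethodMethodLogger("Foo/Bar", &methodLoggerConfig{hdr: 256,
+// msg: 0}): headers are capped at 256 bytes and message payloads are
+// truncated to zero bytes.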
+func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { + if _, ok := l.blacklist[method]; ok { + return fmt.Errorf("conflicting blacklist rules for method %v found", method) + } + if _, ok := l.methods[method]; ok { + return fmt.Errorf("conflicting method rules for method %v found", method) + } + if l.methods == nil { + l.methods = make(map[string]*methodLoggerConfig) + } + l.methods[method] = ml + return nil +} + +// Set blacklist method for "-service/method". +func (l *logger) setBlacklist(method string) error { + if _, ok := l.blacklist[method]; ok { + return fmt.Errorf("conflicting blacklist rules for method %v found", method) + } + if _, ok := l.methods[method]; ok { + return fmt.Errorf("conflicting method rules for method %v found", method) + } + if l.blacklist == nil { + l.blacklist = make(map[string]struct{}) + } + l.blacklist[method] = struct{}{} + return nil +} + +// getMethodLogger returns the methodLogger for the given methodName. +// +// methodName should be in the format of "/service/method". +// +// Each methodLogger returned by this method is a new instance. This is to +// generate sequence id within the call. +func (l *logger) getMethodLogger(methodName string) *MethodLogger { + s, m, err := grpcutil.ParseMethod(methodName) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) + return nil + } + if ml, ok := l.methods[s+"/"+m]; ok { + return newMethodLogger(ml.hdr, ml.msg) + } + if _, ok := l.blacklist[s+"/"+m]; ok { + return nil + } + if ml, ok := l.services[s]; ok { + return newMethodLogger(ml.hdr, ml.msg) + } + if l.all == nil { + return nil + } + return newMethodLogger(l.all.hdr, l.all.msg) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go new file mode 100644 index 000000000..1ee00a39a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains variables/functions that are exported for testing +// only. +// +// Ideally these would live in a *_test.go file in the binarylog package, but +// that doesn't work with staticcheck under Go modules. The error was: +// "MdToMetadataProto not declared by package binarylog". This could be caused +// by the way staticcheck looks for files for a certain package, which doesn't +// support *_test.go files. +// +// Move those to binary_test.go when staticcheck is fixed. + +package binarylog + +var ( + // AllLogger is a logger that logs all headers/messages for all RPCs. It's + // for testing only. + AllLogger = NewLoggerFromConfigString("*") + // MdToMetadataProto converts metadata to a binary logging proto message. + // It's for testing only. + MdToMetadataProto = mdToMetadataProto + // AddrToProto converts an address to a binary logging proto message.
It's + // for testing only. + AddrToProto = addrToProto +) diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go new file mode 100644 index 000000000..d8f4e7602 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -0,0 +1,208 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// NewLoggerFromConfigString reads the string and builds a logger. It can be used +// to build a new logger and assign it to binarylog.Logger. +// +// Example filter config strings: +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. +// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. +// +// If two configs exist for a given method or service, the one specified +// later overrides the previous config. +func NewLoggerFromConfigString(s string) Logger { + if s == "" { + return nil + } + l := newEmptyLogger() + methods := strings.Split(s, ",") + for _, method := range methods { + if err := l.fillMethodLoggerWithConfigString(method); err != nil { + grpclogLogger.Warningf("failed to parse binary log config: %v", err) + return nil + } + } + return l +} + +// fillMethodLoggerWithConfigString parses config, creates a methodLogger and adds +// it to the right map in the logger. +func (l *logger) fillMethodLoggerWithConfigString(config string) error { + // "" is invalid. + if config == "" { + return errors.New("empty string is not a valid method binary logging config") + } + + // "-service/method", blacklist, no * or {} allowed.
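+ // For example, "-Foo/Bar" (illustrative names) suppresses logging for the
+ // method /Foo/Bar while other rules in the same config string still apply.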
+ if config[0] == '-' { + s, m, suffix, err := parseMethodConfigAndSuffix(config[1:]) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + if m == "*" { + return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config") + } + if suffix != "" { + return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config") + } + if err := l.setBlacklist(s + "/" + m); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil + } + + // "*{h:256;m:256}" + if config[0] == '*' { + hdr, msg, err := parseHeaderMessageLengthConfig(config[1:]) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil + } + + s, m, suffix, err := parseMethodConfigAndSuffix(config) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + hdr, msg, err := parseHeaderMessageLengthConfig(suffix) + if err != nil { + return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) + } + if m == "*" { + if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } else { + if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } + return nil +} + +const ( + // TODO: this const is only used by env_config now. But could be useful for + // other config. Move to binarylog.go if necessary. + maxUInt = ^uint64(0) + + // For "p.s/m" plus any suffix. Suffix will be parsed again. See test for + // expected output. + longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$` + + // For suffix from above, "{h:123,m:123}". See test for expected output. + optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123". + headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$` + messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$` + headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$` +) + +var ( + longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr) + headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr) + messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr) + headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr) +) + +// Turn "service/method{h;m}" into "service", "method", "{h;m}". +func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) { + // Regexp result: + // + // in: "p.s/m{h:123,m:123}", + // out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"}, + match := longMethodConfigRegexp.FindStringSubmatch(c) + if match == nil { + return "", "", "", fmt.Errorf("%q contains invalid substring", c) + } + service = match[1] + method = match[2] + suffix = match[3] + return +} + +// Turn "{h:123;m:345}" into 123, 345. +// +// Return maxUInt if length is unspecified. +func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) { + if c == "" { + return maxUInt, maxUInt, nil + } + // Header config only. 
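+ // For example, "{h}" leaves the header length unlimited, while "{h:256}"
+ // caps it at 256 bytes; in both cases the message length is returned as 0.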
+ if match := headerConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return hdrLenStr, 0, nil + } + return maxUInt, 0, nil + } + + // Message config only. + if match := messageConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return 0, msgLenStr, nil + } + return 0, maxUInt, nil + } + + // Header and message config both. + if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil { + // Both hdr and msg are specified, but one or two of them might be empty. + hdrLenStr = maxUInt + msgLenStr = maxUInt + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + if s := match[2]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + return hdrLenStr, msgLenStr, nil + } + return 0, 0, fmt.Errorf("%q contains invalid substring", c) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go new file mode 100644 index 000000000..0cdb41831 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -0,0 +1,422 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "net" + "strings" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +type callIDGenerator struct { + id uint64 +} + +func (g *callIDGenerator) next() uint64 { + id := atomic.AddUint64(&g.id, 1) + return id +} + +// reset is for testing only, and doesn't need to be thread safe. +func (g *callIDGenerator) reset() { + g.id = 0 +} + +var idGen callIDGenerator + +// MethodLogger is the sub-logger for each method. +type MethodLogger struct { + headerMaxLen, messageMaxLen uint64 + + callID uint64 + idWithinCallGen *callIDGenerator + + sink Sink // TODO(blog): make this pluggable. +} + +func newMethodLogger(h, m uint64) *MethodLogger { + return &MethodLogger{ + headerMaxLen: h, + messageMaxLen: m, + + callID: idGen.next(), + idWithinCallGen: &callIDGenerator{}, + + sink: DefaultSink, // TODO(blog): make it pluggable. + } +} + +// Log creates a proto binary log entry, and logs it to the sink.
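+//
+// An illustrative call, assuming ml was obtained via GetMethodLogger (the
+// field values below are made up):
+//
+//	ml.Log(&ClientHeader{
+//		OnClientSide: true,
+//		MethodName:   "SayHello",
+//		Authority:    "example.com",
+//	})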
+func (ml *MethodLogger) Log(c LogEntryConfig) { + m := c.toProto() + timestamp, _ := ptypes.TimestampProto(time.Now()) + m.Timestamp = timestamp + m.CallId = ml.callID + m.SequenceIdWithinCall = ml.idWithinCallGen.next() + + switch pay := m.Payload.(type) { + case *pb.GrpcLogEntry_ClientHeader: + m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) + case *pb.GrpcLogEntry_ServerHeader: + m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) + case *pb.GrpcLogEntry_Message: + m.PayloadTruncated = ml.truncateMessage(pay.Message) + } + + ml.sink.Write(m) +} + +func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { + if ml.headerMaxLen == maxUInt { + return false + } + var ( + bytesLimit = ml.headerMaxLen + index int + ) + // At the end of the loop, index will be the first entry where the total + // size is greater than the limit: + // + // len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr. + for ; index < len(mdPb.Entry); index++ { + entry := mdPb.Entry[index] + if entry.Key == "grpc-trace-bin" { + // "grpc-trace-bin" is a special key. It's kept in the log entry, + // but not counted towards the size limit. + continue + } + currentEntryLen := uint64(len(entry.Value)) + if currentEntryLen > bytesLimit { + break + } + bytesLimit -= currentEntryLen + } + truncated = index < len(mdPb.Entry) + mdPb.Entry = mdPb.Entry[:index] + return truncated +} + +func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { + if ml.messageMaxLen == maxUInt { + return false + } + if ml.messageMaxLen >= uint64(len(msgPb.Data)) { + return false + } + msgPb.Data = msgPb.Data[:ml.messageMaxLen] + return true +} + +// LogEntryConfig represents the configuration for binary log entry. +type LogEntryConfig interface { + toProto() *pb.GrpcLogEntry +} + +// ClientHeader configs the binary log entry to be a ClientHeader entry. +type ClientHeader struct { + OnClientSide bool + Header metadata.MD + MethodName string + Authority string + Timeout time.Duration + // PeerAddr is required only when it's on server side. + PeerAddr net.Addr +} + +func (c *ClientHeader) toProto() *pb.GrpcLogEntry { + // This function doesn't need to set all the fields (e.g. seq ID). The Log + // function will set the fields when necessary. + clientHeader := &pb.ClientHeader{ + Metadata: mdToMetadataProto(c.Header), + MethodName: c.MethodName, + Authority: c.Authority, + } + if c.Timeout > 0 { + clientHeader.Timeout = ptypes.DurationProto(c.Timeout) + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Payload: &pb.GrpcLogEntry_ClientHeader{ + ClientHeader: clientHeader, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// ServerHeader configs the binary log entry to be a ServerHeader entry. +type ServerHeader struct { + OnClientSide bool + Header metadata.MD + // PeerAddr is required only when it's on client side. 
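+	// (On the server side the peer address is already recorded on the
+	// corresponding ClientHeader entry, so it is omitted here.)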
+ PeerAddr net.Addr +} + +func (c *ServerHeader) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Payload: &pb.GrpcLogEntry_ServerHeader{ + ServerHeader: &pb.ServerHeader{ + Metadata: mdToMetadataProto(c.Header), + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// ClientMessage configs the binary log entry to be a ClientMessage entry. +type ClientMessage struct { + OnClientSide bool + // Message can be a proto.Message or []byte. Other messages formats are not + // supported. + Message interface{} +} + +func (c *ClientMessage) toProto() *pb.GrpcLogEntry { + var ( + data []byte + err error + ) + if m, ok := c.Message.(proto.Message); ok { + data, err = proto.Marshal(m) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) + } + } else if b, ok := c.Message.([]byte); ok { + data = b + } else { + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Payload: &pb.GrpcLogEntry_Message{ + Message: &pb.Message{ + Length: uint32(len(data)), + Data: data, + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ServerMessage configs the binary log entry to be a ServerMessage entry. +type ServerMessage struct { + OnClientSide bool + // Message can be a proto.Message or []byte. Other messages formats are not + // supported. + Message interface{} +} + +func (c *ServerMessage) toProto() *pb.GrpcLogEntry { + var ( + data []byte + err error + ) + if m, ok := c.Message.(proto.Message); ok { + data, err = proto.Marshal(m) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) + } + } else if b, ok := c.Message.([]byte); ok { + data = b + } else { + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Payload: &pb.GrpcLogEntry_Message{ + Message: &pb.Message{ + Length: uint32(len(data)), + Data: data, + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry. +type ClientHalfClose struct { + OnClientSide bool +} + +func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, + Payload: nil, // No payload here. + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ServerTrailer configs the binary log entry to be a ServerTrailer entry. +type ServerTrailer struct { + OnClientSide bool + Trailer metadata.MD + // Err is the status error. + Err error + // PeerAddr is required only when it's on client side and the RPC is trailer + // only. 
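+	// (For RPCs that are not trailer-only, the client has already recorded
+	// the peer address on the ServerHeader entry.)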
+ PeerAddr net.Addr +} + +func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { + st, ok := status.FromError(c.Err) + if !ok { + grpclogLogger.Info("binarylogging: error in trailer is not a status error") + } + var ( + detailsBytes []byte + err error + ) + stProto := st.Proto() + if stProto != nil && len(stProto.Details) != 0 { + detailsBytes, err = proto.Marshal(stProto) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) + } + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Payload: &pb.GrpcLogEntry_Trailer{ + Trailer: &pb.Trailer{ + Metadata: mdToMetadataProto(c.Trailer), + StatusCode: uint32(st.Code()), + StatusMessage: st.Message(), + StatusDetails: detailsBytes, + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// Cancel configs the binary log entry to be a Cancel entry. +type Cancel struct { + OnClientSide bool +} + +func (c *Cancel) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, + Payload: nil, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// metadataKeyOmit returns whether the metadata entry with this key should be +// omitted. +func metadataKeyOmit(key string) bool { + switch key { + case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te": + return true + case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users. + return false + } + return strings.HasPrefix(key, "grpc-") +} + +func mdToMetadataProto(md metadata.MD) *pb.Metadata { + ret := &pb.Metadata{} + for k, vv := range md { + if metadataKeyOmit(k) { + continue + } + for _, v := range vv { + ret.Entry = append(ret.Entry, + &pb.MetadataEntry{ + Key: k, + Value: []byte(v), + }, + ) + } + } + return ret +} + +func addrToProto(addr net.Addr) *pb.Address { + ret := &pb.Address{} + switch a := addr.(type) { + case *net.TCPAddr: + if a.IP.To4() != nil { + ret.Type = pb.Address_TYPE_IPV4 + } else if a.IP.To16() != nil { + ret.Type = pb.Address_TYPE_IPV6 + } else { + ret.Type = pb.Address_TYPE_UNKNOWN + // Do not set address and port fields. + break + } + ret.Address = a.IP.String() + ret.IpPort = uint32(a.Port) + case *net.UnixAddr: + ret.Type = pb.Address_TYPE_UNIX + ret.Address = a.String() + default: + ret.Type = pb.Address_TYPE_UNKNOWN + } + return ret +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go new file mode 100644 index 000000000..c2fdd58b3 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -0,0 +1,170 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package binarylog + +import ( + "bufio" + "encoding/binary" + "io" + "sync" + "time" + + "github.com/golang/protobuf/proto" + pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" +) + +var ( + // DefaultSink is the sink where the logs will be written to. It's exported + // for the binarylog package to update. + DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp). +) + +// Sink writes log entry into the binary log sink. +// +// sink is a copy of the exported binarylog.Sink, to avoid circular dependency. +type Sink interface { + // Write will be called to write the log entry into the sink. + // + // It should be thread-safe so it can be called in parallel. + Write(*pb.GrpcLogEntry) error + // Close will be called when the Sink is replaced by a new Sink. + Close() error +} + +type noopSink struct{} + +func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } +func (ns *noopSink) Close() error { return nil } + +// newWriterSink creates a binary log sink with the given writer. +// +// Write() marshals the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. +// +// No buffer is done, Close() doesn't try to close the writer. +func newWriterSink(w io.Writer) Sink { + return &writerSink{out: w} +} + +type writerSink struct { + out io.Writer +} + +func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { + b, err := proto.Marshal(e) + if err != nil { + grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) + return err + } + hdr := make([]byte, 4) + binary.BigEndian.PutUint32(hdr, uint32(len(b))) + if _, err := ws.out.Write(hdr); err != nil { + return err + } + if _, err := ws.out.Write(b); err != nil { + return err + } + return nil +} + +func (ws *writerSink) Close() error { return nil } + +type bufferedSink struct { + mu sync.Mutex + closer io.Closer + out Sink // out is built on buf. + buf *bufio.Writer // buf is kept for flush. + flusherStarted bool + + writeTicker *time.Ticker + done chan struct{} +} + +func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { + fs.mu.Lock() + defer fs.mu.Unlock() + if !fs.flusherStarted { + // Start the write loop when Write is called. + fs.startFlushGoroutine() + fs.flusherStarted = true + } + if err := fs.out.Write(e); err != nil { + return err + } + return nil +} + +const ( + bufFlushDuration = 60 * time.Second +) + +func (fs *bufferedSink) startFlushGoroutine() { + fs.writeTicker = time.NewTicker(bufFlushDuration) + go func() { + for { + select { + case <-fs.done: + return + case <-fs.writeTicker.C: + } + fs.mu.Lock() + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } + fs.mu.Unlock() + } + }() +} + +func (fs *bufferedSink) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.writeTicker != nil { + fs.writeTicker.Stop() + } + close(fs.done) + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } + if err := fs.closer.Close(); err != nil { + grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err) + } + if err := fs.out.Close(); err != nil { + grpclogLogger.Warningf("failed to close the Sink: %v", err) + } + return nil +} + +// NewBufferedSink creates a binary log sink with the given WriteCloser. +// +// Write() marshals the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. 
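+//
+// A reader can recover entries by reversing this framing; a minimal sketch,
+// assuming r is an io.Reader positioned at the start of the written stream:
+//
+//	var hdr [4]byte
+//	if _, err := io.ReadFull(r, hdr[:]); err != nil {
+//		return err
+//	}
+//	buf := make([]byte, binary.BigEndian.Uint32(hdr[:]))
+//	if _, err := io.ReadFull(r, buf); err != nil {
+//		return err
+//	}
+//	entry := new(pb.GrpcLogEntry)
+//	err := proto.Unmarshal(buf, entry)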
+// +// Content is kept in a buffer, and is flushed every 60 seconds. +// +// Close closes the WriteCloser. +func NewBufferedSink(o io.WriteCloser) Sink { + bufW := bufio.NewWriter(o) + return &bufferedSink{ + closer: o, + out: newWriterSink(bufW), + buf: bufW, + done: make(chan struct{}), + } +} diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go new file mode 100644 index 000000000..9f6a0c120 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -0,0 +1,85 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package buffer provides an implementation of an unbounded buffer. +package buffer + +import "sync" + +// Unbounded is an implementation of an unbounded buffer which does not use +// extra goroutines. This is typically used for passing updates from one entity +// to another within gRPC. +// +// All methods on this type are thread-safe and don't block on anything except +// the underlying mutex used for synchronization. +// +// Unbounded supports values of any type to be stored in it by using a channel +// of `interface{}`. This means that a call to Put() incurs an extra memory +// allocation, and also that users need a type assertion while reading. For +// performance critical code paths, using Unbounded is strongly discouraged and +// defining a new type specific implementation of this buffer is preferred. See +// internal/transport/transport.go for an example of this. +type Unbounded struct { + c chan interface{} + mu sync.Mutex + backlog []interface{} +} + +// NewUnbounded returns a new instance of Unbounded. +func NewUnbounded() *Unbounded { + return &Unbounded{c: make(chan interface{}, 1)} +} + +// Put adds t to the unbounded buffer. +func (b *Unbounded) Put(t interface{}) { + b.mu.Lock() + if len(b.backlog) == 0 { + select { + case b.c <- t: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, t) + b.mu.Unlock() +} + +// Load sends the earliest buffered data, if any, onto the read channel +// returned by Get(). Users are expected to call this every time they read a +// value from the read channel. +func (b *Unbounded) Load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// Get returns a read channel on which values added to the buffer, via Put(), +// are sent on. +// +// Upon reading a value from this channel, users are expected to call Load() to +// send the next buffered value onto the channel if there is any. 
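+//
+// A typical consumer loop therefore looks like (sketch, with process standing
+// in for application logic):
+//
+//	for {
+//		v := <-b.Get()
+//		b.Load() // release the next buffered value, if any
+//		process(v)
+//	}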
+func (b *Unbounded) Get() <-chan interface{} {
+	return b.c
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
new file mode 100644
index 000000000..ea660a147
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -0,0 +1,744 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package channelz defines APIs for enabling channelz service, entry
+// registration/deletion, and accessing channelz data. It also defines channelz
+// metric struct formats.
+//
+// All APIs in this package are experimental.
+package channelz
+
+import (
+	"context"
+	"fmt"
+	"sort"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+const (
+	defaultMaxTraceEntry int32 = 30
+)
+
+var (
+	db    dbWrapper
+	idGen idGenerator
+	// EntryPerPage defines the number of channelz entries to be shown on a web page.
+	EntryPerPage  = int64(50)
+	curState      int32
+	maxTraceEntry = defaultMaxTraceEntry
+)
+
+// TurnOn turns on channelz data collection.
+func TurnOn() {
+	if !IsOn() {
+		db.set(newChannelMap())
+		idGen.reset()
+		atomic.StoreInt32(&curState, 1)
+	}
+}
+
+// IsOn returns whether channelz data collection is on.
+func IsOn() bool {
+	return atomic.CompareAndSwapInt32(&curState, 1, 1)
+}
+
+// SetMaxTraceEntry sets the maximum number of trace entries per entity (i.e. channel/subchannel).
+// Setting it to 0 will disable channel tracing.
+func SetMaxTraceEntry(i int32) {
+	atomic.StoreInt32(&maxTraceEntry, i)
+}
+
+// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per entity to the default.
+func ResetMaxTraceEntryToDefault() {
+	atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
+}
+
+func getMaxTraceEntry() int {
+	i := atomic.LoadInt32(&maxTraceEntry)
+	return int(i)
+}
+
+// dbWrapper wraps around a reference to the internal channelz data storage, and
+// provides synchronized functionality to set and get the reference.
+type dbWrapper struct {
+	mu sync.RWMutex
+	DB *channelMap
+}
+
+func (d *dbWrapper) set(db *channelMap) {
+	d.mu.Lock()
+	d.DB = db
+	d.mu.Unlock()
+}
+
+func (d *dbWrapper) get() *channelMap {
+	d.mu.RLock()
+	defer d.mu.RUnlock()
+	return d.DB
+}
+
+// NewChannelzStorageForTesting initializes channelz data storage and id
+// generator for testing purposes.
+//
+// Returns a cleanup function to be invoked by the test, which waits for up to
+// 10s for all channelz state to be reset by the grpc goroutines when those
+// entities get closed. This cleanup function helps ensure that tests
+// don't interfere with each other.
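+//
+// Intended use in a test (sketch):
+//
+//	cleanup := channelz.NewChannelzStorageForTesting()
+//	defer func() {
+//		if err := cleanup(); err != nil {
+//			t.Error(err)
+//		}
+//	}()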
+func NewChannelzStorageForTesting() (cleanup func() error) { + db.set(newChannelMap()) + idGen.reset() + + return func() error { + cm := db.get() + if cm == nil { + return nil + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + for { + cm.mu.RLock() + topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) + cm.mu.RUnlock() + + if err := ctx.Err(); err != nil { + return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) + } + if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { + return nil + } + <-ticker.C + } + } +} + +// GetTopChannels returns a slice of top channel's ChannelMetric, along with a +// boolean indicating whether there's more top channels to be queried for. +// +// The arg id specifies that only top channel with id at or above it will be included +// in the result. The returned slice is up to a length of the arg maxResults or +// EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { + return db.get().GetTopChannels(id, maxResults) +} + +// GetServers returns a slice of server's ServerMetric, along with a +// boolean indicating whether there's more servers to be queried for. +// +// The arg id specifies that only server with id at or above it will be included +// in the result. The returned slice is up to a length of the arg maxResults or +// EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) { + return db.get().GetServers(id, maxResults) +} + +// GetServerSockets returns a slice of server's (identified by id) normal socket's +// SocketMetric, along with a boolean indicating whether there's more sockets to +// be queried for. +// +// The arg startID specifies that only sockets with id at or above it will be +// included in the result. The returned slice is up to a length of the arg maxResults +// or EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { + return db.get().GetServerSockets(id, startID, maxResults) +} + +// GetChannel returns the ChannelMetric for the channel (identified by id). +func GetChannel(id int64) *ChannelMetric { + return db.get().GetChannel(id) +} + +// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id). +func GetSubChannel(id int64) *SubChannelMetric { + return db.get().GetSubChannel(id) +} + +// GetSocket returns the SocketInternalMetric for the socket (identified by id). +func GetSocket(id int64) *SocketMetric { + return db.get().GetSocket(id) +} + +// GetServer returns the ServerMetric for the server (identified by id). +func GetServer(id int64) *ServerMetric { + return db.get().GetServer(id) +} + +// RegisterChannel registers the given channel c in channelz database with ref +// as its reference name, and add it to the child list of its parent (identified +// by pid). pid = 0 means no parent. 
It returns the unique channelz tracking id
+// assigned to this channel.
+func RegisterChannel(c Channel, pid int64, ref string) int64 {
+	id := idGen.genID()
+	cn := &channel{
+		refName:     ref,
+		c:           c,
+		subChans:    make(map[int64]string),
+		nestedChans: make(map[int64]string),
+		id:          id,
+		pid:         pid,
+		trace:       &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
+	}
+	if pid == 0 {
+		db.get().addChannel(id, cn, true, pid)
+	} else {
+		db.get().addChannel(id, cn, false, pid)
+	}
+	return id
+}
+
+// RegisterSubChannel registers the given channel c in the channelz database with ref
+// as its reference name, and adds it to the child list of its parent (identified
+// by pid). It returns the unique channelz tracking id assigned to this subchannel.
+func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
+	if pid == 0 {
+		logger.Error("a SubChannel's parent id cannot be 0")
+		return 0
+	}
+	id := idGen.genID()
+	sc := &subChannel{
+		refName: ref,
+		c:       c,
+		sockets: make(map[int64]string),
+		id:      id,
+		pid:     pid,
+		trace:   &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
+	}
+	db.get().addSubChannel(id, sc, pid)
+	return id
+}
+
+// RegisterServer registers the given server s in the channelz database. It returns
+// the unique channelz tracking id assigned to this server.
+func RegisterServer(s Server, ref string) int64 {
+	id := idGen.genID()
+	svr := &server{
+		refName:       ref,
+		s:             s,
+		sockets:       make(map[int64]string),
+		listenSockets: make(map[int64]string),
+		id:            id,
+	}
+	db.get().addServer(id, svr)
+	return id
+}
+
+// RegisterListenSocket registers the given listen socket s in the channelz database
+// with ref as its reference name, and adds it to the child list of its parent
+// (identified by pid). It returns the unique channelz tracking id assigned to
+// this listen socket.
+func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
+	if pid == 0 {
+		logger.Error("a ListenSocket's parent id cannot be 0")
+		return 0
+	}
+	id := idGen.genID()
+	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
+	db.get().addListenSocket(id, ls, pid)
+	return id
+}
+
+// RegisterNormalSocket registers the given normal socket s in the channelz database
+// with ref as its reference name, and adds it to the child list of its parent
+// (identified by pid). It returns the unique channelz tracking id assigned to
+// this normal socket.
+func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
+	if pid == 0 {
+		logger.Error("a NormalSocket's parent id cannot be 0")
+		return 0
+	}
+	id := idGen.genID()
+	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
+	db.get().addNormalSocket(id, ns, pid)
+	return id
+}
+
+// RemoveEntry removes the entry with the given unique channelz tracking id from
+// the channelz database.
+func RemoveEntry(id int64) {
+	db.get().removeEntry(id)
+}
+
+// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added
+// to the channel trace.
+// The Parent field is optional. It is used for an event that should also be recorded in the
+// entity's parent trace.
+type TraceEventDesc struct {
+	Desc     string
+	Severity Severity
+	Parent   *TraceEventDesc
+}
+
+// AddTraceEvent adds a trace event related to the entity with the specified id, using the provided TraceEventDesc.
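+//
+// The event (and any chained Parent events) is also written to the supplied
+// logger at depth+1, so the resulting log line is attributed to the caller of
+// AddTraceEvent rather than to AddTraceEvent itself.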
+func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) {
+	for d := desc; d != nil; d = d.Parent {
+		switch d.Severity {
+		case CtUnknown, CtInfo:
+			l.InfoDepth(depth+1, d.Desc)
+		case CtWarning:
+			l.WarningDepth(depth+1, d.Desc)
+		case CtError:
+			l.ErrorDepth(depth+1, d.Desc)
+		}
+	}
+	if getMaxTraceEntry() == 0 {
+		return
+	}
+	db.get().traceEvent(id, desc)
+}
+
+// channelMap is the storage data structure for channelz.
+// Methods of channelMap can be divided into two categories with respect to locking:
+// 1. Methods that acquire the global lock.
+// 2. Methods that can only be called when the global lock is held.
+// Methods of the second type must always be called from within a method of the first type.
+type channelMap struct {
+	mu               sync.RWMutex
+	topLevelChannels map[int64]struct{}
+	servers          map[int64]*server
+	channels         map[int64]*channel
+	subChannels      map[int64]*subChannel
+	listenSockets    map[int64]*listenSocket
+	normalSockets    map[int64]*normalSocket
+}
+
+func newChannelMap() *channelMap {
+	return &channelMap{
+		topLevelChannels: make(map[int64]struct{}),
+		channels:         make(map[int64]*channel),
+		listenSockets:    make(map[int64]*listenSocket),
+		normalSockets:    make(map[int64]*normalSocket),
+		servers:          make(map[int64]*server),
+		subChannels:      make(map[int64]*subChannel),
+	}
+}
+
+func (c *channelMap) addServer(id int64, s *server) {
+	c.mu.Lock()
+	s.cm = c
+	c.servers[id] = s
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) {
+	c.mu.Lock()
+	cn.cm = c
+	cn.trace.cm = c
+	c.channels[id] = cn
+	if isTopChannel {
+		c.topLevelChannels[id] = struct{}{}
+	} else {
+		c.findEntry(pid).addChild(id, cn)
+	}
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) {
+	c.mu.Lock()
+	sc.cm = c
+	sc.trace.cm = c
+	c.subChannels[id] = sc
+	c.findEntry(pid).addChild(id, sc)
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) {
+	c.mu.Lock()
+	ls.cm = c
+	c.listenSockets[id] = ls
+	c.findEntry(pid).addChild(id, ls)
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) {
+	c.mu.Lock()
+	ns.cm = c
+	c.normalSockets[id] = ns
+	c.findEntry(pid).addChild(id, ns)
+	c.mu.Unlock()
+}
+
+// removeEntry triggers the removal of an entry, which may not actually delete the entry
+// immediately: deletion is deferred until all of its children have been deleted and no
+// other entity's channel trace references it.
+// It may lead to a chain of entry deletions. For example, deleting the last socket of a
+// gracefully shutting down server will lead to the server being deleted as well.
+func (c *channelMap) removeEntry(id int64) {
+	c.mu.Lock()
+	c.findEntry(id).triggerDelete()
+	c.mu.Unlock()
+}
+
+// c.mu must be held by the caller
+func (c *channelMap) decrTraceRefCount(id int64) {
+	e := c.findEntry(id)
+	if v, ok := e.(tracedChannel); ok {
+		v.decrTraceRefCount()
+		e.deleteSelfIfReady()
+	}
+}
+
+// c.mu must be held by the caller.
+func (c *channelMap) findEntry(id int64) entry {
+	var v entry
+	var ok bool
+	if v, ok = c.channels[id]; ok {
+		return v
+	}
+	if v, ok = c.subChannels[id]; ok {
+		return v
+	}
+	if v, ok = c.servers[id]; ok {
+		return v
+	}
+	if v, ok = c.listenSockets[id]; ok {
+		return v
+	}
+	if v, ok = c.normalSockets[id]; ok {
+		return v
+	}
+	return &dummyEntry{idNotFound: id}
+}
+
+// c.mu must be held by the caller
+// deleteEntry simply deletes an entry from the channelMap.
Before calling this +// method, caller must check this entry is ready to be deleted, i.e removeEntry() +// has been called on it, and no children still exist. +// Conditionals are ordered by the expected frequency of deletion of each entity +// type, in order to optimize performance. +func (c *channelMap) deleteEntry(id int64) { + var ok bool + if _, ok = c.normalSockets[id]; ok { + delete(c.normalSockets, id) + return + } + if _, ok = c.subChannels[id]; ok { + delete(c.subChannels, id) + return + } + if _, ok = c.channels[id]; ok { + delete(c.channels, id) + delete(c.topLevelChannels, id) + return + } + if _, ok = c.listenSockets[id]; ok { + delete(c.listenSockets, id) + return + } + if _, ok = c.servers[id]; ok { + delete(c.servers, id) + return + } +} + +func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { + c.mu.Lock() + child := c.findEntry(id) + childTC, ok := child.(tracedChannel) + if !ok { + c.mu.Unlock() + return + } + childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) + if desc.Parent != nil { + parent := c.findEntry(child.getParentID()) + var chanType RefChannelType + switch child.(type) { + case *channel: + chanType = RefChannel + case *subChannel: + chanType = RefSubChannel + } + if parentTC, ok := parent.(tracedChannel); ok { + parentTC.getChannelTrace().append(&TraceEvent{ + Desc: desc.Parent.Desc, + Severity: desc.Parent.Severity, + Timestamp: time.Now(), + RefID: id, + RefName: childTC.getRefName(), + RefType: chanType, + }) + childTC.incrTraceRefCount() + } + } + c.mu.Unlock() +} + +type int64Slice []int64 + +func (s int64Slice) Len() int { return len(s) } +func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { + n := make(map[int64]string) + for k, v := range m { + n[k] = v + } + return n +} + +func min(a, b int64) int64 { + if a < b { + return a + } + return b +} + +func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + c.mu.RLock() + l := int64(len(c.topLevelChannels)) + ids := make([]int64, 0, l) + cns := make([]*channel, 0, min(l, maxResults)) + + for k := range c.topLevelChannels { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := int64(0) + var end bool + var t []*ChannelMetric + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if cn, ok := c.channels[v]; ok { + cns = append(cns, cn) + t = append(t, &ChannelMetric{ + NestedChans: copyMap(cn.nestedChans), + SubChans: copyMap(cn.subChans), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, cn := range cns { + t[i].ChannelData = cn.c.ChannelzMetric() + t[i].ID = cn.id + t[i].RefName = cn.refName + t[i].Trace = cn.trace.dumpData() + } + return t, end +} + +func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + c.mu.RLock() + l := int64(len(c.servers)) + ids := make([]int64, 0, l) + ss := make([]*server, 0, min(l, maxResults)) + for k := range c.servers { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := int64(0) + var end bool + var s []*ServerMetric + for i, v := 
range ids[idx:] { + if count == maxResults { + break + } + if svr, ok := c.servers[v]; ok { + ss = append(ss, svr) + s = append(s, &ServerMetric{ + ListenSockets: copyMap(svr.listenSockets), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, svr := range ss { + s[i].ServerData = svr.s.ChannelzMetric() + s[i].ID = svr.id + s[i].RefName = svr.refName + } + return s, end +} + +func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + // server with id doesn't exist. + c.mu.RUnlock() + return nil, true + } + svrskts := svr.sockets + l := int64(len(svrskts)) + ids := make([]int64, 0, l) + sks := make([]*normalSocket, 0, min(l, maxResults)) + for k := range svrskts { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) + count := int64(0) + var end bool + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if ns, ok := c.normalSockets[v]; ok { + sks = append(sks, ns) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + s := make([]*SocketMetric, 0, len(sks)) + for _, ns := range sks { + sm := &SocketMetric{} + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + s = append(s, sm) + } + return s, end +} + +func (c *channelMap) GetChannel(id int64) *ChannelMetric { + cm := &ChannelMetric{} + var cn *channel + var ok bool + c.mu.RLock() + if cn, ok = c.channels[id]; !ok { + // channel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.NestedChans = copyMap(cn.nestedChans) + cm.SubChans = copyMap(cn.subChans) + // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when + // holding the lock to prevent potential data race. + chanCopy := cn.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = cn.id + cm.RefName = cn.refName + cm.Trace = cn.trace.dumpData() + return cm +} + +func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { + cm := &SubChannelMetric{} + var sc *subChannel + var ok bool + c.mu.RLock() + if sc, ok = c.subChannels[id]; !ok { + // subchannel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.Sockets = copyMap(sc.sockets) + // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when + // holding the lock to prevent potential data race. 
+ chanCopy := sc.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = sc.id + cm.RefName = sc.refName + cm.Trace = sc.trace.dumpData() + return cm +} + +func (c *channelMap) GetSocket(id int64) *SocketMetric { + sm := &SocketMetric{} + c.mu.RLock() + if ls, ok := c.listenSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ls.s.ChannelzMetric() + sm.ID = ls.id + sm.RefName = ls.refName + return sm + } + if ns, ok := c.normalSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + return sm + } + c.mu.RUnlock() + return nil +} + +func (c *channelMap) GetServer(id int64) *ServerMetric { + sm := &ServerMetric{} + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + c.mu.RUnlock() + return nil + } + sm.ListenSockets = copyMap(svr.listenSockets) + c.mu.RUnlock() + sm.ID = svr.id + sm.RefName = svr.refName + sm.ServerData = svr.s.ChannelzMetric() + return sm +} + +type idGenerator struct { + id int64 +} + +func (i *idGenerator) reset() { + atomic.StoreInt64(&i.id, 0) +} + +func (i *idGenerator) genID() int64 { + return atomic.AddInt64(&i.id, 1) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go new file mode 100644 index 000000000..b0013f9c8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -0,0 +1,102 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("channelz") + +// Info logs and adds a trace event if channelz is on. +func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) + } else { + l.InfoDepth(1, args...) + } +} + +// Infof logs and adds a trace event if channelz is on. +func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtInfo, + }) + } else { + l.InfoDepth(1, msg) + } +} + +// Warning logs and adds a trace event if channelz is on. +func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) + } else { + l.WarningDepth(1, args...) + } +} + +// Warningf logs and adds a trace event if channelz is on. +func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtWarning, + }) + } else { + l.WarningDepth(1, msg) + } +} + +// Error logs and adds a trace event if channelz is on. 
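+// For example, channelz.Error(logger, id, "subchannel failure: ", err) logs
+// the message and, when channelz is on, also records it in the entity's trace
+// with CtError severity.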
+func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) + } else { + l.ErrorDepth(1, args...) + } +} + +// Errorf logs and adds a trace event if channelz is on. +func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtError, + }) + } else { + l.ErrorDepth(1, msg) + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go new file mode 100644 index 000000000..3c595d154 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -0,0 +1,701 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "net" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" +) + +// entry represents a node in the channelz database. +type entry interface { + // addChild adds a child e, whose channelz id is id to child list + addChild(id int64, e entry) + // deleteChild deletes a child with channelz id to be id from child list + deleteChild(id int64) + // triggerDelete tries to delete self from channelz database. However, if child + // list is not empty, then deletion from the database is on hold until the last + // child is deleted from database. + triggerDelete() + // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child + // list is now empty. If both conditions are met, then delete self from database. + deleteSelfIfReady() + // getParentID returns parent ID of the entry. 0 value parent ID means no parent. + getParentID() int64 +} + +// dummyEntry is a fake entry to handle entry not found case. +type dummyEntry struct { + idNotFound int64 +} + +func (d *dummyEntry) addChild(id int64, e entry) { + // Note: It is possible for a normal program to reach here under race condition. + // For example, there could be a race between ClientConn.Close() info being propagated + // to addrConn and http2Client. ClientConn.Close() cancel the context and result + // in http2Client to error. The error info is then caught by transport monitor + // and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore, + // the addrConn will create a new transport. And when registering the new transport in + // channelz, its parent addrConn could have already been torn down and deleted + // from channelz tracking, and thus reach the code here. + logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) +} + +func (d *dummyEntry) deleteChild(id int64) { + // It is possible for a normal program to reach here under race condition. + // Refer to the example described in addChild(). 
+ logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) +} + +func (d *dummyEntry) triggerDelete() { + logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) +} + +func (*dummyEntry) deleteSelfIfReady() { + // code should not reach here. deleteSelfIfReady is always called on an existing entry. +} + +func (*dummyEntry) getParentID() int64 { + return 0 +} + +// ChannelMetric defines the info channelz provides for a specific Channel, which +// includes ChannelInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ChannelMetric struct { + // ID is the channelz id of this channel. + ID int64 + // RefName is the human readable reference string of this channel. + RefName string + // ChannelData contains channel internal metric reported by the channel through + // ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this channel in the format of + // a map from nested channel channelz id to corresponding reference string. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this channel in the format of a + // map from subchannel channelz id to corresponding reference string. + SubChans map[int64]string + // Sockets tracks the socket type children of this channel in the format of a map + // from socket channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow channel having sockets directly, + // therefore, this is field is unused. + Sockets map[int64]string + // Trace contains the most recent traced events. + Trace *ChannelTrace +} + +// SubChannelMetric defines the info channelz provides for a specific SubChannel, +// which includes ChannelInternalMetric and channelz-specific data, such as +// channelz id, child list, etc. +type SubChannelMetric struct { + // ID is the channelz id of this subchannel. + ID int64 + // RefName is the human readable reference string of this subchannel. + RefName string + // ChannelData contains subchannel internal metric reported by the subchannel + // through ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this subchannel in the format of + // a map from nested channel channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow subchannel to have nested channels + // as children, therefore, this field is unused. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this subchannel in the format of a + // map from subchannel channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow subchannel to have subchannels + // as children, therefore, this field is unused. + SubChans map[int64]string + // Sockets tracks the socket type children of this subchannel in the format of a map + // from socket channelz id to corresponding reference string. + Sockets map[int64]string + // Trace contains the most recent traced events. + Trace *ChannelTrace +} + +// ChannelInternalMetric defines the struct that the implementor of Channel interface +// should return from ChannelzMetric(). +type ChannelInternalMetric struct { + // current connectivity state of the channel. + State connectivity.State + // The target this channel originally tried to connect to. May be absent + Target string + // The number of calls started on the channel. 
+ CallsStarted int64 + // The number of calls that have completed with an OK status. + CallsSucceeded int64 + // The number of calls that have a completed with a non-OK status. + CallsFailed int64 + // The last time a call was started on the channel. + LastCallStartedTimestamp time.Time +} + +// ChannelTrace stores traced events on a channel/subchannel and related info. +type ChannelTrace struct { + // EventNum is the number of events that ever got traced (i.e. including those that have been deleted) + EventNum int64 + // CreationTime is the creation time of the trace. + CreationTime time.Time + // Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the + // oldest one) + Events []*TraceEvent +} + +// TraceEvent represent a single trace event +type TraceEvent struct { + // Desc is a simple description of the trace event. + Desc string + // Severity states the severity of this trace event. + Severity Severity + // Timestamp is the event time. + Timestamp time.Time + // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is + // involved in this event. + // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) + RefID int64 + // RefName is the reference name for the entity that gets referenced in the event. + RefName string + // RefType indicates the referenced entity type, i.e Channel or SubChannel. + RefType RefChannelType +} + +// Channel is the interface that should be satisfied in order to be tracked by +// channelz as Channel or SubChannel. +type Channel interface { + ChannelzMetric() *ChannelInternalMetric +} + +type dummyChannel struct{} + +func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric { + return &ChannelInternalMetric{} +} + +type channel struct { + refName string + c Channel + closeCalled bool + nestedChans map[int64]string + subChans map[int64]string + id int64 + pid int64 + cm *channelMap + trace *channelTrace + // traceRefCount is the number of trace events that reference this channel. + // Non-zero traceRefCount means the trace of this channel cannot be deleted. + traceRefCount int32 +} + +func (c *channel) addChild(id int64, e entry) { + switch v := e.(type) { + case *subChannel: + c.subChans[id] = v.refName + case *channel: + c.nestedChans[id] = v.refName + default: + logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + } +} + +func (c *channel) deleteChild(id int64) { + delete(c.subChans, id) + delete(c.nestedChans, id) + c.deleteSelfIfReady() +} + +func (c *channel) triggerDelete() { + c.closeCalled = true + c.deleteSelfIfReady() +} + +func (c *channel) getParentID() int64 { + return c.pid +} + +// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means +// deleting the channel reference from its parent's child list. +// +// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the +// corresponding grpc object has been invoked, and the channel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. 
+func (c *channel) deleteSelfFromTree() (deleted bool) { + if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { + return false + } + // not top channel + if c.pid != 0 { + c.cm.findEntry(c.pid).deleteChild(c.id) + } + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means +// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the +// channel, and its memory will be garbage collected. +// +// The trace reference count of the channel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (c *channel) deleteSelfFromMap() (delete bool) { + if c.getTraceRefCount() != 0 { + c.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the channel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. +func (c *channel) deleteSelfIfReady() { + if !c.deleteSelfFromTree() { + return + } + if !c.deleteSelfFromMap() { + return + } + c.cm.deleteEntry(c.id) + c.trace.clear() +} + +func (c *channel) getChannelTrace() *channelTrace { + return c.trace +} + +func (c *channel) incrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, 1) +} + +func (c *channel) decrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, -1) +} + +func (c *channel) getTraceRefCount() int { + i := atomic.LoadInt32(&c.traceRefCount) + return int(i) +} + +func (c *channel) getRefName() string { + return c.refName +} + +type subChannel struct { + refName string + c Channel + closeCalled bool + sockets map[int64]string + id int64 + pid int64 + cm *channelMap + trace *channelTrace + traceRefCount int32 +} + +func (sc *subChannel) addChild(id int64, e entry) { + if v, ok := e.(*normalSocket); ok { + sc.sockets[id] = v.refName + } else { + logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *subChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *subChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *subChannel) getParentID() int64 { + return sc.pid +} + +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which +// means deleting the subchannel reference from its parent's child list. +// +// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of +// the corresponding grpc object has been invoked, and the subchannel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. 
+func (sc *subChannel) deleteSelfFromTree() (deleted bool) { + if !sc.closeCalled || len(sc.sockets) != 0 { + return false + } + sc.cm.findEntry(sc.pid).deleteChild(sc.id) + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query +// the subchannel, and its memory will be garbage collected. +// +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (sc *subChannel) deleteSelfFromMap() (delete bool) { + if sc.getTraceRefCount() != 0 { + // free the grpc struct (i.e. addrConn) + sc.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. +func (sc *subChannel) deleteSelfIfReady() { + if !sc.deleteSelfFromTree() { + return + } + if !sc.deleteSelfFromMap() { + return + } + sc.cm.deleteEntry(sc.id) + sc.trace.clear() +} + +func (sc *subChannel) getChannelTrace() *channelTrace { + return sc.trace +} + +func (sc *subChannel) incrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, 1) +} + +func (sc *subChannel) decrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, -1) +} + +func (sc *subChannel) getTraceRefCount() int { + i := atomic.LoadInt32(&sc.traceRefCount) + return int(i) +} + +func (sc *subChannel) getRefName() string { + return sc.refName +} + +// SocketMetric defines the info channelz provides for a specific Socket, which +// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. +type SocketMetric struct { + // ID is the channelz id of this socket. + ID int64 + // RefName is the human readable reference string of this socket. + RefName string + // SocketData contains socket internal metric reported by the socket through + // ChannelzMetric(). + SocketData *SocketInternalMetric +} + +// SocketInternalMetric defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketInternalMetric struct { + // The number of streams that have been started. + StreamsStarted int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. + StreamsFailed int64 + // The number of messages successfully sent on this socket. + MessagesSent int64 + MessagesReceived int64 + // The number of keep alives sent. 
This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp time.Time + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp time.Time + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp time.Time + // The last time a message was received by this endpoint. + LastMessageReceivedTimestamp time.Time + // The amount of window, granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 + // The locally bound address. + LocalAddr net.Addr + // The remote bound address. May be absent. + RemoteAddr net.Addr + // Optional, represents the name of the remote endpoint, if different than + // the original target name. + RemoteName string + SocketOptions *SocketOptionData + Security credentials.ChannelzSecurityValue +} + +// Socket is the interface that should be satisfied in order to be tracked by +// channelz as Socket. +type Socket interface { + ChannelzMetric() *SocketInternalMetric +} + +type listenSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ls *listenSocket) addChild(id int64, e entry) { + logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) +} + +func (ls *listenSocket) deleteChild(id int64) { + logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) +} + +func (ls *listenSocket) triggerDelete() { + ls.cm.deleteEntry(ls.id) + ls.cm.findEntry(ls.pid).deleteChild(ls.id) +} + +func (ls *listenSocket) deleteSelfIfReady() { + logger.Errorf("cannot call deleteSelfIfReady on a listen socket") +} + +func (ls *listenSocket) getParentID() int64 { + return ls.pid +} + +type normalSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ns *normalSocket) addChild(id int64, e entry) { + logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) +} + +func (ns *normalSocket) deleteChild(id int64) { + logger.Errorf("cannot delete a child (id = %d) from a normal socket", id) +} + +func (ns *normalSocket) triggerDelete() { + ns.cm.deleteEntry(ns.id) + ns.cm.findEntry(ns.pid).deleteChild(ns.id) +} + +func (ns *normalSocket) deleteSelfIfReady() { + logger.Errorf("cannot call deleteSelfIfReady on a normal socket") +} + +func (ns *normalSocket) getParentID() int64 { + return ns.pid +} + +// ServerMetric defines the info channelz provides for a specific Server, which +// includes ServerInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ServerMetric struct { + // ID is the channelz id of this server. + ID int64 + // RefName is the human readable reference string of this server. + RefName string + // ServerData contains server internal metric reported by the server through + // ChannelzMetric(). 
+	ServerData *ServerInternalMetric
+	// ListenSockets tracks the listener socket type children of this server in the
+	// format of a map from socket channelz id to corresponding reference string.
+	ListenSockets map[int64]string
+}
+
+// ServerInternalMetric defines the struct that the implementor of the Server
+// interface should return from ChannelzMetric().
+type ServerInternalMetric struct {
+	// The number of incoming calls started on the server.
+	CallsStarted int64
+	// The number of incoming calls that have completed with an OK status.
+	CallsSucceeded int64
+	// The number of incoming calls that have completed with a non-OK status.
+	CallsFailed int64
+	// The last time a call was started on the server.
+	LastCallStartedTimestamp time.Time
+}
+
+// Server is the interface to be satisfied in order to be tracked by channelz as
+// Server.
+type Server interface {
+	ChannelzMetric() *ServerInternalMetric
+}
+
+type server struct {
+	refName       string
+	s             Server
+	closeCalled   bool
+	sockets       map[int64]string
+	listenSockets map[int64]string
+	id            int64
+	cm            *channelMap
+}
+
+func (s *server) addChild(id int64, e entry) {
+	switch v := e.(type) {
+	case *normalSocket:
+		s.sockets[id] = v.refName
+	case *listenSocket:
+		s.listenSockets[id] = v.refName
+	default:
+		logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
+	}
+}
+
+func (s *server) deleteChild(id int64) {
+	delete(s.sockets, id)
+	delete(s.listenSockets, id)
+	s.deleteSelfIfReady()
+}
+
+func (s *server) triggerDelete() {
+	s.closeCalled = true
+	s.deleteSelfIfReady()
+}
+
+func (s *server) deleteSelfIfReady() {
+	if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
+		return
+	}
+	s.cm.deleteEntry(s.id)
+}
+
+func (s *server) getParentID() int64 {
+	return 0
+}
+
+type tracedChannel interface {
+	getChannelTrace() *channelTrace
+	incrTraceRefCount()
+	decrTraceRefCount()
+	getRefName() string
+}
+
+type channelTrace struct {
+	cm          *channelMap
+	createdTime time.Time
+	eventCount  int64
+	mu          sync.Mutex
+	events      []*TraceEvent
+}
+
+func (c *channelTrace) append(e *TraceEvent) {
+	c.mu.Lock()
+	if len(c.events) == getMaxTraceEntry() {
+		del := c.events[0]
+		c.events = c.events[1:]
+		if del.RefID != 0 {
+			// start recursive cleanup in a goroutine so as not to block the call
+			// originating from grpc.
+			go func() {
+				// need to acquire c.cm.mu lock to call the unlocked attemptCleanup func.
+				c.cm.mu.Lock()
+				c.cm.decrTraceRefCount(del.RefID)
+				c.cm.mu.Unlock()
+			}()
+		}
+	}
+	e.Timestamp = time.Now()
+	c.events = append(c.events, e)
+	c.eventCount++
+	c.mu.Unlock()
+}
+
+func (c *channelTrace) clear() {
+	c.mu.Lock()
+	for _, e := range c.events {
+		if e.RefID != 0 {
+			// the caller should already hold the c.cm.mu lock.
+			c.cm.decrTraceRefCount(e.RefID)
+		}
+	}
+	c.mu.Unlock()
+}
+
+// Severity is the severity level of a trace event.
+// The canonical enumeration of all valid values is here:
+// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
+type Severity int
+
+const (
+	// CtUnknown indicates unknown severity of a trace event.
+	CtUnknown Severity = iota
+	// CtInfo indicates info level severity of a trace event.
+	CtInfo
+	// CtWarning indicates warning level severity of a trace event.
+	CtWarning
+	// CtError indicates error level severity of a trace event.
+	CtError
+)
+
+// RefChannelType is the type of the entity being referenced in a trace event.
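The `ServerInternalMetric`/`Server` contract above boils down to: maintain counters atomically and return a point-in-time snapshot from `ChannelzMetric()`. Below is a minimal sketch of an implementor; since channelz lives under `grpc/internal` and cannot be imported by user code, the sketch mirrors the two types locally so it compiles standalone, and `toyServer`/`handleCall` are hypothetical names, not part of gRPC. (The `RefChannelType` declaration for the comment above continues right after this example.)

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// Local mirrors of the channelz types above, only so the sketch compiles
// standalone; real implementors (e.g. grpc.Server) live inside gRPC.
type ServerInternalMetric struct {
	CallsStarted             int64
	CallsSucceeded           int64
	CallsFailed              int64
	LastCallStartedTimestamp time.Time
}

type Server interface {
	ChannelzMetric() *ServerInternalMetric
}

// toyServer counts calls with atomics so ChannelzMetric can be called
// concurrently with the serving path.
type toyServer struct {
	callsStarted    int64
	callsSucceeded  int64
	callsFailed     int64
	lastCallStarted int64 // unix nanos, stored atomically
}

func (s *toyServer) handleCall(ok bool) {
	atomic.AddInt64(&s.callsStarted, 1)
	atomic.StoreInt64(&s.lastCallStarted, time.Now().UnixNano())
	if ok {
		atomic.AddInt64(&s.callsSucceeded, 1)
	} else {
		atomic.AddInt64(&s.callsFailed, 1)
	}
}

// ChannelzMetric satisfies the Server interface by returning a point-in-time
// snapshot; the caller treats the returned struct as read-only.
func (s *toyServer) ChannelzMetric() *ServerInternalMetric {
	return &ServerInternalMetric{
		CallsStarted:             atomic.LoadInt64(&s.callsStarted),
		CallsSucceeded:           atomic.LoadInt64(&s.callsSucceeded),
		CallsFailed:              atomic.LoadInt64(&s.callsFailed),
		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.lastCallStarted)),
	}
}

func main() {
	var _ Server = (*toyServer)(nil) // compile-time interface check
	s := &toyServer{}
	s.handleCall(true)
	s.handleCall(false)
	fmt.Printf("%+v\n", *s.ChannelzMetric())
}
```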
+type RefChannelType int + +const ( + // RefChannel indicates the referenced entity is a Channel. + RefChannel RefChannelType = iota + // RefSubChannel indicates the referenced entity is a SubChannel. + RefSubChannel +) + +func (c *channelTrace) dumpData() *ChannelTrace { + c.mu.Lock() + ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} + ct.Events = c.events[:len(c.events)] + c.mu.Unlock() + return ct +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go new file mode 100644 index 000000000..1b1c4cce3 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +type SocketOptionData struct { + Linger *unix.Linger + RecvTimeout *unix.Timeval + SendTimeout *unix.Timeval + TCPInfo *unix.TCPInfo +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +func (s *SocketOptionData) Getsockopt(fd uintptr) { + if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil { + s.Linger = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil { + s.RecvTimeout = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil { + s.SendTimeout = v + } + if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil { + s.TCPInfo = v + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go new file mode 100644 index 000000000..8b06eed1a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -0,0 +1,43 @@ +//go:build !linux +// +build !linux + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "sync" +) + +var once sync.Once + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. 
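`Getsockopt` above is written to be handed to `syscall.RawConn.Control`, which invokes a callback with the raw fd while the runtime keeps that fd valid. As a rough usage sketch (linux-only, illustrative, and not code from this diff), the same pattern can be exercised directly on a loopback TCP connection, querying a single option, `SO_LINGER`, via `golang.org/x/sys/unix`:

```go
//go:build linux

package main

import (
	"fmt"
	"log"
	"net"
	"syscall"

	"golang.org/x/sys/unix"
)

func main() {
	// A loopback connection gives us a real fd to inspect.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	conn, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// *net.TCPConn implements syscall.Conn, which is how channelz reaches the fd.
	raw, err := conn.(syscall.Conn).SyscallConn()
	if err != nil {
		log.Fatal(err)
	}

	var linger *unix.Linger
	var sockErr error
	// Control runs the callback with the fd pinned; this mirrors
	// rawConn.Control(data.Getsockopt) in the GetSocketOption helper of this diff.
	if err := raw.Control(func(fd uintptr) {
		linger, sockErr = unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER)
	}); err != nil {
		log.Fatal(err)
	}
	if sockErr != nil {
		log.Fatal(sockErr)
	}
	fmt.Printf("SO_LINGER: onoff=%d linger=%d\n", linger.Onoff, linger.Linger)
}
```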
+// Socket options are not supported on non-linux environments.
+type SocketOptionData struct {
+}
+
+// Getsockopt defines the function to get socket options requested by channelz.
+// It is to be passed to syscall.RawConn.Control().
+// Socket options are not supported on non-linux environments.
+func (s *SocketOptionData) Getsockopt(fd uintptr) {
+	once.Do(func() {
+		logger.Warning("Channelz: socket options are not supported on non-linux environments")
+	})
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
new file mode 100644
index 000000000..8d194e44e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
@@ -0,0 +1,37 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"syscall"
+)
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(socket interface{}) *SocketOptionData {
+	c, ok := socket.(syscall.Conn)
+	if !ok {
+		return nil
+	}
+	data := &SocketOptionData{}
+	if rawConn, err := c.SyscallConn(); err == nil {
+		rawConn.Control(data.Getsockopt)
+		return data
+	}
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
new file mode 100644
index 000000000..837ddc402
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
@@ -0,0 +1,27 @@
+//go:build !linux
+// +build !linux
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(c interface{}) *SocketOptionData {
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
new file mode 100644
index 000000000..32c9b5903
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "context" +) + +// requestInfoKey is a struct to be used as the key to store RequestInfo in a +// context. +type requestInfoKey struct{} + +// NewRequestInfoContext creates a context with ri. +func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) +} + +// RequestInfoFromContext extracts the RequestInfo from ctx. +func RequestInfoFromContext(ctx context.Context) interface{} { + return ctx.Value(requestInfoKey{}) +} + +// clientHandshakeInfoKey is a struct used as the key to store +// ClientHandshakeInfo in a context. +type clientHandshakeInfoKey struct{} + +// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. +func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { + return ctx.Value(clientHandshakeInfoKey{}) +} + +// NewClientHandshakeInfoContext creates a context with chi. +func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { + return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go new file mode 100644 index 000000000..25ade6230 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials defines APIs for parsing SPIFFE ID. +// +// All APIs in this package are experimental. +package credentials + +import ( + "crypto/tls" + "crypto/x509" + "net/url" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("credentials") + +// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format +// is invalid, return nil with warning. +func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { + if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 { + return nil + } + return SPIFFEIDFromCert(state.PeerCertificates[0]) +} + +// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE +// ID format is invalid, return nil with warning. +func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL { + if cert == nil || cert.URIs == nil { + return nil + } + var spiffeID *url.URL + for _, uri := range cert.URIs { + if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") { + continue + } + // From this point, we assume the uri is intended for a SPIFFE ID. 
+		if len(uri.String()) > 2048 {
+			logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes")
+			return nil
+		}
+		if len(uri.Host) == 0 || len(uri.Path) == 0 {
+			logger.Warning("invalid SPIFFE ID: domain or workload ID is empty")
+			return nil
+		}
+		if len(uri.Host) > 255 {
+			logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters")
+			return nil
+		}
+		// A valid SPIFFE certificate can only have exactly one URI SAN field.
+		if len(cert.URIs) > 1 {
+			logger.Warning("invalid SPIFFE ID: multiple URI SANs")
+			return nil
+		}
+		spiffeID = uri
+	}
+	return spiffeID
+}
diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
new file mode 100644
index 000000000..2919632d6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
@@ -0,0 +1,58 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+	"net"
+	"syscall"
+)
+
+type sysConn = syscall.Conn
+
+// syscallConn keeps a reference to rawConn to support syscall.Conn for
+// channelz. SyscallConn() (the method in interface syscall.Conn) is explicitly
+// implemented on this type.
+//
+// Interface syscall.Conn is implemented by most net.Conn implementations (e.g.
+// TCPConn, UnixConn), but is not part of the net.Conn interface. So wrapper
+// conns that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn
+// doesn't embed net.Conn, so even if syscall.Conn were part of net.Conn, it
+// wouldn't help here).
+type syscallConn struct {
+	net.Conn
+	// sysConn is a type alias of syscall.Conn. It's necessary because the name
+	// `Conn` collides with `net.Conn`.
+	sysConn
+}
+
+// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that
+// implements syscall.Conn. rawConn will be used to support syscall, and newConn
+// will be used for read/write.
+//
+// This function returns newConn if rawConn doesn't implement syscall.Conn.
+func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
+	sysConn, ok := rawConn.(syscall.Conn)
+	if !ok {
+		return newConn
+	}
+	return &syscallConn{
+		Conn:    newConn,
+		sysConn: sysConn,
+	}
+}
diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go
new file mode 100644
index 000000000..f792fd22c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/util.go
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +const alpnProtoStrH2 = "h2" + +// AppendH2ToNextProtos appends h2 to next protos. +func AppendH2ToNextProtos(ps []string) []string { + for _, p := range ps { + if p == alpnProtoStrH2 { + return ps + } + } + ret := make([]string, 0, len(ps)+1) + ret = append(ret, ps...) + return append(ret, alpnProtoStrH2) +} + +// CloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO: inline this function if possible. +func CloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go new file mode 100644 index 000000000..6f0272543 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -0,0 +1,35 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package envconfig contains grpc settings configured by environment variables. +package envconfig + +import ( + "os" + "strings" +) + +const ( + prefix = "GRPC_GO_" + txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" +) + +var ( + // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") +) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go new file mode 100644 index 000000000..7d996e51b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -0,0 +1,101 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import ( + "os" + "strings" +) + +const ( + // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. + // Do not use this and read from env directly. Its value is read and kept in + // variable XDSBootstrapFileName. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" + // XDSBootstrapFileContentEnv is the env variable to set bootstrap file + // content. 
Do not use this and read from env directly. Its value is read
+	// and kept in variable XDSBootstrapFileContent.
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
+
+	ringHashSupportEnv           = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH"
+	clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"
+	aggregateAndDNSSupportEnv    = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
+	rbacSupportEnv               = "GRPC_XDS_EXPERIMENTAL_RBAC"
+	outlierDetectionSupportEnv   = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION"
+	federationEnv                = "GRPC_EXPERIMENTAL_XDS_FEDERATION"
+	rlsInXDSEnv                  = "GRPC_EXPERIMENTAL_XDS_RLS_LB"
+
+	c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI"
+)
+
+var (
+	// XDSBootstrapFileName holds the name of the file which contains xDS
+	// bootstrap configuration. Users can specify the location of the bootstrap
+	// file by setting the environment variable "GRPC_XDS_BOOTSTRAP".
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv)
+	// XDSBootstrapFileContent holds the content of the xDS bootstrap
+	// configuration. Users can specify the bootstrap config by setting the
+	// environment variable "GRPC_XDS_BOOTSTRAP_CONFIG".
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv)
+	// XDSRingHash indicates whether ring hash support is enabled, which can be
+	// disabled by setting the environment variable
+	// "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
+	XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false")
+	// XDSClientSideSecurity is used to control processing of security
+	// configuration on the client-side.
+	//
+	// Note that there is no env var protection for the server-side because we
+	// have a brand new API on the server-side and users explicitly need to use
+	// the new API to get security integration on the server.
+	XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false")
+	// XDSAggregateAndDNS indicates whether processing of aggregated cluster
+	// and DNS cluster is enabled, which can be enabled by setting the
+	// environment variable
+	// "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
+	// "true".
+	XDSAggregateAndDNS = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true")
+
+	// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
+	// which can be disabled by setting the environment variable
+	// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
+	XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false")
+	// XDSOutlierDetection indicates whether outlier detection support is
+	// enabled, which can be enabled by setting the environment variable
+	// "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true".
+	XDSOutlierDetection = strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "true")
+	// XDSFederation indicates whether federation support is enabled.
+	XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true")
+
+	// XDSRLS indicates whether processing of Cluster Specifier plugins and
+	// support for the RLS Cluster Specifier is enabled, which can be enabled by
+	// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
+	// "true".
+ XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true") + + // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. + C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) +) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go new file mode 100644 index 000000000..30a3b4258 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog (internal) defines depth logging for grpc. +package grpclog + +import ( + "os" +) + +// Logger is the logger used for the non-depth log functions. +var Logger LoggerV2 + +// DepthLogger is the logger used for the depth log functions. +var DepthLogger DepthLoggerV2 + +// InfoDepth logs to the INFO log at the specified depth. +func InfoDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.InfoDepth(depth, args...) + } else { + Logger.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +func WarningDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.WarningDepth(depth, args...) + } else { + Logger.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +func ErrorDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.ErrorDepth(depth, args...) + } else { + Logger.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +func FatalDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.FatalDepth(depth, args...) + } else { + Logger.Fatalln(args...) + } + os.Exit(1) +} + +// LoggerV2 does underlying logging work for grpclog. +// This is a copy of the LoggerV2 defined in the external grpclog package. It +// is defined here to avoid a circular dependency. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. 
Arguments are handled in the manner of fmt.Printf.
+	Errorf(format string, args ...interface{})
+	// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatal(args ...interface{})
+	// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatalln(args ...interface{})
+	// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatalf(format string, args ...interface{})
+	// V reports whether verbosity level l is at least the requested verbose level.
+	V(l int) bool
+}
+
+// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
+// DepthLoggerV2, the below functions will be called with the appropriate stack
+// depth set for trivial functions the logger may ignore.
+// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
+// It is defined here to avoid a circular dependency.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type DepthLoggerV2 interface {
+	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
+	InfoDepth(depth int, args ...interface{})
+	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
+	WarningDepth(depth int, args ...interface{})
+	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
+	ErrorDepth(depth int, args ...interface{})
+	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
+	FatalDepth(depth int, args ...interface{})
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
new file mode 100644
index 000000000..82af70e96
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import (
+	"fmt"
+)
+
+// PrefixLogger does logging with a prefix.
+//
+// Logging methods on a nil *PrefixLogger log without any prefix.
+type PrefixLogger struct {
+	logger DepthLoggerV2
+	prefix string
+}
+
+// Infof does info logging.
+func (pl *PrefixLogger) Infof(format string, args ...interface{}) {
+	if pl != nil {
+		// Handle nil, so the tests can pass in a nil logger.
+ format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return + } + InfoDepth(1, fmt.Sprintf(format, args...)) +} + +// Warningf does warning logging. +func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { + if pl != nil { + format = pl.prefix + format + pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) + return + } + WarningDepth(1, fmt.Sprintf(format, args...)) +} + +// Errorf does error logging. +func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { + if pl != nil { + format = pl.prefix + format + pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) + return + } + ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +// Debugf does info logging at verbose level 2. +func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { + if !Logger.V(2) { + return + } + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return + } + InfoDepth(1, fmt.Sprintf(format, args...)) +} + +// NewPrefixLogger creates a prefix logger with the given prefix. +func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { + return &PrefixLogger{logger: logger, prefix: prefix} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go new file mode 100644 index 000000000..740f83c2b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -0,0 +1,67 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcrand implements math/rand functions in a concurrent-safe way +// with a global random source, independent of math/rand's global source. +package grpcrand + +import ( + "math/rand" + "sync" + "time" +) + +var ( + r = rand.New(rand.NewSource(time.Now().UnixNano())) + mu sync.Mutex +) + +// Int implements rand.Int on the grpcrand global source. +func Int() int { + mu.Lock() + defer mu.Unlock() + return r.Int() +} + +// Int63n implements rand.Int63n on the grpcrand global source. +func Int63n(n int64) int64 { + mu.Lock() + defer mu.Unlock() + return r.Int63n(n) +} + +// Intn implements rand.Intn on the grpcrand global source. +func Intn(n int) int { + mu.Lock() + defer mu.Unlock() + return r.Intn(n) +} + +// Float64 implements rand.Float64 on the grpcrand global source. +func Float64() float64 { + mu.Lock() + defer mu.Unlock() + return r.Float64() +} + +// Uint64 implements rand.Uint64 on the grpcrand global source. +func Uint64() uint64 { + mu.Lock() + defer mu.Unlock() + return r.Uint64() +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go new file mode 100644 index 000000000..fbe697c37 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go @@ -0,0 +1,61 @@ +/* + * + * Copyright 2018 gRPC authors. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpcsync implements additional synchronization primitives built upon
+// the sync package.
+package grpcsync
+
+import (
+	"sync"
+	"sync/atomic"
+)
+
+// Event represents a one-time event that may occur in the future.
+type Event struct {
+	fired int32
+	c     chan struct{}
+	o     sync.Once
+}
+
+// Fire causes e to complete. It is safe to call multiple times, and
+// concurrently. It returns true iff this call to Fire caused the signaling
+// channel returned by Done to close.
+func (e *Event) Fire() bool {
+	ret := false
+	e.o.Do(func() {
+		atomic.StoreInt32(&e.fired, 1)
+		close(e.c)
+		ret = true
+	})
+	return ret
+}
+
+// Done returns a channel that will be closed when Fire is called.
+func (e *Event) Done() <-chan struct{} {
+	return e.c
+}
+
+// HasFired returns true if Fire has been called.
+func (e *Event) HasFired() bool {
+	return atomic.LoadInt32(&e.fired) == 1
+}
+
+// NewEvent returns a new, ready-to-use Event.
+func NewEvent() *Event {
+	return &Event{c: make(chan struct{})}
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
new file mode 100644
index 000000000..b25b0baec
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
@@ -0,0 +1,63 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+	"strconv"
+	"time"
+)
+
+const maxTimeoutValue int64 = 100000000 - 1
+
+// div does integer division and rounds up the result. Note that this is
+// equivalent to (d+r-1)/r but has less chance of overflowing.
+func div(d, r time.Duration) int64 {
+	if d%r > 0 {
+		return int64(d/r + 1)
+	}
+	return int64(d / r)
+}
+
+// EncodeDuration encodes the duration to the format grpc-timeout header
+// accepts.
+//
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+func EncodeDuration(t time.Duration) string {
+	// TODO: This is simplistic and not bandwidth efficient. Improve it.
+ if t <= 0 { + return "0n" + } + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. + return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go new file mode 100644 index 000000000..e2f948e8f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go @@ -0,0 +1,20 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcutil provides utility functions used across the gRPC codebase. +package grpcutil diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go new file mode 100644 index 000000000..6f22bd891 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +type mdExtraKey struct{} + +// WithExtraMetadata creates a new context with incoming md attached. +func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context { + return context.WithValue(ctx, mdExtraKey{}, md) +} + +// ExtraMetadata returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) { + md, ok = ctx.Value(mdExtraKey{}).(metadata.MD) + return +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go new file mode 100644 index 000000000..4e7475060 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -0,0 +1,84 @@ +/* + * + * Copyright 2018 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "errors" + "strings" +) + +// ParseMethod splits service and method from the input. It expects format +// "/service/method". +// +func ParseMethod(methodName string) (service, method string, _ error) { + if !strings.HasPrefix(methodName, "/") { + return "", "", errors.New("invalid method name: should start with /") + } + methodName = methodName[1:] + + pos := strings.LastIndex(methodName, "/") + if pos < 0 { + return "", "", errors.New("invalid method name: suffix /method is missing") + } + return methodName[:pos], methodName[pos+1:], nil +} + +const baseContentType = "application/grpc" + +// ContentSubtype returns the content-subtype for the given content-type. The +// given content-type must be a valid content-type that starts with +// "application/grpc". A content-subtype will follow "application/grpc" after a +// "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If contentType is not a valid content-type for gRPC, the boolean +// will be false, otherwise true. If content-type == "application/grpc", +// "application/grpc+", or "application/grpc;", the boolean will be true, +// but no content-subtype will be returned. +// +// contentType is assumed to be lowercase already. +func ContentSubtype(contentType string) (string, bool) { + if contentType == baseContentType { + return "", true + } + if !strings.HasPrefix(contentType, baseContentType) { + return "", false + } + // guaranteed since != baseContentType and has baseContentType prefix + switch contentType[len(baseContentType)] { + case '+', ';': + // this will return true for "application/grpc+" or "application/grpc;" + // which the previous validContentType function tested to be valid, so we + // just say that no content-subtype is specified in this case + return contentType[len(baseContentType)+1:], true + default: + return "", false + } +} + +// ContentType builds full content type with the given sub-type. +// +// contentSubtype is assumed to be lowercase +func ContentType(contentSubtype string) string { + if contentSubtype == "" { + return baseContentType + } + return baseContentType + "+" + contentSubtype +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go new file mode 100644 index 000000000..7a092b2b8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go @@ -0,0 +1,31 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import "regexp"
+
+// FullMatchWithRegex returns whether the full text matches the regex provided.
+func FullMatchWithRegex(re *regexp.Regexp, text string) bool {
+	if len(text) == 0 {
+		return re.MatchString(text)
+	}
+	re.Longest()
+	rem := re.FindString(text)
+	return len(rem) == len(text)
+}
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
new file mode 100644
index 000000000..20fb880f3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains gRPC-internal code, to avoid polluting
+// the godoc of the top-level grpc package. It must not import any grpc
+// symbols to avoid circular dependencies.
+package internal
+
+import (
+	"context"
+	"time"
+
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	// WithHealthCheckFunc is set by dialoptions.go
+	WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
+	// HealthCheckFunc is used to provide client-side LB channel health checking
+	HealthCheckFunc HealthChecker
+	// BalancerUnregister is exported by package balancer to unregister a balancer.
+	BalancerUnregister func(name string)
+	// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
+	// default, but tests may wish to set it lower for convenience.
+	KeepaliveMinPingTime = 10 * time.Second
+	// ParseServiceConfig parses a JSON representation of the service config.
+	ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult
+	// EqualServiceConfigForTesting is for testing service config generation and
+	// parsing. Both a and b should be returned by ParseServiceConfig.
+	// This function compares the config without rawJSON stripped, in case
+	// there's a difference in white space.
+	EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
+	// GetCertificateProviderBuilder returns the registered builder for the
+	// given name. This is set by package certprovider for use from xDS
+	// bootstrap code while parsing certificate provider configs in the
+	// bootstrap file.
+	GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder
+	// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
+	// stored in the passed in attributes. This is set by
+	// credentials/xds/xds.go.
+	GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo
+	// GetServerCredentials returns the transport credentials configured on a
+	// gRPC server. An xDS-enabled server needs to know what type of credentials
+	// is configured on the underlying gRPC server. This is set by server.go.
+	GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials
+	// DrainServerTransports initiates a graceful close of existing connections
+	// on a gRPC server accepted on the provided listener address. An
+	// xDS-enabled server invokes this method on a grpc.Server when a particular
+	// listener moves to "not-serving" mode.
+	DrainServerTransports interface{} // func(*grpc.Server, string)
+)
+
+// HealthChecker defines the signature of the client-side LB channel health checking function.
+//
+// The implementation is expected to create a health checking RPC stream by
+// calling newStream(), watch for the health status of serviceName, and report
+// its health back by calling setConnectivityState().
+//
+// The health checking protocol is defined at:
+// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
+type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error
+
+const (
+	// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
+	CredsBundleModeFallback = "fallback"
+	// CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer
+	// mode.
+	CredsBundleModeBalancer = "balancer"
+	// CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode
+	// that supports backend returned by grpclb balancer.
+	CredsBundleModeBackendFromBalancer = "backend-from-balancer"
+)
diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
new file mode 100644
index 000000000..b8733dbf3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata contains functions to set and get metadata from addresses.
+//
+// This package is experimental.
+package metadata
+
+import (
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
+)
+
+type mdKeyType string
+
+const mdKey = mdKeyType("grpc.internal.address.metadata")
+
+type mdValue metadata.MD
+
+func (m mdValue) Equal(o interface{}) bool {
+	om, ok := o.(mdValue)
+	if !ok {
+		return false
+	}
+	if len(m) != len(om) {
+		return false
+	}
+	for k, v := range m {
+		ov := om[k]
+		if len(ov) != len(v) {
+			return false
+		}
+		for i, ve := range v {
+			if ov[i] != ve {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// Get returns the metadata of addr.
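The Get/Set helpers that follow store metadata on `resolver.Address` via its `Attributes` field, which is public API, so the pattern is easy to reproduce outside gRPC's internal packages. Below is a minimal sketch under that assumption (`addrMDKey`, `setMD`, and `getMD` are hypothetical names; it also skips the `mdValue` wrapper above, whose `Equal` method is what lets gRPC compare two addresses by value):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/resolver"
)

// addrMDKey plays the role of the unexported mdKey above; a private key type
// guarantees no other package can collide with this attribute.
type addrMDKey struct{}

// setMD attaches md to a copy of addr, mirroring Set in this file.
func setMD(addr resolver.Address, md metadata.MD) resolver.Address {
	// WithValue handles a nil *attributes.Attributes receiver.
	addr.Attributes = addr.Attributes.WithValue(addrMDKey{}, md)
	return addr
}

// getMD reads the metadata back, mirroring Get in this file.
func getMD(addr resolver.Address) metadata.MD {
	if addr.Attributes == nil {
		return nil
	}
	md, _ := addr.Attributes.Value(addrMDKey{}).(metadata.MD)
	return md
}

func main() {
	addr := resolver.Address{Addr: "10.0.0.1:443"}
	addr = setMD(addr, metadata.Pairs("x-zone", "us-east-1"))
	fmt.Println(getMD(addr)) // map[x-zone:[us-east-1]]
}
```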
+func Get(addr resolver.Address) metadata.MD { + attrs := addr.Attributes + if attrs == nil { + return nil + } + md, _ := attrs.Value(mdKey).(mdValue) + return metadata.MD(md) +} + +// Set sets (overrides) the metadata in addr. +// +// When a SubConn is created with this address, the RPCs sent on it will all +// have this metadata. +func Set(addr resolver.Address, md metadata.MD) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) + return addr +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go new file mode 100644 index 000000000..c7a18a948 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -0,0 +1,167 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver provides internal resolver-related functionality. +package resolver + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +// ConfigSelector controls what configuration to use for every RPC. +type ConfigSelector interface { + // Selects the configuration for the RPC, or terminates it using the error. + // This error will be converted by the gRPC library to a status error with + // code UNKNOWN if it is not returned as a status error. + SelectConfig(RPCInfo) (*RPCConfig, error) +} + +// RPCInfo contains RPC information needed by a ConfigSelector. +type RPCInfo struct { + // Context is the user's context for the RPC and contains headers and + // application timeout. It is passed for interception purposes and for + // efficiency reasons. SelectConfig should not be blocking. + Context context.Context + Method string // i.e. "/Service/Method" +} + +// RPCConfig describes the configuration to use for each RPC. +type RPCConfig struct { + // The context to use for the remainder of the RPC; can pass info to LB + // policy or affect timeout or metadata. + Context context.Context + MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC + OnCommitted func() // Called when the RPC has been committed (retries no longer possible) + Interceptor ClientInterceptor +} + +// ClientStream is the same as grpc.ClientStream, but defined here for circular +// dependency reasons. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. 
+	CloseSend() error
+	// Context returns the context for this stream.
+	//
+	// It should not be called until after Header or RecvMsg has returned. Once
+	// called, subsequent client-side retries are disabled.
+	Context() context.Context
+	// SendMsg is generally called by generated code. On error, SendMsg aborts
+	// the stream. If the error was generated by the client, the status is
+	// returned directly; otherwise, io.EOF is returned and the status of
+	// the stream may be discovered using RecvMsg.
+	//
+	// SendMsg blocks until:
+	//   - There is sufficient flow control to schedule m with the transport, or
+	//   - The stream is done, or
+	//   - The stream breaks.
+	//
+	// SendMsg does not wait until the message is received by the server. An
+	// untimely stream closure may result in lost messages. To ensure delivery,
+	// users should ensure the RPC completed successfully using RecvMsg.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not safe
+	// to call SendMsg on the same stream in different goroutines. It is also
+	// not safe to call CloseSend concurrently with SendMsg.
+	SendMsg(m interface{}) error
+	// RecvMsg blocks until it receives a message into m or the stream is
+	// done. It returns io.EOF when the stream completes successfully. On
+	// any other error, the stream is aborted and the error contains the RPC
+	// status.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not
+	// safe to call RecvMsg on the same stream in different goroutines.
+	RecvMsg(m interface{}) error
+}
+
+// ClientInterceptor is an interceptor for gRPC client streams.
+type ClientInterceptor interface {
+	// NewStream produces a ClientStream for an RPC which may optionally use
+	// the provided function to produce a stream for delegation. Note:
+	// RPCInfo.Context should not be used (will be nil).
+	//
+	// done is invoked when the RPC is finished using its connection, or could
+	// not be assigned a connection. RPC operations may still occur on
+	// ClientStream after done is called, since the interceptor is invoked by
+	// application-layer operations. done must never be nil when called.
+	NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error)
+}
+
+// ServerInterceptor is an interceptor for incoming RPCs on the gRPC server side.
+type ServerInterceptor interface {
+	// AllowRPC checks if an incoming RPC is allowed to proceed based on
+	// information about the connection the RPC was received on, and the HTTP
+	// headers. This information will be piped into context.
+	AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting.
+}
+
+type csKeyType string
+
+const csKey = csKeyType("grpc.internal.resolver.configSelector")
+
+// SetConfigSelector sets the config selector in state and returns the new
+// state.
+func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State {
+	state.Attributes = state.Attributes.WithValue(csKey, cs)
+	return state
+}
+
+// GetConfigSelector retrieves the config selector from state, if present, and
+// returns it or nil if absent.
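Before the implementation: to make the ConfigSelector plumbing concrete, here is a minimal sketch of a selector that applies one fixed MethodConfig to every RPC and is published through SetConfigSelector. The type and function names (fixedSelector, publish) are illustrative, not part of the patch.

package resolverexample

import (
	iresolver "google.golang.org/grpc/internal/resolver"
	"google.golang.org/grpc/internal/serviceconfig"
	"google.golang.org/grpc/resolver"
)

// fixedSelector returns the same MethodConfig for every RPC (hypothetical).
type fixedSelector struct {
	mc serviceconfig.MethodConfig
}

func (s *fixedSelector) SelectConfig(ri iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
	return &iresolver.RPCConfig{
		Context:      ri.Context, // keep the caller's context unchanged
		MethodConfig: s.mc,
	}, nil
}

// publish stashes the selector in the resolver state sent to the channel.
func publish(cc resolver.ClientConn, addrs []resolver.Address, s *fixedSelector) error {
	state := iresolver.SetConfigSelector(resolver.State{Addresses: addrs}, s)
	return cc.UpdateState(state)
}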
+func GetConfigSelector(state resolver.State) ConfigSelector { + cs, _ := state.Attributes.Value(csKey).(ConfigSelector) + return cs +} + +// SafeConfigSelector allows for safe switching of ConfigSelector +// implementations such that previous values are guaranteed to not be in use +// when UpdateConfigSelector returns. +type SafeConfigSelector struct { + mu sync.RWMutex + cs ConfigSelector +} + +// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until +// all uses of the previous ConfigSelector have completed. +func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) { + scs.mu.Lock() + defer scs.mu.Unlock() + scs.cs = cs +} + +// SelectConfig defers to the current ConfigSelector in scs. +func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) { + scs.mu.RLock() + defer scs.mu.RUnlock() + return scs.cs.SelectConfig(r) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go new file mode 100644 index 000000000..75301c514 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -0,0 +1,458 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +package dns + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB +// addresses from SRV records. Must not be changed after init time. +var EnableSRVLookups = false + +var logger = grpclog.Component("dns") + +// Globals to stub out in tests. TODO: Perhaps these two can be combined into a +// single variable for testing the resolver? +var ( + newTimer = time.NewTimer + newTimerDNSResRate = time.NewTimer +) + +func init() { + resolver.Register(NewBuilder()) +} + +const ( + defaultPort = "443" + defaultDNSSvrPort = "53" + golang = "GO" + // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. + txtPrefix = "_grpc_config." + // In DNS, service config is encoded in a TXT record via the mechanism + // described in RFC-1464 using the attribute name grpc_config. + txtAttribute = "grpc_config=" +) + +var ( + errMissingAddr = errors.New("dns resolver: missing address") + + // Addresses ending with a colon that is supposed to be the separator + // between host and port is not allowed. E.g. 
"::" is a valid address as + // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with + // a colon as the host and port separator + errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +var ( + defaultResolver netResolver = net.DefaultResolver + // To prevent excessive re-resolution, we enforce a rate limit on DNS + // resolution requests. + minDNSResRate = 30 * time.Second +) + +var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { + return func(ctx context.Context, network, address string) (net.Conn, error) { + var dialer net.Dialer + return dialer.DialContext(ctx, network, authority) + } +} + +var customAuthorityResolver = func(authority string) (netResolver, error) { + host, port, err := parseTarget(authority, defaultDNSSvrPort) + if err != nil { + return nil, err + } + + authorityWithPort := net.JoinHostPort(host, port) + + return &net.Resolver{ + PreferGo: true, + Dial: customAuthorityDialler(authorityWithPort), + }, nil +} + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +func NewBuilder() resolver.Builder { + return &dnsBuilder{} +} + +type dnsBuilder struct{} + +// Build creates and starts a DNS resolver that watches the name resolution of the target. +func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + host, port, err := parseTarget(target.Endpoint, defaultPort) + if err != nil { + return nil, err + } + + // IP address. + if ipAddr, ok := formatIP(host); ok { + addr := []resolver.Address{{Addr: ipAddr + ":" + port}} + cc.UpdateState(resolver.State{Addresses: addr}) + return deadResolver{}, nil + } + + // DNS address (non-IP). + ctx, cancel := context.WithCancel(context.Background()) + d := &dnsResolver{ + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + rn: make(chan struct{}, 1), + disableServiceConfig: opts.DisableServiceConfig, + } + + if target.Authority == "" { + d.resolver = defaultResolver + } else { + d.resolver, err = customAuthorityResolver(target.Authority) + if err != nil { + return nil, err + } + } + + d.wg.Add(1) + go d.watcher() + return d, nil +} + +// Scheme returns the naming scheme of this resolver builder, which is "dns". +func (b *dnsBuilder) Scheme() string { + return "dns" +} + +type netResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + +// deadResolver is a resolver that does nothing. +type deadResolver struct{} + +func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (deadResolver) Close() {} + +// dnsResolver watches for the name resolution update for a non-IP target. +type dnsResolver struct { + host string + port string + resolver netResolver + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn + // rn channel is used by ResolveNow() to force an immediate resolution of the target. + rn chan struct{} + // wg is used to enforce Close() to return after the watcher() goroutine has finished. + // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we + // replace the real lookup functions with mocked ones to facilitate testing. 
+	// If Close() doesn't wait for the watcher() goroutine to finish, the race
+	// detector sometimes warns that a lookup (READ of the lookup function
+	// pointers) inside the watcher() goroutine races with replaceNetFunc
+	// (WRITE of the lookup function pointers).
+	wg                   sync.WaitGroup
+	disableServiceConfig bool
+}
+
+// ResolveNow invokes an immediate resolution of the target that this dnsResolver watches.
+func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
+	select {
+	case d.rn <- struct{}{}:
+	default:
+	}
+}
+
+// Close closes the dnsResolver.
+func (d *dnsResolver) Close() {
+	d.cancel()
+	d.wg.Wait()
+}
+
+func (d *dnsResolver) watcher() {
+	defer d.wg.Done()
+	backoffIndex := 1
+	for {
+		state, err := d.lookup()
+		if err != nil {
+			// Report error to the underlying grpc.ClientConn.
+			d.cc.ReportError(err)
+		} else {
+			err = d.cc.UpdateState(*state)
+		}
+
+		var timer *time.Timer
+		if err == nil {
+			// Success resolving; wait for the next ResolveNow. However, also wait at
+			// least 30 seconds to prevent constantly re-resolving.
+			backoffIndex = 1
+			timer = newTimerDNSResRate(minDNSResRate)
+			select {
+			case <-d.ctx.Done():
+				timer.Stop()
+				return
+			case <-d.rn:
+			}
+		} else {
+			// Poll on an error found in DNS Resolver or an error received from ClientConn.
+			timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
+			backoffIndex++
+		}
+		select {
+		case <-d.ctx.Done():
+			timer.Stop()
+			return
+		case <-timer.C:
+		}
+	}
+}
+
+func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
+	if !EnableSRVLookups {
+		return nil, nil
+	}
+	var newAddrs []resolver.Address
+	_, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host)
+	if err != nil {
+		err = handleDNSError(err, "SRV") // may become nil
+		return nil, err
+	}
+	for _, s := range srvs {
+		lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target)
+		if err != nil {
+			err = handleDNSError(err, "A") // may become nil
+			if err == nil {
+				// If there are other SRV records, look them up and ignore this
+				// one that does not exist.
+				continue
+			}
+			return nil, err
+		}
+		for _, a := range lbAddrs {
+			ip, ok := formatIP(a)
+			if !ok {
+				return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
+			}
+			addr := ip + ":" + strconv.Itoa(int(s.Port))
+			newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
+		}
+	}
+	return newAddrs, nil
+}
+
+func handleDNSError(err error, lookupType string) error {
+	if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
+		// Timeouts and temporary errors should be communicated to gRPC to
+		// attempt another DNS query (with backoff). Other errors should be
+		// suppressed (they may represent the absence of a TXT record).
+		return nil
+	}
+	if err != nil {
+		err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
+		logger.Info(err)
+	}
+	return err
+}
+
+func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
+	ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host)
+	if err != nil {
+		if envconfig.TXTErrIgnore {
+			return nil
+		}
+		if err = handleDNSError(err, "TXT"); err != nil {
+			return &serviceconfig.ParseResult{Err: err}
+		}
+		return nil
+	}
+	var res string
+	for _, s := range ss {
+		res += s
+	}
+
+	// TXT record must have "grpc_config=" attribute in order to be used as service config.
+	if !strings.HasPrefix(res, txtAttribute) {
+		logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
+		// This is not an error; it is the equivalent of not having a service config.
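(An aside before lookupTXT resumes below.) For a channel target with host "example.com" (a hypothetical name), the lookup above queries the TXT record _grpc_config.example.com, concatenates its strings, and requires the grpc_config= attribute prefix. A standalone paraphrase of that tail end:

package main

import (
	"fmt"
	"strings"
)

const txtAttribute = "grpc_config="

// serviceConfigFromTXT paraphrases the tail of lookupTXT: join the TXT
// record's strings and strip the mandatory grpc_config= attribute.
func serviceConfigFromTXT(records []string) (string, bool) {
	joined := strings.Join(records, "")
	if !strings.HasPrefix(joined, txtAttribute) {
		return "", false
	}
	return strings.TrimPrefix(joined, txtAttribute), true
}

func main() {
	// A DNS TXT answer is a sequence of strings; hypothetical record:
	records := []string{`grpc_config=[{"serviceConfig":{"loadBalancingPolicy":"round_robin"}}]`}
	sc, ok := serviceConfigFromTXT(records)
	fmt.Println(ok, sc)
}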
+ return nil + } + sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) + return d.cc.ParseServiceConfig(sc) +} + +func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { + addrs, err := d.resolver.LookupHost(d.ctx, d.host) + if err != nil { + err = handleDNSError(err, "A") + return nil, err + } + newAddrs := make([]resolver.Address, 0, len(addrs)) + for _, a := range addrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + d.port + newAddrs = append(newAddrs, resolver.Address{Addr: addr}) + } + return newAddrs, nil +} + +func (d *dnsResolver) lookup() (*resolver.State, error) { + srv, srvErr := d.lookupSRV() + addrs, hostErr := d.lookupHost() + if hostErr != nil && (srvErr != nil || len(srv) == 0) { + return nil, hostErr + } + + state := resolver.State{Addresses: addrs} + if len(srv) > 0 { + state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) + } + if !d.disableServiceConfig { + state.ServiceConfig = d.lookupTXT() + } + return &state, nil +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string and default port, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in square brackets, brackets +// are stripped when setting the host. +// examples: +// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" +// target: ":80" defaultPort: "443" returns host: "localhost", port: "80" +func parseTarget(target, defaultPort string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err = net.SplitHostPort(target); err == nil { + if port == "" { + // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. + return "", "", errEndsWithColon + } + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. 
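(An aside before parseTarget resumes below.) The doc comment above enumerates parseTarget's cases; they can be pinned down with a table-driven test. This is a sketch that would live in this package, since parseTarget is unexported, with expectations derived directly from the comment:

func TestParseTargetExamples(t *testing.T) {
	for _, tc := range []struct{ in, host, port string }{
		{"www.google.com", "www.google.com", "443"},
		{"ipv4-host:80", "ipv4-host", "80"},
		{"[::1]", "::1", "443"}, // brackets stripped from IPv6 hosts
		{":80", "localhost", "80"},
	} {
		host, port, err := parseTarget(tc.in, "443")
		if err != nil || host != tc.host || port != tc.port {
			t.Errorf("parseTarget(%q, %q) = %q, %q, %v; want %q, %q",
				tc.in, "443", host, port, err, tc.host, tc.port)
		}
	}
}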
+ host = "localhost" + } + return host, port, nil + } + if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { + ClientLanguage *[]string `json:"clientLanguage,omitempty"` + Percentage *int `json:"percentage,omitempty"` + ClientHostName *[]string `json:"clientHostName,omitempty"` + ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { + if a == nil { + return true + } + for _, c := range *a { + if c == b { + return true + } + } + return false +} + +func chosenByPercentage(a *int) bool { + if a == nil { + return true + } + return grpcrand.Intn(100)+1 <= *a +} + +func canaryingSC(js string) string { + if js == "" { + return "" + } + var rcs []rawChoice + err := json.Unmarshal([]byte(js), &rcs) + if err != nil { + logger.Warningf("dns: error parsing service config json: %v", err) + return "" + } + cliHostname, err := os.Hostname() + if err != nil { + logger.Warningf("dns: error getting client hostname: %v", err) + return "" + } + var sc string + for _, c := range rcs { + if !containsString(c.ClientLanguage, golang) || + !chosenByPercentage(c.Percentage) || + !containsString(c.ClientHostName, cliHostname) || + c.ServiceConfig == nil { + continue + } + sc = string(*c.ServiceConfig) + break + } + return sc +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go new file mode 100644 index 000000000..520d9229e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -0,0 +1,57 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. 
+package passthrough + +import "google.golang.org/grpc/resolver" + +const scheme = "passthrough" + +type passthroughBuilder struct{} + +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r := &passthroughResolver{ + target: target, + cc: cc, + } + r.start() + return r, nil +} + +func (*passthroughBuilder) Scheme() string { + return scheme +} + +type passthroughResolver struct { + target resolver.Target + cc resolver.ClientConn +} + +func (r *passthroughResolver) start() { + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) +} + +func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} + +func (*passthroughResolver) Close() {} + +func init() { + resolver.Register(&passthroughBuilder{}) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go new file mode 100644 index 000000000..20852e59d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -0,0 +1,73 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package unix implements a resolver for unix targets. +package unix + +import ( + "fmt" + + "google.golang.org/grpc/internal/transport/networktype" + "google.golang.org/grpc/resolver" +) + +const unixScheme = "unix" +const unixAbstractScheme = "unix-abstract" + +type builder struct { + scheme string +} + +func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { + if target.Authority != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) + } + + // gRPC was parsing the dial target manually before PR #4817, and we + // switched to using url.Parse() in that PR. To avoid breaking existing + // resolver implementations we ended up stripping the leading "/" from the + // endpoint. This obviously does not work for the "unix" scheme. Hence we + // end up using the parsed URL instead. 
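(An aside before Build resumes below.) For context, a client reaches the unix resolvers with dial targets like the ones sketched here; the socket path and abstract name are hypothetical, and grpc.Dial/grpc.WithInsecure match the API of this vendored gRPC version:

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// "unix:///tmp/app.sock" resolves to the address "/tmp/app.sock";
	// "unix-abstract:app-name" would resolve to "\x00app-name".
	conn, err := grpc.Dial("unix:///tmp/app.sock", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}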
+ endpoint := target.URL.Path + if endpoint == "" { + endpoint = target.URL.Opaque + } + addr := resolver.Address{Addr: endpoint} + if b.scheme == unixAbstractScheme { + // prepend "\x00" to address for unix-abstract + addr.Addr = "\x00" + addr.Addr + } + cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) + return &nopResolver{}, nil +} + +func (b *builder) Scheme() string { + return b.scheme +} + +type nopResolver struct { +} + +func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (*nopResolver) Close() {} + +func init() { + resolver.Register(&builder{scheme: unixScheme}) + resolver.Register(&builder{scheme: unixAbstractScheme}) +} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go new file mode 100644 index 000000000..badbdbf59 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -0,0 +1,180 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig contains utility functions to parse service config. +package serviceconfig + +import ( + "encoding/json" + "fmt" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + externalserviceconfig "google.golang.org/grpc/serviceconfig" +) + +var logger = grpclog.Component("core") + +// BalancerConfig wraps the name and config associated with one load balancing +// policy. It corresponds to a single entry of the loadBalancingConfig field +// from ServiceConfig. +// +// It implements the json.Unmarshaler interface. +// +// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247 +type BalancerConfig struct { + Name string + Config externalserviceconfig.LoadBalancingConfig +} + +type intermediateBalancerConfig []map[string]json.RawMessage + +// MarshalJSON implements the json.Marshaler interface. +// +// It marshals the balancer and config into a length-1 slice +// ([]map[string]config). +func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { + if bc.Config == nil { + // If config is nil, return empty config `{}`. + return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil + } + c, err := json.Marshal(bc.Config) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// +// ServiceConfig contains a list of loadBalancingConfigs, each with a name and +// config. This method iterates through that list in order, and stops at the +// first policy that is supported. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. 
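Before the implementation, a sketch of the first-supported-policy rule in action: unmarshalling a two-entry list where only the second name is registered selects round_robin. The unknown policy name is hypothetical, and the blank import registers the built-in round_robin balancer.

package main

import (
	"encoding/json"
	"fmt"

	_ "google.golang.org/grpc/balancer/roundrobin" // registers "round_robin"
	internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
)

func main() {
	js := `[{"not_a_real_policy":{}},{"round_robin":{}}]`
	var bc internalserviceconfig.BalancerConfig
	if err := json.Unmarshal([]byte(js), &bc); err != nil {
		panic(err)
	}
	// The first entry is skipped because no such balancer is registered.
	fmt.Println(bc.Name) // round_robin
}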
+func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { + var ir intermediateBalancerConfig + err := json.Unmarshal(b, &ir) + if err != nil { + return err + } + + var names []string + for i, lbcfg := range ir { + if len(lbcfg) != 1 { + return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) + } + + var ( + name string + jsonCfg json.RawMessage + ) + // Get the key:value pair from the map. We have already made sure that + // the map contains a single entry. + for name, jsonCfg = range lbcfg { + } + + names = append(names, name) + builder := balancer.Get(name) + if builder == nil { + // If the balancer is not registered, move on to the next config. + // This is not an error. + continue + } + bc.Name = name + + parser, ok := builder.(balancer.ConfigParser) + if !ok { + if string(jsonCfg) != "{}" { + logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) + } + // Stop at this, though the builder doesn't support parsing config. + return nil + } + + cfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err) + } + bc.Config = cfg + return nil + } + // This is reached when the for loop iterates over all entries, but didn't + // return. This means we had a loadBalancingConfig slice but did not + // encounter a registered policy. The config is considered invalid in this + // case. + return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names) +} + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. + // The actual value used is the minimum of the value specified here and the value set + // by the application via the gRPC client API. If either one is not set, then the other + // will be used. If neither is set, then the built-in default is used. + MaxReqSize *int + // MaxRespSize is the maximum allowed payload size for an individual response in a + // stream (server->client) in bytes. + MaxRespSize *int + // RetryPolicy configures retry options for the method. + RetryPolicy *RetryPolicy +} + +// RetryPolicy defines the go-native version of the retry policy defined by the +// service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type RetryPolicy struct { + // MaxAttempts is the maximum number of attempts, including the original RPC. + // + // This field is required and must be two or greater. + MaxAttempts int + + // Exponential backoff parameters. 
The initial retry attempt will occur at + // random(0, initialBackoff). In general, the nth attempt will occur at + // random(0, + // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). + // + // These fields are required and must be greater than zero. + InitialBackoff time.Duration + MaxBackoff time.Duration + BackoffMultiplier float64 + + // The set of status codes which may be retried. + // + // Status codes are specified as strings, e.g., "UNAVAILABLE". + // + // This field is required and must be non-empty. + // Note: a set is used to store this for easy lookup. + RetryableStatusCodes map[codes.Code]bool +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go new file mode 100644 index 000000000..e5c6513ed --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -0,0 +1,166 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "errors" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +) + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// Err returns an error representing c and msg. If c is OK, returns nil. +func Err(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Err(c, fmt.Sprintf(format, a...)) +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. 
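Before the remaining accessors, a usage sketch: attaching typed details and converting to an error mirrors the public status package. The errdetails message and its field values here are hypothetical.

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	istatus "google.golang.org/grpc/internal/status"
)

func main() {
	st := istatus.New(codes.InvalidArgument, "bad request")
	st, err := st.WithDetails(&errdetails.BadRequest{
		FieldViolations: []*errdetails.BadRequest_FieldViolation{
			{Field: "name", Description: "must not be empty"}, // hypothetical violation
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(st.Err())     // rpc error: code = InvalidArgument desc = bad request
	fmt.Println(st.Details()) // decoded detail messages (or decode errors)
}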
+func (s *Status) Message() string {
+	if s == nil || s.s == nil {
+		return ""
+	}
+	return s.s.Message
+}
+
+// Proto returns s's status as an spb.Status proto message.
+func (s *Status) Proto() *spb.Status {
+	if s == nil {
+		return nil
+	}
+	return proto.Clone(s.s).(*spb.Status)
+}
+
+// Err returns an immutable error representing s; returns nil if s.Code() is OK.
+func (s *Status) Err() error {
+	if s.Code() == codes.OK {
+		return nil
+	}
+	return &Error{s: s}
+}
+
+// WithDetails returns a new status with the provided details messages appended to the status.
+// If any errors are encountered, it returns nil and the first error encountered.
+func (s *Status) WithDetails(details ...proto.Message) (*Status, error) {
+	if s.Code() == codes.OK {
+		return nil, errors.New("no error details for status with code OK")
+	}
+	// s.Code() != OK implies that s.Proto() != nil.
+	p := s.Proto()
+	for _, detail := range details {
+		any, err := ptypes.MarshalAny(detail)
+		if err != nil {
+			return nil, err
+		}
+		p.Details = append(p.Details, any)
+	}
+	return &Status{s: p}, nil
+}
+
+// Details returns a slice of details messages attached to the status.
+// If a detail cannot be decoded, the error is returned in place of the detail.
+func (s *Status) Details() []interface{} {
+	if s == nil || s.s == nil {
+		return nil
+	}
+	details := make([]interface{}, 0, len(s.s.Details))
+	for _, any := range s.s.Details {
+		detail := &ptypes.DynamicAny{}
+		if err := ptypes.UnmarshalAny(any, detail); err != nil {
+			details = append(details, err)
+			continue
+		}
+		details = append(details, detail.Message)
+	}
+	return details
+}
+
+func (s *Status) String() string {
+	return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message())
+}
+
+// Error wraps a pointer to a status proto. It implements error and Status,
+// and a nil *Error should never be returned by this package.
+type Error struct {
+	s *Status
+}
+
+func (e *Error) Error() string {
+	return e.s.String()
+}
+
+// GRPCStatus returns the Status represented by e.
+func (e *Error) GRPCStatus() *Status {
+	return e.s
+}
+
+// Is implements future error.Is functionality.
+// An Error is equivalent if the code and message are identical.
+func (e *Error) Is(target error) bool {
+	tse, ok := target.(*Error)
+	if !ok {
+		return false
+	}
+	return proto.Equal(e.s.s, tse.s.s)
+}
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
new file mode 100644
index 000000000..b3a72276d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
@@ -0,0 +1,112 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package syscall provides functionalities that grpc uses to get low-level operating system
+// stats/info.
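Before the Linux implementation below, a brief usage sketch of the TCP_USER_TIMEOUT helpers; the dial target is hypothetical, and on non-Linux builds the call is a no-op, as the sibling file later in this patch shows.

package main

import (
	"log"
	"net"
	"time"

	isyscall "google.golang.org/grpc/internal/syscall"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:443") // hypothetical peer
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// Unacknowledged data may linger for at most ~20s before the kernel
	// closes the connection (Linux only; no-op elsewhere).
	if err := isyscall.SetTCPUserTimeout(conn, 20*time.Second); err != nil {
		log.Fatal(err)
	}
}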
+package syscall
+
+import (
+	"fmt"
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/unix"
+	"google.golang.org/grpc/grpclog"
+)
+
+var logger = grpclog.Component("core")
+
+// GetCPUTime returns how much CPU time has passed since the start of this process.
+func GetCPUTime() int64 {
+	var ts unix.Timespec
+	if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
+		logger.Fatal(err)
+	}
+	return ts.Nano()
+}
+
+// Rusage is an alias for syscall.Rusage under linux environment.
+type Rusage = syscall.Rusage
+
+// GetRusage returns the resource usage of current process.
+func GetRusage() *Rusage {
+	rusage := new(Rusage)
+	syscall.Getrusage(syscall.RUSAGE_SELF, rusage)
+	return rusage
+}
+
+// CPUTimeDiff returns the differences of user CPU time and system CPU time used
+// between two Rusage structs.
+func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+	var (
+		utimeDiffs  = latest.Utime.Sec - first.Utime.Sec
+		utimeDiffus = latest.Utime.Usec - first.Utime.Usec
+		stimeDiffs  = latest.Stime.Sec - first.Stime.Sec
+		stimeDiffus = latest.Stime.Usec - first.Stime.Usec
+	)
+
+	uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
+	sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6
+
+	return uTimeElapsed, sTimeElapsed
+}
+
+// SetTCPUserTimeout sets the TCP user timeout on a connection's socket.
+func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+	tcpconn, ok := conn.(*net.TCPConn)
+	if !ok {
+		// Not a TCP connection; exit early.
+		return nil
+	}
+	rawConn, err := tcpconn.SyscallConn()
+	if err != nil {
+		return fmt.Errorf("error getting raw connection: %v", err)
+	}
+	err = rawConn.Control(func(fd uintptr) {
+		err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond))
+	})
+	if err != nil {
+		return fmt.Errorf("error setting option on socket: %v", err)
+	}
+
+	return nil
+}
+
+// GetTCPUserTimeout gets the TCP user timeout on a connection's socket.
+func GetTCPUserTimeout(conn net.Conn) (opt int, err error) {
+	tcpconn, ok := conn.(*net.TCPConn)
+	if !ok {
+		err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn)
+		return
+	}
+	rawConn, err := tcpconn.SyscallConn()
+	if err != nil {
+		err = fmt.Errorf("error getting raw connection: %v", err)
+		return
+	}
+	err = rawConn.Control(func(fd uintptr) {
+		opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT)
+	})
+	if err != nil {
+		err = fmt.Errorf("error getting option on socket: %v", err)
+		return
+	}
+
+	return
+}
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
new file mode 100644
index 000000000..999f52cd7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -0,0 +1,77 @@
+//go:build !linux
+// +build !linux
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package syscall provides functionalities that grpc uses to get low-level
+// operating system stats/info.
+package syscall
+
+import (
+	"net"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+var once sync.Once
+var logger = grpclog.Component("core")
+
+func log() {
+	once.Do(func() {
+		logger.Info("CPU time info is unavailable on non-linux environments.")
+	})
+}
+
+// GetCPUTime returns how much CPU time has passed since the start of this
+// process. It always returns 0 under non-linux environments.
+func GetCPUTime() int64 {
+	log()
+	return 0
+}
+
+// Rusage is an empty struct under non-linux environments.
+type Rusage struct{}
+
+// GetRusage is a no-op function under non-linux environments.
+func GetRusage() *Rusage {
+	log()
+	return nil
+}
+
+// CPUTimeDiff returns the differences of user CPU time and system CPU time used
+// between two Rusage structs. It is a no-op function under non-linux environments.
+func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+	log()
+	return 0, 0
+}
+
+// SetTCPUserTimeout is a no-op function under non-linux environments.
+func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+	log()
+	return nil
+}
+
+// GetTCPUserTimeout is a no-op function under non-linux environments.
+// A negative return value indicates the operation is not supported.
+func GetTCPUserTimeout(conn net.Conn) (int, error) {
+	log()
+	return -1, nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
new file mode 100644
index 000000000..070680edb
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
@@ -0,0 +1,141 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"sync"
+	"time"
+)
+
+const (
+	// bdpLimit is the maximum value the flow control windows will be increased
+	// to. TCP typically limits this to 4MB, but some systems go up to 16MB.
+	// Since this is only a limit, it is safe to make it optimistic.
+	bdpLimit = (1 << 20) * 16
+	// alpha is a constant factor used to keep a moving average
+	// of RTTs.
+	alpha = 0.9
+	// If the current bdp sample is greater than or equal to
+	// our beta * our estimated bdp and the current bandwidth
+	// sample is the maximum bandwidth observed so far, we
+	// increase our bdp estimate by a factor of gamma.
+	beta = 0.66
+	// To keep our bdp smaller than or equal to twice the real BDP, we should
+	// multiply our current sample by 4/3; however, to round things out, we use
+	// 2 as the multiplication factor.
+	gamma = 2
+)
+
+// Adding arbitrary data to ping so that its ack can be identified.
+// Easter-egg: what does the ping message say?
+var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
+
+type bdpEstimator struct {
+	// sentAt is the time when the ping was sent.
+	sentAt time.Time
+
+	mu sync.Mutex
+	// bdp is the current bdp estimate.
+ bdp uint32 + // sample is the number of bytes received in one measurement cycle. + sample uint32 + // bwMax is the maximum bandwidth noted so far (bytes/sec). + bwMax float64 + // bool to keep track of the beginning of a new measurement cycle. + isSent bool + // Callback to update the window sizes. + updateFlowControl func(n uint32) + // sampleCount is the number of samples taken so far. + sampleCount uint64 + // round trip time (seconds) + rtt float64 +} + +// timesnap registers the time bdp ping was sent out so that +// network rtt can be calculated when its ack is received. +// It is called (by controller) when the bdpPing is +// being written on the wire. +func (b *bdpEstimator) timesnap(d [8]byte) { + if bdpPing.data != d { + return + } + b.sentAt = time.Now() +} + +// add adds bytes to the current sample for calculating bdp. +// It returns true only if a ping must be sent. This can be used +// by the caller (handleData) to make decision about batching +// a window update with it. +func (b *bdpEstimator) add(n uint32) bool { + b.mu.Lock() + defer b.mu.Unlock() + if b.bdp == bdpLimit { + return false + } + if !b.isSent { + b.isSent = true + b.sample = n + b.sentAt = time.Time{} + b.sampleCount++ + return true + } + b.sample += n + return false +} + +// calculate is called when an ack for a bdp ping is received. +// Here we calculate the current bdp and bandwidth sample and +// decide if the flow control windows should go up. +func (b *bdpEstimator) calculate(d [8]byte) { + // Check if the ping acked for was the bdp ping. + if bdpPing.data != d { + return + } + b.mu.Lock() + rttSample := time.Since(b.sentAt).Seconds() + if b.sampleCount < 10 { + // Bootstrap rtt with an average of first 10 rtt samples. + b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) + } else { + // Heed to the recent past more. + b.rtt += (rttSample - b.rtt) * float64(alpha) + } + b.isSent = false + // The number of bytes accumulated so far in the sample is smaller + // than or equal to 1.5 times the real BDP on a saturated connection. + bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) + if bwCurrent > b.bwMax { + b.bwMax = bwCurrent + } + // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is + // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we + // should update our perception of the network BDP. + if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { + sampleFloat := float64(b.sample) + b.bdp = uint32(gamma * sampleFloat) + if b.bdp > bdpLimit { + b.bdp = bdpLimit + } + bdp := b.bdp + b.mu.Unlock() + b.updateFlowControl(bdp) + return + } + b.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go new file mode 100644 index 000000000..8394d252d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -0,0 +1,984 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bytes" + "errors" + "fmt" + "runtime" + "strconv" + "sync" + "sync/atomic" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/status" +) + +var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { + e.SetMaxDynamicTableSizeLimit(v) +} + +type itemNode struct { + it interface{} + next *itemNode +} + +type itemList struct { + head *itemNode + tail *itemNode +} + +func (il *itemList) enqueue(i interface{}) { + n := &itemNode{it: i} + if il.tail == nil { + il.head, il.tail = n, n + return + } + il.tail.next = n + il.tail = n +} + +// peek returns the first item in the list without removing it from the +// list. +func (il *itemList) peek() interface{} { + return il.head.it +} + +func (il *itemList) dequeue() interface{} { + if il.head == nil { + return nil + } + i := il.head.it + il.head = il.head.next + if il.head == nil { + il.tail = nil + } + return i +} + +func (il *itemList) dequeueAll() *itemNode { + h := il.head + il.head, il.tail = nil, nil + return h +} + +func (il *itemList) isEmpty() bool { + return il.head == nil +} + +// The following defines various control items which could flow through +// the control buffer of transport. They represent different aspects of +// control tasks, e.g., flow control, settings, streaming resetting, etc. + +// maxQueuedTransportResponseFrames is the most queued "transport response" +// frames we will buffer before preventing new reads from occurring on the +// transport. These are control frames sent in response to client requests, +// such as RST_STREAM due to bad headers or settings acks. +const maxQueuedTransportResponseFrames = 50 + +type cbItem interface { + isTransportResponseFrame() bool +} + +// registerStream is used to register an incoming stream with loopy writer. +type registerStream struct { + streamID uint32 + wq *writeQuota +} + +func (*registerStream) isTransportResponseFrame() bool { return false } + +// headerFrame is also used to register stream on the client-side. +type headerFrame struct { + streamID uint32 + hf []hpack.HeaderField + endStream bool // Valid on server side. + initStream func(uint32) error // Used only on the client side. + onWrite func() + wq *writeQuota // write quota for the stream created. + cleanup *cleanupStream // Valid on the server side. + onOrphaned func(error) // Valid on client-side +} + +func (h *headerFrame) isTransportResponseFrame() bool { + return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM +} + +type cleanupStream struct { + streamID uint32 + rst bool + rstCode http2.ErrCode + onWrite func() +} + +func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM + +type earlyAbortStream struct { + httpStatus uint32 + streamID uint32 + contentSubtype string + status *status.Status +} + +func (*earlyAbortStream) isTransportResponseFrame() bool { return false } + +type dataFrame struct { + streamID uint32 + endStream bool + h []byte + d []byte + // onEachWrite is called every time + // a part of d is written out. 
+ onEachWrite func() +} + +func (*dataFrame) isTransportResponseFrame() bool { return false } + +type incomingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false } + +type outgoingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*outgoingWindowUpdate) isTransportResponseFrame() bool { + return false // window updates are throttled by thresholds +} + +type incomingSettings struct { + ss []http2.Setting +} + +func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK + +type outgoingSettings struct { + ss []http2.Setting +} + +func (*outgoingSettings) isTransportResponseFrame() bool { return false } + +type incomingGoAway struct { +} + +func (*incomingGoAway) isTransportResponseFrame() bool { return false } + +type goAway struct { + code http2.ErrCode + debugData []byte + headsUp bool + closeConn bool +} + +func (*goAway) isTransportResponseFrame() bool { return false } + +type ping struct { + ack bool + data [8]byte +} + +func (*ping) isTransportResponseFrame() bool { return true } + +type outFlowControlSizeRequest struct { + resp chan uint32 +} + +func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } + +type outStreamState int + +const ( + active outStreamState = iota + empty + waitingOnStreamQuota +) + +type outStream struct { + id uint32 + state outStreamState + itl *itemList + bytesOutStanding int + wq *writeQuota + + next *outStream + prev *outStream +} + +func (s *outStream) deleteSelf() { + if s.prev != nil { + s.prev.next = s.next + } + if s.next != nil { + s.next.prev = s.prev + } + s.next, s.prev = nil, nil +} + +type outStreamList struct { + // Following are sentinel objects that mark the + // beginning and end of the list. They do not + // contain any item lists. All valid objects are + // inserted in between them. + // This is needed so that an outStream object can + // deleteSelf() in O(1) time without knowing which + // list it belongs to. + head *outStream + tail *outStream +} + +func newOutStreamList() *outStreamList { + head, tail := new(outStream), new(outStream) + head.next = tail + tail.prev = head + return &outStreamList{ + head: head, + tail: tail, + } +} + +func (l *outStreamList) enqueue(s *outStream) { + e := l.tail.prev + e.next = s + s.prev = e + s.next = l.tail + l.tail.prev = s +} + +// remove from the beginning of the list. +func (l *outStreamList) dequeue() *outStream { + b := l.head.next + if b == l.tail { + return nil + } + b.deleteSelf() + return b +} + +// controlBuffer is a way to pass information to loopy. +// Information is passed as specific struct types called control frames. +// A control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. +// It shouldn't be confused with an HTTP2 frame, although some of the control frames +// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +type controlBuffer struct { + ch chan struct{} + done <-chan struct{} + mu sync.Mutex + consumerWaiting bool + list *itemList + err error + + // transportResponseFrames counts the number of queued items that represent + // the response of an action initiated by the peer. trfChan is created + // when transportResponseFrames >= maxQueuedTransportResponseFrames and is + // closed and nilled when transportResponseFrames drops below the + // threshold. Both fields are protected by mu. 
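As an aside before the struct fields: the back-pressure contract described above can be sketched as follows. The snippet assumes it lives in this package (newControlBuffer is defined just below), and the counts are illustrative.

// throttleSketch illustrates the contract: queueing
// maxQueuedTransportResponseFrames "transport response" items (e.g.
// settings ACKs) makes the next throttle() call block until the reader
// drains one via get().
func throttleSketch(done <-chan struct{}) {
	cb := newControlBuffer(done)
	for i := 0; i < maxQueuedTransportResponseFrames; i++ {
		cb.put(&incomingSettings{}) // isTransportResponseFrame() == true
	}
	go func() {
		cb.get(true) // dequeuing one response frame reopens the gate
	}()
	cb.throttle() // blocks until the goroutine above drains a frame
}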
+ transportResponseFrames int + trfChan atomic.Value // chan struct{} +} + +func newControlBuffer(done <-chan struct{}) *controlBuffer { + return &controlBuffer{ + ch: make(chan struct{}, 1), + list: &itemList{}, + done: done, + } +} + +// throttle blocks if there are too many incomingSettings/cleanupStreams in the +// controlbuf. +func (c *controlBuffer) throttle() { + ch, _ := c.trfChan.Load().(chan struct{}) + if ch != nil { + select { + case <-ch: + case <-c.done: + } + } +} + +func (c *controlBuffer) put(it cbItem) error { + _, err := c.executeAndPut(nil, it) + return err +} + +func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { + var wakeUp bool + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if f != nil { + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + } + if c.consumerWaiting { + wakeUp = true + c.consumerWaiting = false + } + c.list.enqueue(it) + if it.isTransportResponseFrame() { + c.transportResponseFrames++ + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are adding the frame that puts us over the threshold; create + // a throttling channel. + c.trfChan.Store(make(chan struct{})) + } + } + c.mu.Unlock() + if wakeUp { + select { + case c.ch <- struct{}{}: + default: + } + } + return true, nil +} + +// Note argument f should never be nil. +func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + c.mu.Unlock() + return true, nil +} + +func (c *controlBuffer) get(block bool) (interface{}, error) { + for { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return nil, c.err + } + if !c.list.isEmpty() { + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Load().(chan struct{}) + close(ch) + c.trfChan.Store((chan struct{})(nil)) + } + c.transportResponseFrames-- + } + c.mu.Unlock() + return h, nil + } + if !block { + c.mu.Unlock() + return nil, nil + } + c.consumerWaiting = true + c.mu.Unlock() + select { + case <-c.ch: + case <-c.done: + return nil, ErrConnClosing + } + } +} + +func (c *controlBuffer) finish() { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return + } + c.err = ErrConnClosing + // There may be headers for streams in the control buffer. + // These streams need to be cleaned out since the transport + // is still not aware of these yet. + for head := c.list.dequeueAll(); head != nil; head = head.next { + hdr, ok := head.it.(*headerFrame) + if !ok { + continue + } + if hdr.onOrphaned != nil { // It will be nil on the server-side. + hdr.onOrphaned(ErrConnClosing) + } + } + // In case throttle() is currently in flight, it needs to be unblocked. + // Otherwise, the transport may not close, since the transport is closed by + // the reader encountering the connection error. + ch, _ := c.trfChan.Load().(chan struct{}) + if ch != nil { + close(ch) + } + c.trfChan.Store((chan struct{})(nil)) + c.mu.Unlock() +} + +type side int + +const ( + clientSide side = iota + serverSide +) + +// Loopy receives frames from the control buffer. +// Each frame is handled individually; most of the work done by loopy goes +// into handling data frames. 
Loopy maintains a queue of active streams, and each
+// stream maintains a queue of data frames; as loopy receives data frames,
+// they are added to the queue of the relevant stream.
+// Loopy goes over this list of active streams by processing one node every iteration,
+// thereby closely resembling round-robin scheduling over all streams. While
+// processing a stream, loopy writes out data bytes from this stream capped by the min
+// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
+type loopyWriter struct {
+	side      side
+	cbuf      *controlBuffer
+	sendQuota uint32
+	oiws      uint32 // outbound initial window size.
+	// estdStreams is a map of all established streams that are not cleaned-up yet.
+	// On client-side, this is all streams whose headers were sent out.
+	// On server-side, this is all streams whose headers were received.
+	estdStreams map[uint32]*outStream // Established streams.
+	// activeStreams is a linked-list of all streams that have data to send and some
+	// stream-level flow control quota.
+	// Each of these streams internally has a list of data items (and perhaps trailers
+	// on the server side) to be sent out.
+	activeStreams *outStreamList
+	framer        *framer
+	hBuf          *bytes.Buffer // The buffer for HPACK encoding.
+	hEnc          *hpack.Encoder // HPACK encoder.
+	bdpEst        *bdpEstimator
+	draining      bool
+
+	// Side-specific handlers
+	ssGoAwayHandler func(*goAway) (bool, error)
+}
+
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
+	var buf bytes.Buffer
+	l := &loopyWriter{
+		side:          s,
+		cbuf:          cbuf,
+		sendQuota:     defaultWindowSize,
+		oiws:          defaultWindowSize,
+		estdStreams:   make(map[uint32]*outStream),
+		activeStreams: newOutStreamList(),
+		framer:        fr,
+		hBuf:          &buf,
+		hEnc:          hpack.NewEncoder(&buf),
+		bdpEst:        bdpEst,
+	}
+	return l
+}
+
+const minBatchSize = 1000
+
+// run should be run in a separate goroutine.
+// It reads control frames from controlBuf and processes them by:
+// 1. Updating loopy's internal state, and/or
+// 2. Writing out HTTP2 frames on the wire.
+//
+// Loopy keeps all active streams with data to send in a linked-list.
+// All streams in the activeStreams linked-list must have both:
+// 1. Data to send, and
+// 2. Stream level flow control quota available.
+//
+// In each iteration of run loop, other than processing the incoming control
+// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
+// This results in writing of HTTP2 frames into an underlying write buffer.
+// When there's no more control frames to read from controlBuf, loopy flushes the write buffer.
+// As an optimization, to increase the batch size for each flush, loopy yields the
+// processor once, if the batch size is too low, to give stream goroutines a chance
+// to fill it up.
+func (l *loopyWriter) run() (err error) {
+	defer func() {
+		if err == ErrConnClosing {
+			// Don't log ErrConnClosing as error since it happens
+			// 1. When the connection is closed by some other known issue.
+			// 2. User closed the connection.
+			// 3. A graceful close of connection.
+			if logger.V(logLevel) {
+				logger.Infof("transport: loopyWriter.run returning. %v", err)
%v", err) + } + err = nil + } + }() + for { + it, err := l.cbuf.get(true) + if err != nil { + return err + } + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + gosched := true + hasdata: + for { + it, err := l.cbuf.get(false) + if err != nil { + return err + } + if it != nil { + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + continue hasdata + } + isEmpty, err := l.processData() + if err != nil { + return err + } + if !isEmpty { + continue hasdata + } + if gosched { + gosched = false + if l.framer.writer.offset < minBatchSize { + runtime.Gosched() + continue hasdata + } + } + l.framer.writer.Flush() + break hasdata + + } + } +} + +func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error { + return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) +} + +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { + // Otherwise update the quota. + if w.streamID == 0 { + l.sendQuota += w.increment + return nil + } + // Find the stream and update it. + if str, ok := l.estdStreams[w.streamID]; ok { + str.bytesOutStanding -= int(w.increment) + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { + str.state = active + l.activeStreams.enqueue(str) + return nil + } + } + return nil +} + +func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { + return l.framer.fr.WriteSettings(s.ss...) +} + +func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { + if err := l.applySettings(s.ss); err != nil { + return err + } + return l.framer.fr.WriteSettingsAck() +} + +func (l *loopyWriter) registerStreamHandler(h *registerStream) error { + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + l.estdStreams[h.streamID] = str + return nil +} + +func (l *loopyWriter) headerHandler(h *headerFrame) error { + if l.side == serverSide { + str, ok := l.estdStreams[h.streamID] + if !ok { + if logger.V(logLevel) { + logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + } + return nil + } + // Case 1.A: Server is responding back with headers. + if !h.endStream { + return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite) + } + // else: Case 1.B: Server wants to close stream. + + if str.state != empty { // either active or waiting on stream quota. + // add it str's list of items. + str.itl.enqueue(h) + return nil + } + if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil { + return err + } + return l.cleanupStreamHandler(h.cleanup) + } + // Case 2: Client wants to originate stream. + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + str.itl.enqueue(h) + return l.originateStream(str) +} + +func (l *loopyWriter) originateStream(str *outStream) error { + hdr := str.itl.dequeue().(*headerFrame) + if err := hdr.initStream(str.id); err != nil { + if err == ErrConnClosing { + return err + } + // Other errors(errStreamDrain) need not close transport. 
+ return nil + } + if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { + return err + } + l.estdStreams[str.id] = str + return nil +} + +func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error { + if onWrite != nil { + onWrite() + } + l.hBuf.Reset() + for _, f := range hf { + if err := l.hEnc.WriteField(f); err != nil { + if logger.V(logLevel) { + logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) + } + } + } + var ( + err error + endHeaders, first bool + ) + first = true + for !endHeaders { + size := l.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + if first { + first = false + err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: streamID, + BlockFragment: l.hBuf.Next(size), + EndStream: endStream, + EndHeaders: endHeaders, + }) + } else { + err = l.framer.fr.WriteContinuation( + streamID, + endHeaders, + l.hBuf.Next(size), + ) + } + if err != nil { + return err + } + } + return nil +} + +func (l *loopyWriter) preprocessData(df *dataFrame) error { + str, ok := l.estdStreams[df.streamID] + if !ok { + return nil + } + // If we got data for a stream it means that + // stream was originated and the headers were sent out. + str.itl.enqueue(df) + if str.state == empty { + str.state = active + l.activeStreams.enqueue(str) + } + return nil +} + +func (l *loopyWriter) pingHandler(p *ping) error { + if !p.ack { + l.bdpEst.timesnap(p.data) + } + return l.framer.fr.WritePing(p.ack, p.data) + +} + +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { + o.resp <- l.sendQuota + return nil +} + +func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { + c.onWrite() + if str, ok := l.estdStreams[c.streamID]; ok { + // On the server side it could be a trailers-only response or + // a RST_STREAM before stream initialization thus the stream might + // not be established yet. + delete(l.estdStreams, c.streamID) + str.deleteSelf() + } + if c.rst { // If RST_STREAM needs to be sent. + if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { + return err + } + } + if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { + return ErrConnClosing + } + return nil +} + +func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { + if l.side == clientSide { + return errors.New("earlyAbortStream not handled on client") + } + // In case the caller forgets to set the http status, default to 200. + if eas.httpStatus == 0 { + eas.httpStatus = 200 + } + headerFields := []hpack.HeaderField{ + {Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))}, + {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, + {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, + {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, + } + + if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { + return err + } + return nil +} + +func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { + if l.side == clientSide { + l.draining = true + if len(l.estdStreams) == 0 { + return ErrConnClosing + } + } + return nil +} + +func (l *loopyWriter) goAwayHandler(g *goAway) error { + // Handling of outgoing GoAway is very specific to side. 
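
earlyAbortStreamHandler above is, in effect, emitting a trailers-only gRPC response. Here is a hedged, standalone sketch of the same header block, encoded with golang.org/x/net/http2/hpack; the status code and message are made-up values.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

// encodeTrailersOnly HPACK-encodes a trailers-only gRPC response, the same
// shape earlyAbortStreamHandler emits: HTTP 200 plus grpc-status/grpc-message.
func encodeTrailersOnly(grpcStatus int, msg string) []byte {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	for _, f := range []hpack.HeaderField{
		{Name: ":status", Value: "200"},
		{Name: "content-type", Value: "application/grpc"},
		{Name: "grpc-status", Value: fmt.Sprintf("%d", grpcStatus)},
		{Name: "grpc-message", Value: msg},
	} {
		// WriteField only fails if the underlying writer fails; a
		// bytes.Buffer never does.
		_ = enc.WriteField(f)
	}
	return buf.Bytes()
}

func main() {
	block := encodeTrailersOnly(5, "resource not found") // 5 = codes.NotFound
	fmt.Printf("header block: %d bytes\n", len(block))
}
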
+ if l.ssGoAwayHandler != nil {
+ draining, err := l.ssGoAwayHandler(g)
+ if err != nil {
+ return err
+ }
+ l.draining = draining
+ }
+ return nil
+}
+
+func (l *loopyWriter) handle(i interface{}) error {
+ switch i := i.(type) {
+ case *incomingWindowUpdate:
+ return l.incomingWindowUpdateHandler(i)
+ case *outgoingWindowUpdate:
+ return l.outgoingWindowUpdateHandler(i)
+ case *incomingSettings:
+ return l.incomingSettingsHandler(i)
+ case *outgoingSettings:
+ return l.outgoingSettingsHandler(i)
+ case *headerFrame:
+ return l.headerHandler(i)
+ case *registerStream:
+ return l.registerStreamHandler(i)
+ case *cleanupStream:
+ return l.cleanupStreamHandler(i)
+ case *earlyAbortStream:
+ return l.earlyAbortStreamHandler(i)
+ case *incomingGoAway:
+ return l.incomingGoAwayHandler(i)
+ case *dataFrame:
+ return l.preprocessData(i)
+ case *ping:
+ return l.pingHandler(i)
+ case *goAway:
+ return l.goAwayHandler(i)
+ case *outFlowControlSizeRequest:
+ return l.outFlowControlSizeRequestHandler(i)
+ default:
+ return fmt.Errorf("transport: unknown control message type %T", i)
+ }
+}
+
+func (l *loopyWriter) applySettings(ss []http2.Setting) error {
+ for _, s := range ss {
+ switch s.ID {
+ case http2.SettingInitialWindowSize:
+ o := l.oiws
+ l.oiws = s.Val
+ if o < l.oiws {
+ // If the new limit is greater, make all depleted streams active.
+ for _, stream := range l.estdStreams {
+ if stream.state == waitingOnStreamQuota {
+ stream.state = active
+ l.activeStreams.enqueue(stream)
+ }
+ }
+ }
+ case http2.SettingHeaderTableSize:
+ updateHeaderTblSize(l.hEnc, s.Val)
+ }
+ }
+ return nil
+}
+
+// processData removes the first stream from active streams, writes out at most 16KB
+// of its data, and then puts it at the end of activeStreams if there's still more data
+// to be sent and the stream has some stream-level flow control quota left.
+func (l *loopyWriter) processData() (bool, error) {
+ if l.sendQuota == 0 {
+ return true, nil
+ }
+ str := l.activeStreams.dequeue() // Remove the first stream.
+ if str == nil {
+ return true, nil
+ }
+ dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item of this stream.
+ // A data item is represented by a dataFrame, since it later translates into
+ // multiple HTTP2 data frames.
+ // Every dataFrame has two buffers: h, which keeps the grpc-message header, and d, which is the actual data.
+ // As an optimization to keep wire traffic low, data from d is copied to h to make a frame as big as the
+ // maximum possible HTTP2 frame size.
+
+ if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
+ // Client sends out an empty data frame with endStream = true
+ if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
+ return false, err
+ }
+ str.itl.dequeue() // remove the empty data item from stream
+ if str.itl.isEmpty() {
+ str.state = empty
+ } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
+ if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
+ return false, err
+ }
+ if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
+ return false, err
+ }
+ } else {
+ l.activeStreams.enqueue(str)
+ }
+ return false, nil
+ }
+ var (
+ buf []byte
+ )
+ // Figure out the maximum size we can send.
+ maxSize := http2MaxFrameLen
+ if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
+ str.state = waitingOnStreamQuota + return false, nil + } else if maxSize > strQuota { + maxSize = strQuota + } + if maxSize > int(l.sendQuota) { // connection-level flow control. + maxSize = int(l.sendQuota) + } + // Compute how much of the header and data we can send within quota and max frame length + hSize := min(maxSize, len(dataItem.h)) + dSize := min(maxSize-hSize, len(dataItem.d)) + if hSize != 0 { + if dSize == 0 { + buf = dataItem.h + } else { + // We can add some data to grpc message header to distribute bytes more equally across frames. + // Copy on the stack to avoid generating garbage + var localBuf [http2MaxFrameLen]byte + copy(localBuf[:hSize], dataItem.h) + copy(localBuf[hSize:], dataItem.d[:dSize]) + buf = localBuf[:hSize+dSize] + } + } else { + buf = dataItem.d + } + + size := hSize + dSize + + // Now that outgoing flow controls are checked we can replenish str's write quota + str.wq.replenish(size) + var endStream bool + // If this is the last data message on this stream and all of it can be written in this iteration. + if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { + endStream = true + } + if dataItem.onEachWrite != nil { + dataItem.onEachWrite() + } + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + return false, err + } + str.bytesOutStanding += size + l.sendQuota -= uint32(size) + dataItem.h = dataItem.h[hSize:] + dataItem.d = dataItem.d[dSize:] + + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + str.itl.dequeue() + } + if str.itl.isEmpty() { + str.state = empty + } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers. + if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, err + } + } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota. + str.state = waitingOnStreamQuota + } else { // Otherwise add it back to the list of active streams. + l.activeStreams.enqueue(str) + } + return false, nil +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go new file mode 100644 index 000000000..9fa306b2e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -0,0 +1,49 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "math" + "time" +) + +const ( + // The default value of flow control window size in HTTP2 spec. + defaultWindowSize = 65535 + // The initial window size for flow control. 
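
The size computation at the heart of processData is just a chain of min operations over three budgets. A self-contained sketch with hypothetical quota values:

package main

import "fmt"

const http2MaxFrameLen = 16384 // 16KB, as in the transport above

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// writableSize caps a write by frame length, stream-level quota, and
// connection-level quota, then splits it between the gRPC message
// header (h) and the payload (d), the same way processData does.
func writableSize(hLen, dLen, streamQuota, connQuota int) (hSize, dSize int) {
	maxSize := min(http2MaxFrameLen, min(streamQuota, connQuota))
	hSize = min(maxSize, hLen)
	dSize = min(maxSize-hSize, dLen)
	return hSize, dSize
}

func main() {
	// Hypothetical: 5-byte grpc-message header, 40000-byte payload, ample
	// stream quota, but only 10000 bytes of connection-level quota left.
	h, d := writableSize(5, 40000, 65535, 10000)
	fmt.Println(h, d) // 5 9995: one 10000-byte DATA frame this pass
}
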
+ initialWindowSize = defaultWindowSize // for an RPC
+ infinity = time.Duration(math.MaxInt64)
+ defaultClientKeepaliveTime = infinity
+ defaultClientKeepaliveTimeout = 20 * time.Second
+ defaultMaxStreamsClient = 100
+ defaultMaxConnectionIdle = infinity
+ defaultMaxConnectionAge = infinity
+ defaultMaxConnectionAgeGrace = infinity
+ defaultServerKeepaliveTime = 2 * time.Hour
+ defaultServerKeepaliveTimeout = 20 * time.Second
+ defaultKeepalivePolicyMinTime = 5 * time.Minute
+ // max window limit set by the HTTP2 spec.
+ maxWindowSize = math.MaxInt32
+ // defaultWriteQuota is the default value for the number of data
+ // bytes that each stream can schedule before some of it is
+ // flushed out.
+ defaultWriteQuota = 64 * 1024
+ defaultClientMaxHeaderListSize = uint32(16 << 20)
+ defaultServerMaxHeaderListSize = uint32(16 << 20)
+)
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
new file mode 100644
index 000000000..97198c515
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -0,0 +1,215 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+ "fmt"
+ "math"
+ "sync"
+ "sync/atomic"
+)
+
+// writeQuota is a soft limit on the amount of data a stream can
+// schedule before some of it is written out.
+type writeQuota struct {
+ quota int32
+ // get blocks on a read from ch when quota goes less than or equal to zero.
+ // replenish writes on ch when quota goes positive again.
+ ch chan struct{}
+ // done is triggered in the error case.
+ done <-chan struct{}
+ // replenish is called by loopyWriter to give quota back.
+ // It is implemented as a field so that it can be updated
+ // by tests.
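
For intuition, the get/replenish handshake that writeQuota implements can be reduced to the following toy version (same atomics-plus-wakeup-channel shape, with a simplified replenish condition; the sizes are arbitrary):

package main

import (
	"fmt"
	"sync/atomic"
)

// quota is a trimmed-down writeQuota: get blocks while the counter is
// non-positive, replenish adds credit back and wakes one waiter.
type quota struct {
	n  int32
	ch chan struct{}
}

func (q *quota) get(sz int32) {
	for {
		if atomic.LoadInt32(&q.n) > 0 {
			atomic.AddInt32(&q.n, -sz) // may overshoot below zero, like writeQuota
			return
		}
		<-q.ch
	}
}

func (q *quota) replenish(sz int32) {
	if atomic.AddInt32(&q.n, sz) > 0 {
		select {
		case q.ch <- struct{}{}: // wake a waiter; buffered, so never blocks
		default:
		}
	}
}

func main() {
	q := &quota{n: 64, ch: make(chan struct{}, 1)}
	done := make(chan struct{})
	go func() { // the "stream" side: schedule two 64-byte writes
		q.get(64)
		q.get(64) // blocks until credit comes back
		close(done)
	}()
	q.replenish(64) // the "loopy" side: data was flushed, return the credit
	<-done
	fmt.Println("both writes admitted")
}
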
+ replenish func(n int)
+}
+
+func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
+ w := &writeQuota{
+ quota: sz,
+ ch: make(chan struct{}, 1),
+ done: done,
+ }
+ w.replenish = w.realReplenish
+ return w
+}
+
+func (w *writeQuota) get(sz int32) error {
+ for {
+ if atomic.LoadInt32(&w.quota) > 0 {
+ atomic.AddInt32(&w.quota, -sz)
+ return nil
+ }
+ select {
+ case <-w.ch:
+ continue
+ case <-w.done:
+ return errStreamDone
+ }
+ }
+}
+
+func (w *writeQuota) realReplenish(n int) {
+ sz := int32(n)
+ a := atomic.AddInt32(&w.quota, sz)
+ b := a - sz
+ if b <= 0 && a > 0 {
+ select {
+ case w.ch <- struct{}{}:
+ default:
+ }
+ }
+}
+
+type trInFlow struct {
+ limit uint32
+ unacked uint32
+ effectiveWindowSize uint32
+}
+
+func (f *trInFlow) newLimit(n uint32) uint32 {
+ d := n - f.limit
+ f.limit = n
+ f.updateEffectiveWindowSize()
+ return d
+}
+
+func (f *trInFlow) onData(n uint32) uint32 {
+ f.unacked += n
+ if f.unacked >= f.limit/4 {
+ w := f.unacked
+ f.unacked = 0
+ f.updateEffectiveWindowSize()
+ return w
+ }
+ f.updateEffectiveWindowSize()
+ return 0
+}
+
+func (f *trInFlow) reset() uint32 {
+ w := f.unacked
+ f.unacked = 0
+ f.updateEffectiveWindowSize()
+ return w
+}
+
+func (f *trInFlow) updateEffectiveWindowSize() {
+ atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked)
+}
+
+func (f *trInFlow) getSize() uint32 {
+ return atomic.LoadUint32(&f.effectiveWindowSize)
+}
+
+// TODO(mmukhi): Simplify this code.
+// inFlow deals with inbound flow control.
+type inFlow struct {
+ mu sync.Mutex
+ // The inbound flow control limit for pending data.
+ limit uint32
+ // pendingData is the overall data which has been received but not yet
+ // consumed by the application.
+ pendingData uint32
+ // The amount of data the application has consumed but grpc has not sent
+ // a window update for yet. Used to reduce window update frequency.
+ pendingUpdate uint32
+ // delta is the extra window update given by the receiver when an application
+ // is reading data bigger in size than the inFlow limit.
+ delta uint32
+}
+
+// newLimit updates the inflow window to a new value n.
+// It assumes that n is always greater than the old limit.
+func (f *inFlow) newLimit(n uint32) {
+ f.mu.Lock()
+ f.limit = n
+ f.mu.Unlock()
+}
+
+func (f *inFlow) maybeAdjust(n uint32) uint32 {
+ if n > uint32(math.MaxInt32) {
+ n = uint32(math.MaxInt32)
+ }
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ // estSenderQuota is the receiver's view of the maximum number of bytes the sender
+ // can send without a window update.
+ estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
+ // estUntransmittedData is the maximum number of bytes the sender might not have put
+ // on the wire yet. A value of 0 or less means that we have already received all or
+ // more bytes than the application is requesting to read.
+ estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
+ // This implies that unless we send a window update, the sender won't be able to send all the bytes
+ // for this message. Therefore we must send an update over the limit since there's an active read
+ // request from the application.
+ if estUntransmittedData > estSenderQuota {
+ // The sender's window shouldn't exceed 2^31 - 1, as specified in the HTTP/2 spec.
+ if f.limit+n > maxWindowSize {
+ f.delta = maxWindowSize - f.limit
+ } else {
+ // Send a window update for the whole message and not just the difference between
+ // estUntransmittedData and estSenderQuota.
This will be helpful in case the message + // is padded; We will fallback on the current available window(at least a 1/4th of the limit). + f.delta = n + } + return f.delta + } + return 0 +} + +// onData is invoked when some data frame is received. It updates pendingData. +func (f *inFlow) onData(n uint32) error { + f.mu.Lock() + f.pendingData += n + if f.pendingData+f.pendingUpdate > f.limit+f.delta { + limit := f.limit + rcvd := f.pendingData + f.pendingUpdate + f.mu.Unlock() + return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit) + } + f.mu.Unlock() + return nil +} + +// onRead is invoked when the application reads the data. It returns the window size +// to be sent to the peer. +func (f *inFlow) onRead(n uint32) uint32 { + f.mu.Lock() + if f.pendingData == 0 { + f.mu.Unlock() + return 0 + } + f.pendingData -= n + if n > f.delta { + n -= f.delta + f.delta = 0 + } else { + f.delta -= n + n = 0 + } + f.pendingUpdate += n + if f.pendingUpdate >= f.limit/4 { + wu := f.pendingUpdate + f.pendingUpdate = 0 + f.mu.Unlock() + return wu + } + f.mu.Unlock() + return 0 +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go new file mode 100644 index 000000000..1c3459c2b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -0,0 +1,462 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC +// from inside an http.Handler. It requires that the http Server +// supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { + if r.ProtoMajor != 2 { + return nil, errors.New("gRPC requires HTTP/2") + } + if r.Method != "POST" { + return nil, errors.New("invalid gRPC request method") + } + contentType := r.Header.Get("Content-Type") + // TODO: do we assume contentType is lowercase? 
we did before + contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) + if !validContentType { + return nil, errors.New("invalid gRPC request content-type") + } + if _, ok := w.(http.Flusher); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + } + + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + contentType: contentType, + contentSubtype: contentSubtype, + stats: stats, + } + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := decodeTimeout(v) + if err != nil { + return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + } + st.timeoutSet = true + st.timeout = to + } + + metakv := []string{"content-type", contentType} + if r.Host != "" { + metakv = append(metakv, ":authority", r.Host) + } + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) && !isWhitelistedHeader(k) { + continue + } + for _, v := range vv { + v, err := decodeMetadataHeader(k, v) + if err != nil { + return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + } + metakv = append(metakv, k, v) + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + + headerMD metadata.MD + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. The channel is closed + // when WriteStatus is called. + writes chan func() + + // block concurrent WriteStatus calls + // e.g. grpc/(*serverStream).SendMsg/RecvMsg + writeStatusMu sync.Mutex + + // we just mirror the request content-type + contentType string + // we store both contentType and contentSubtype so we don't keep recreating them + // TODO make sure this is consistent across handler_server and http2_server + contentSubtype string + + stats stats.Handler +} + +func (ht *serverHandlerTransport) Close() { + ht.closeOnce.Do(ht.closeCloseChanOnce) +} + +func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } + +func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. +type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. + return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. 
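
The writes channel plus do() (below) is the standard trick for funneling all side effects into the one goroutine that owns the ResponseWriter. Stripped of the transport details, and with illustrative names, it looks like this:

package main

import "fmt"

// serializer runs closures one at a time in a single owner goroutine,
// the same trick serverHandlerTransport.do uses to keep every write on
// the ServeHTTP goroutine.
type serializer struct {
	work   chan func()
	closed chan struct{}
}

func (s *serializer) do(fn func()) error {
	select {
	case <-s.closed:
		return fmt.Errorf("serializer closed")
	case s.work <- fn:
		return nil
	}
}

func (s *serializer) run() {
	for {
		select {
		case fn := <-s.work:
			fn()
		case <-s.closed:
			return
		}
	}
}

func main() {
	s := &serializer{work: make(chan func()), closed: make(chan struct{})}
	done := make(chan struct{})
	go s.run()
	_ = s.do(func() { fmt.Println("ran on the owner goroutine") })
	_ = s.do(func() { close(done) })
	<-done
	close(s.closed)
}
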
+func (ht *serverHandlerTransport) do(fn func()) error { + select { + case <-ht.closedCh: + return ErrConnClosing + case ht.writes <- fn: + return nil + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { + ht.writeStatusMu.Lock() + defer ht.writeStatusMu.Unlock() + + headersWritten := s.updateHeaderSent() + err := ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code())) + if m := st.Message(); m != "" { + h.Set("Grpc-Message", encodeGrpcMessage(m)) + } + + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + panic(err) + } + + h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + } + + if md := s.Trailer(); len(md) > 0 { + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + // http2 ResponseWriter mechanism to send undeclared Trailers after + // the headers have possibly been written. + h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v)) + } + } + } + }) + + if err == nil { // transport has not been closed + if ht.stats != nil { + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + } + } + ht.Close() + return err +} + +// writePendingHeaders sets common and custom headers on the first +// write call (Write, WriteHeader, or WriteStatus) +func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { + ht.writeCommonHeaders(s) + ht.writeCustomHeaders(s) +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", ht.contentType) + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. + // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + h.Add("Trailer", "Grpc-Status-Details-Bin") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +// writeCustomHeaders sets custom headers set on the stream via SetHeader +// on the first write call (Write, WriteHeader, or WriteStatus). 
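
WriteStatus above relies on the magic trailer prefix: headers set after the body, whose names carry the TrailerPrefix marker, are sent as HTTP trailers without being predeclared. A small end-to-end illustration against an httptest server (plain HTTP/1.1 chunked encoding here, but it is the same mechanism the gRPC status trailers use):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func handler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/grpc")
	w.WriteHeader(200)
	fmt.Fprint(w, "body bytes")
	// TrailerPrefix ("Trailer:") marks a header set after the body as a
	// trailer without predeclaring it, which is what WriteStatus does for
	// Grpc-Status on this transport.
	w.Header().Set(http.TrailerPrefix+"Grpc-Status", "0")
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(handler))
	defer srv.Close()
	resp, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	buf := make([]byte, 64)
	for {
		if _, err := resp.Body.Read(buf); err != nil {
			break // trailers are only populated after the body hits EOF
		}
	}
	fmt.Println("trailer:", resp.Trailer.Get("Grpc-Status"))
}
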
+func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { + h := ht.rw.Header() + + s.hdrMu.Lock() + for k, vv := range s.header { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + h.Add(k, encodeMetadataHeader(k, v)) + } + } + + s.hdrMu.Unlock() +} + +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + headersWritten := s.updateHeaderSent() + return ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + ht.rw.Write(hdr) + ht.rw.Write(data) + ht.rw.(http.Flusher).Flush() + }) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + if err := s.SetHeader(md); err != nil { + return err + } + + headersWritten := s.updateHeaderSent() + err := ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) + + if err == nil { + if ht.stats != nil { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + Compression: s.sendCompress, + }) + } + } + return err +} + +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { + // With this transport type there will be exactly 1 stream: this HTTP request. + + ctx := ht.req.Context() + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(ctx, ht.timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + + // requestOver is closed when the status has been written via WriteStatus. + requestOver := make(chan struct{}) + go func() { + select { + case <-requestOver: + case <-ht.closedCh: + case <-ht.req.Context().Done(): + } + cancel() + ht.Close() + }() + + req := ht.req + + s := &Stream{ + id: 0, // irrelevant + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + } + pr := &peer.Peer{ + Addr: ht.RemoteAddr(), + } + if req.TLS != nil { + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) + s.ctx = peer.NewContext(ctx, pr) + if ht.stats != nil { + s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: ht.RemoteAddr(), + Compression: s.recvCompress, + } + ht.stats.HandleRPC(s.ctx, inHeader) + } + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + windowHandler: func(int) {}, + } + + // readerDone is closed when the Body.Read-ing goroutine exits. + readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + + // TODO: minimize garbage, optimize recvBuffer code/ownership + const readSize = 8196 + for buf := make([]byte, readSize); ; { + n, err := req.Body.Read(buf) + if n > 0 { + s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) + buf = buf[n:] + } + if err != nil { + s.buf.put(recvMsg{err: mapRecvMsgError(err)}) + return + } + if len(buf) == 0 { + buf = make([]byte, readSize) + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. 
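
The Body.Read goroutine in HandleStreams above follows a common pump pattern: ownership of each filled buffer prefix moves to the receiver, and a fresh buffer is allocated only once the current one is used up. The same loop in miniature, with readSize shrunk for the example:

package main

import (
	"fmt"
	"io"
	"strings"
)

type recvMsg struct {
	data []byte
	err  error
}

// pump mirrors the reader goroutine in HandleStreams: buf[:n] is handed
// off through the channel, and a new buffer is allocated only after the
// current one has been fully consumed.
func pump(r io.Reader, out chan<- recvMsg) {
	const readSize = 8
	for buf := make([]byte, readSize); ; {
		n, err := r.Read(buf)
		if n > 0 {
			out <- recvMsg{data: buf[:n:n]}
			buf = buf[n:]
		}
		if err != nil {
			out <- recvMsg{err: err}
			return
		}
		if len(buf) == 0 {
			buf = make([]byte, readSize)
		}
	}
}

func main() {
	out := make(chan recvMsg, 16)
	go pump(strings.NewReader("hello, transport"), out)
	for m := range out {
		if m.err != nil {
			fmt.Println("done:", m.err)
			return
		}
		fmt.Printf("chunk %q\n", m.data)
	}
}
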
+ // The goroutine that is started is the one that then calls + // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. + startStream(s) + + ht.runStream() + close(requestOver) + + // Wait for reading goroutine to finish. + req.Body.Close() + <-readerDone +} + +func (ht *serverHandlerTransport) runStream() { + for { + select { + case fn := <-ht.writes: + fn() + case <-ht.closedCh: + return + } + } +} + +func (ht *serverHandlerTransport) IncrMsgSent() {} + +func (ht *serverHandlerTransport) IncrMsgRecv() {} + +func (ht *serverHandlerTransport) Drain() { + panic("Drain() is not implemented") +} + +// mapRecvMsgError returns the non-nil err into the appropriate +// error value as expected by callers of *grpc.parser.recvMsg. +// In particular, in can only be: +// * io.EOF +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * an error from the status package +func mapRecvMsgError(err error) error { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return err + } + if se, ok := err.(http2.StreamError); ok { + if code, ok := http2ErrConvTab[se.Code]; ok { + return status.Error(code, se.Error()) + } + } + if strings.Contains(err.Error(), "body closed by handler") { + return status.Error(codes.Canceled, err.Error()) + } + return connectionErrorf(true, err, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go new file mode 100644 index 000000000..f0c72d337 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -0,0 +1,1696 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "context" + "fmt" + "io" + "math" + "net" + "net/http" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" + "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/internal/transport/networktype" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// clientConnectionCounter counts the number of connections a client has +// initiated (equal to the number of http2Clients created). Must be accessed +// atomically. +var clientConnectionCounter uint64 + +// http2Client implements the ClientTransport interface with HTTP2. +type http2Client struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. 
+ userAgent string
+ md metadata.MD
+ conn net.Conn // underlying communication channel
+ loopy *loopyWriter
+ remoteAddr net.Addr
+ localAddr net.Addr
+ authInfo credentials.AuthInfo // auth info about the connection
+
+ readerDone chan struct{} // sync point to enable testing.
+ writerDone chan struct{} // sync point to enable testing.
+ // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
+ // that the server sent GoAway on this transport.
+ goAway chan struct{}
+
+ framer *framer
+ // controlBuf delivers all the control-related tasks (e.g., window
+ // updates, reset streams, and various settings) to the controller.
+ controlBuf *controlBuffer
+ fc *trInFlow
+ // The scheme used: https if TLS is on, http otherwise.
+ scheme string
+
+ isSecure bool
+
+ perRPCCreds []credentials.PerRPCCredentials
+
+ kp keepalive.ClientParameters
+ keepaliveEnabled bool
+
+ statsHandler stats.Handler
+
+ initialWindowSize int32
+
+ // configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE
+ maxSendHeaderListSize *uint32
+
+ bdpEst *bdpEstimator
+ // onPrefaceReceipt is a callback that the client transport calls upon
+ // receiving the server preface to signal that a successful HTTP2
+ // connection was established.
+ onPrefaceReceipt func()
+
+ maxConcurrentStreams uint32
+ streamQuota int64
+ streamsQuotaAvailable chan struct{}
+ waitingStreams uint32
+ nextID uint32
+
+ mu sync.Mutex // guards the following variables
+ state transportState
+ activeStreams map[uint32]*Stream
+ // prevGoAwayID records the Last-Stream-ID in the previous GOAWAY frame.
+ prevGoAwayID uint32
+ // goAwayReason records the http2.ErrCode and debug data received with the
+ // GoAway frame.
+ goAwayReason GoAwayReason
+ // goAwayDebugMessage contains a detailed human-readable string about a
+ // GoAway frame, useful for error messages.
+ goAwayDebugMessage string
+ // A condition variable used to signal when the keepalive goroutine should
+ // go dormant. The condition for dormancy is based on the number of active
+ // streams and the `PermitWithoutStream` keepalive client parameter. And
+ // since the number of active streams is guarded by the above mutex, we use
+ // the same for this condition variable as well.
+ kpDormancyCond *sync.Cond
+ // A boolean to track whether the keepalive goroutine is dormant or not.
+ // This is checked before attempting to signal the above condition
+ // variable.
+ kpDormant bool
+
+ // Fields below are for channelz metric collection.
+ channelzID int64 // channelz unique identification number
+ czData *channelzData
+
+ onGoAway func(GoAwayReason)
+ onClose func()
+
+ bufferPool *bufferPool
+
+ connectionID uint64
+}
+
+func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
+ address := addr.Addr
+ networkType, ok := networktype.Get(addr)
+ if fn != nil {
+ // Special handling for unix scheme with custom dialer. Back in the day,
+ // we did not have a unix resolver and therefore targets with a unix
+ // scheme would end up using the passthrough resolver. So, users used a
+ // custom dialer in this case and expected the original dial target to
+ // be passed to the custom dialer. Now, we have a unix resolver. But if
+ // a custom dialer is specified, we want to retain the old behavior in
+ // terms of the address being passed to the custom dialer.
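
Concretely, the compatibility rewrite described in the comment above maps addresses like this (a standalone sketch of just the unix-scheme branch):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// unixDialTarget reproduces the rewrite in dial above: with a custom
// dialer, absolute unix paths become "unix://<path>" and relative ones
// "unix:<path>", so older custom dialers keep seeing the original target
// syntax. Abstract-socket names (leading NUL) pass through untouched.
func unixDialTarget(address string) string {
	if strings.HasPrefix(address, "\x00") {
		return address
	}
	if filepath.IsAbs(address) {
		return "unix://" + address
	}
	return "unix:" + address
}

func main() {
	for _, a := range []string{"/var/run/app.sock", "relative.sock"} {
		fmt.Printf("%-20s -> %s\n", a, unixDialTarget(a))
	}
}
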
+ if networkType == "unix" && !strings.HasPrefix(address, "\x00") { + // Supported unix targets are either "unix://absolute-path" or + // "unix:relative-path". + if filepath.IsAbs(address) { + return fn(ctx, "unix://"+address) + } + return fn(ctx, "unix:"+address) + } + return fn(ctx, address) + } + if !ok { + networkType, address = parseDialTarget(address) + } + if networkType == "tcp" && useProxy { + return proxyDial(ctx, address, grpcUA) + } + return (&net.Dialer{}).DialContext(ctx, networkType, address) +} + +func isTemporary(err error) bool { + switch err := err.(type) { + case interface { + Temporary() bool + }: + return err.Temporary() + case interface { + Timeout() bool + }: + // Timeouts may be resolved upon retry, and are thus treated as + // temporary. + return err.Timeout() + } + return true +} + +// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// and starts to receive messages on it. Non-nil error returns if construction +// fails. +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { + scheme := "http" + ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + // gRPC, resolver, balancer etc. can specify arbitrary data in the + // Attributes field of resolver.Address, which is shoved into connectCtx + // and passed to the dialer and credential handshaker. This makes it possible for + // address specific arbitrary data to reach custom dialers and credential handshakers. + connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) + if err != nil { + if opts.FailOnNonTempDialError { + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) + } + return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + } + // Any further errors will close the underlying connection + defer func(conn net.Conn) { + if err != nil { + conn.Close() + } + }(conn) + kp := opts.KeepaliveParams + // Validate keepalive parameters. + if kp.Time == 0 { + kp.Time = defaultClientKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultClientKeepaliveTimeout + } + keepaliveEnabled := false + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + keepaliveEnabled = true + } + var ( + isSecure bool + authInfo credentials.AuthInfo + ) + transportCreds := opts.TransportCredentials + perRPCCreds := opts.PerRPCCredentials + + if b := opts.CredsBundle; b != nil { + if t := b.TransportCredentials(); t != nil { + transportCreds = t + } + if t := b.PerRPCCredentials(); t != nil { + perRPCCreds = append(perRPCCreds, t) + } + } + if transportCreds != nil { + rawConn := conn + // Pull the deadline from the connectCtx, which will be used for + // timeouts in the authentication protocol handshake. Can ignore the + // boolean as the deadline will return the zero value, which will make + // the conn not timeout on I/O operations. 
+ deadline, _ := connectCtx.Deadline() + rawConn.SetDeadline(deadline) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn) + rawConn.SetDeadline(time.Time{}) + if err != nil { + return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) + } + for _, cd := range perRPCCreds { + if cd.RequireTransportSecurity() { + if ci, ok := authInfo.(interface { + GetCommonAuthInfo() credentials.CommonAuthInfo + }); ok { + secLevel := ci.GetCommonAuthInfo().SecurityLevel + if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity { + return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection") + } + } + } + } + isSecure = true + if transportCreds.Info().SecurityProtocol == "tls" { + scheme = "https" + } + } + dynamicWindow := true + icwz := int32(initialWindowSize) + if opts.InitialConnWindowSize >= defaultWindowSize { + icwz = opts.InitialConnWindowSize + dynamicWindow = false + } + writeBufSize := opts.WriteBufferSize + readBufSize := opts.ReadBufferSize + maxHeaderListSize := defaultClientMaxHeaderListSize + if opts.MaxHeaderListSize != nil { + maxHeaderListSize = *opts.MaxHeaderListSize + } + t := &http2Client{ + ctx: ctx, + ctxDone: ctx.Done(), // Cache Done chan. + cancel: cancel, + userAgent: opts.UserAgent, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + goAway: make(chan struct{}), + framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + fc: &trInFlow{limit: uint32(icwz)}, + scheme: scheme, + activeStreams: make(map[uint32]*Stream), + isSecure: isSecure, + perRPCCreds: perRPCCreds, + kp: kp, + statsHandler: opts.StatsHandler, + initialWindowSize: initialWindowSize, + onPrefaceReceipt: onPrefaceReceipt, + nextID: 1, + maxConcurrentStreams: defaultMaxStreamsClient, + streamQuota: defaultMaxStreamsClient, + streamsQuotaAvailable: make(chan struct{}, 1), + czData: new(channelzData), + onGoAway: onGoAway, + onClose: onClose, + keepaliveEnabled: keepaliveEnabled, + bufferPool: newBufferPool(), + } + + if md, ok := addr.Metadata.(*metadata.MD); ok { + t.md = *md + } else if md := imetadata.Get(addr); md != nil { + t.md = md + } + t.controlBuf = newControlBuffer(t.ctxDone) + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + dynamicWindow = false + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + if t.statsHandler != nil { + t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connBegin) + } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + } + if t.keepaliveEnabled { + t.kpDormancyCond = sync.NewCond(&t.mu) + go t.keepalive() + } + // Start the reader goroutine for incoming message. Each transport has + // a dedicated goroutine which reads HTTP2 frame from network. Then it + // dispatches the frame to the corresponding stream entity. + go t.reader() + + // Send connection preface to server. 
+ n, err := t.conn.Write(clientPreface) + if err != nil { + err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + t.Close(err) + return nil, err + } + if n != len(clientPreface) { + err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + t.Close(err) + return nil, err + } + var ss []http2.Setting + + if t.initialWindowSize != defaultWindowSize { + ss = append(ss, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(t.initialWindowSize), + }) + } + if opts.MaxHeaderListSize != nil { + ss = append(ss, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *opts.MaxHeaderListSize, + }) + } + err = t.framer.fr.WriteSettings(ss...) + if err != nil { + err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + t.Close(err) + return nil, err + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { + err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) + t.Close(err) + return nil, err + } + } + + t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1) + + if err := t.framer.writer.Flush(); err != nil { + return nil, err + } + go func() { + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) + err := t.loopy.run() + if err != nil { + if logger.V(logLevel) { + logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) + } + } + // Do not close the transport. Let reader goroutine handle it since + // there might be data in the buffers. + t.conn.Close() + t.controlBuf.finish() + close(t.writerDone) + }() + return t, nil +} + +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { + // TODO(zhaoq): Handle uint32 overflow of Stream.id. + s := &Stream{ + ct: t, + done: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + headerChan: make(chan struct{}), + contentSubtype: callHdr.ContentSubtype, + doneFunc: callHdr.DoneFunc, + } + s.wq = newWriteQuota(defaultWriteQuota, s.done) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + // The client side stream context should have exactly the same life cycle with the user provided context. + // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. + // So we use the original context here instead of creating a copy. 
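
The bring-up sequence above (client preface, then SETTINGS, then an optional connection-level WINDOW_UPDATE) can be exercised against an in-memory buffer with the same golang.org/x/net/http2 framer APIs; the window sizes below are hypothetical:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	var conn bytes.Buffer // stands in for the net.Conn
	clientPreface := []byte(http2.ClientPreface)

	if n, _ := conn.Write(clientPreface); n != len(clientPreface) {
		panic("short preface write")
	}
	fr := http2.NewFramer(&conn, &conn)
	// Announce a larger per-stream window, as newHTTP2Client does when
	// opts.InitialWindowSize raises it above the 64KB default.
	if err := fr.WriteSettings(http2.Setting{
		ID: http2.SettingInitialWindowSize, Val: 1 << 20,
	}); err != nil {
		panic(err)
	}
	// Widen the connection-level window by the same delta.
	if err := fr.WriteWindowUpdate(0, 1<<20-65535); err != nil {
		panic(err)
	}
	fmt.Printf("wrote %d bytes of preface+frames\n", conn.Len())
}
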
+ s.ctx = ctx + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctx.Done(), + recv: s.buf, + closeStream: func(err error) { + t.CloseStream(s, err) + }, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + return s +} + +func (t *http2Client) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, + } +} + +func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { + aud := t.createAudience(callHdr) + ri := credentials.RequestInfo{ + Method: callHdr.Method, + AuthInfo: t.authInfo, + } + ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri) + authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) + if err != nil { + return nil, err + } + callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr) + if err != nil { + return nil, err + } + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + // Make the slice of certain predictable size to reduce allocations made by append. + hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te + hfLen += len(authData) + len(callAuthData) + headerFields := make([]hpack.HeaderField, 0, hfLen) + headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) + headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) + if callHdr.PreviousAttempts > 0 { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) + } + + if callHdr.SendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress}) + } + if dl, ok := ctx.Deadline(); ok { + // Send out timeout regardless its value. The server can detect timeout context by itself. + // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. 
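
For reference, the seven fixed fields behind the hfLen count in createHeaderFields (below) are the ones assembled in this sketch, for a hypothetical method and with auth data and user metadata omitted:

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

// baseRequestFields builds the seven headers every gRPC request carries,
// mirroring the start of createHeaderFields; host, method, and user agent
// are caller-supplied (hypothetical values below).
func baseRequestFields(scheme, host, method, userAgent string) []hpack.HeaderField {
	return []hpack.HeaderField{
		{Name: ":method", Value: "POST"},
		{Name: ":scheme", Value: scheme},
		{Name: ":path", Value: method},
		{Name: ":authority", Value: host},
		{Name: "content-type", Value: "application/grpc"},
		{Name: "user-agent", Value: userAgent},
		{Name: "te", Value: "trailers"},
	}
}

func main() {
	for _, f := range baseRequestFields("https", "example.com:443", "/pkg.Service/Method", "grpc-go/sketch") {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	}
}
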
+ timeout := time.Until(dl) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)}) + } + for k, v := range authData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + for k, v := range callAuthData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + if b := stats.OutgoingTags(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) + } + if b := stats.OutgoingTrace(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) + } + + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + var k string + for k, vv := range md { + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + for _, vv := range added { + for i, v := range vv { + if i%2 == 0 { + k = strings.ToLower(v) + continue + } + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + } + for k, vv := range t.md { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + return headerFields, nil +} + +func (t *http2Client) createAudience(callHdr *CallHdr) string { + // Create an audience string only if needed. + if len(t.perRPCCreds) == 0 && callHdr.Creds == nil { + return "" + } + // Construct URI required to get auth request metadata. + // Omit port if it is the default one. + host := strings.TrimSuffix(callHdr.Host, ":443") + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + pos = len(callHdr.Method) + } + return "https://" + host + callHdr.Method[:pos] +} + +func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { + if len(t.perRPCCreds) == 0 { + return nil, nil + } + authData := map[string]string{} + for _, c := range t.perRPCCreds { + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { + if _, ok := status.FromError(err); ok { + return nil, err + } + + return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) + authData[k] = v + } + } + return authData, nil +} + +func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { + var callAuthData map[string]string + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. 
+ if callCreds := callHdr.Creds; callCreds != nil { + if callCreds.RequireTransportSecurity() { + ri, _ := credentials.RequestInfoFromContext(ctx) + if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil { + return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, status.Errorf(codes.Internal, "transport: %v", err) + } + callAuthData = make(map[string]string, len(data)) + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } + return callAuthData, nil +} + +// NewStreamError wraps an error and reports additional information. Typically +// NewStream errors result in transparent retry, as they mean nothing went onto +// the wire. However, there are two notable exceptions: +// +// 1. If the stream headers violate the max header list size allowed by the +// server. In this case there is no reason to retry at all, as it is +// assumed the RPC would continue to fail on subsequent attempts. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. +type NewStreamError struct { + Err error + + DoNotRetry bool + DoNotTransparentRetry bool +} + +func (e NewStreamError) Error() string { + return e.Err.Error() +} + +// NewStream creates a stream and registers it into the transport as "active" +// streams. All non-nil errors returned will be *NewStreamError. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { + ctx = peer.NewContext(ctx, t.getPeer()) + headerFields, err := t.createHeaderFields(ctx, callHdr) + if err != nil { + return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true} + } + s := t.newStream(ctx, callHdr) + cleanup := func(err error) { + if s.swapState(streamDone) == streamDone { + // If it was already done, return. + return + } + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + s.write(recvMsg{err: err}) + close(s.done) + // If headerChan isn't closed, then close it. + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + close(s.headerChan) + } + } + hdr := &headerFrame{ + hf: headerFields, + endStream: false, + initStream: func(id uint32) error { + t.mu.Lock() + if state := t.state; state != reachable { + t.mu.Unlock() + // Do a quick cleanup. + err := error(errStreamDrain) + if state == closing { + err = ErrConnClosing + } + cleanup(err) + return err + } + t.activeStreams[id] = s + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + // If the keepalive goroutine has gone dormant, wake it up. + if t.kpDormant { + t.kpDormancyCond.Signal() + } + t.mu.Unlock() + return nil + }, + onOrphaned: cleanup, + wq: s.wq, + } + firstTry := true + var ch chan struct{} + checkForStreamQuota := func(it interface{}) bool { + if t.streamQuota <= 0 { // Can go negative if server decreases it. 
+ if firstTry { + t.waitingStreams++ + } + ch = t.streamsQuotaAvailable + return false + } + if !firstTry { + t.waitingStreams-- + } + t.streamQuota-- + h := it.(*headerFrame) + h.streamID = t.nextID + t.nextID += 2 + s.id = h.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + var hdrListSizeErr error + checkForHeaderListSize := func(it interface{}) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) + return false + } + } + return true + } + for { + success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { + if !checkForStreamQuota(it) { + return false + } + if !checkForHeaderListSize(it) { + return false + } + return true + }, hdr) + if err != nil { + return nil, &NewStreamError{Err: err} + } + if success { + break + } + if hdrListSizeErr != nil { + return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true} + } + firstTry = false + select { + case <-ch: + case <-ctx.Done(): + return nil, &NewStreamError{Err: ContextErr(ctx.Err())} + case <-t.goAway: + return nil, &NewStreamError{Err: errStreamDrain} + case <-t.ctx.Done(): + return nil, &NewStreamError{Err: ErrConnClosing} + } + } + if t.statsHandler != nil { + header, ok := metadata.FromOutgoingContext(ctx) + if ok { + header.Set("user-agent", t.userAgent) + } else { + header = metadata.Pairs("user-agent", t.userAgent) + } + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + t.statsHandler.HandleRPC(s.ctx, outHeader) + } + return s, nil +} + +// CloseStream clears the footprint of a stream when the stream is not needed any more. +// This must not be executed in reader's goroutine. +func (t *http2Client) CloseStream(s *Stream, err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) +} + +func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { + // Set stream status to done. + if s.swapState(streamDone) == streamDone { + // If it was already done, return. If multiple closeStream calls + // happen simultaneously, wait for the first to finish. + <-s.done + return + } + // status and trailers can be updated here without any synchronization because the stream goroutine will + // only read it after it sees an io.EOF error from read or write and we'll write those errors + // only after updating this. + s.status = st + if len(mdata) > 0 { + s.trailer = mdata + } + if err != nil { + // This will unblock reads eventually. + s.write(recvMsg{err: err}) + } + // If headerChan isn't closed, then close it. 
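+ // headerChanClosed is a close-once latch: the CAS below guarantees that
+ // exactly one of operateHeaders, the NewStream cleanup, and closeStream
+ // actually closes headerChan. The pattern in isolation (a sketch):
+ //
+ //	if atomic.CompareAndSwapUint32(&closed, 0, 1) {
+ //		close(ch) // only the winning goroutine reaches this
+ //	}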
+ if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.noHeaders = true + close(s.headerChan) + } + cleanup := &cleanupStream{ + streamID: s.id, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + } + t.mu.Unlock() + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } + }, + rst: rst, + rstCode: rstCode, + } + addBackStreamQuota := func(interface{}) bool { + t.streamQuota++ + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) + // This will unblock write. + close(s.done) + if s.doneFunc != nil { + s.doneFunc() + } +} + +// Close kicks off the shutdown process of the transport. This should be called +// only once on a transport. Once it is called, the transport should not be +// accessed any more. +// +// This method blocks until the addrConn that initiated this transport is +// re-connected. This happens because t.onClose() begins reconnect logic at the +// addrConn level and blocks until the addrConn is successfully connected. +func (t *http2Client) Close(err error) { + t.mu.Lock() + // Make sure we only Close once. + if t.state == closing { + t.mu.Unlock() + return + } + // Call t.onClose before setting the state to closing to prevent the client + // from attempting to create new streams ASAP. + t.onClose() + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + if t.kpDormant { + // If the keepalive goroutine is blocked on this condition variable, we + // should unblock it so that the goroutine eventually exits. + t.kpDormancyCond.Signal() + } + t.mu.Unlock() + t.controlBuf.finish() + t.cancel() + t.conn.Close() + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + _, goAwayDebugMessage := t.GetGoAwayReason() + + var st *status.Status + if len(goAwayDebugMessage) > 0 { + st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + err = st.Err() + } else { + st = status.New(codes.Unavailable, err.Error()) + } + + // Notify all active streams. + for _, s := range streams { + t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) + } + if t.statsHandler != nil { + connEnd := &stats.ConnEnd{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connEnd) + } +} + +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. +func (t *http2Client) GracefulClose() { + t.mu.Lock() + // Make sure we move to draining only from active. + if t.state == draining || t.state == closing { + t.mu.Unlock() + return + } + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + t.Close(ErrConnClosing) + return + } + t.controlBuf.put(&incomingGoAway{}) +} + +// Write formats the data into HTTP2 data frame(s) and sends it out. The caller +// should proceed only if Write returns nil. 
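+ // hdr is the 5-byte gRPC message prefix (1 compression-flag byte plus a
+ // 4-byte big-endian length); e.g. an uncompressed 1024-byte message is
+ // framed as (illustrative) hdr = [0x00 0x00 0x00 0x04 0x00] followed by
+ // the 1024 data bytes, all carried in HTTP/2 DATA frames.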
+func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if opts.Last { + // If it's the last message, update stream state. + if !s.compareAndSwapState(streamActive, streamWriteDone) { + return errStreamDone + } + } else if s.getState() != streamActive { + return errStreamDone + } + df := &dataFrame{ + streamID: s.id, + endStream: opts.Last, + h: hdr, + d: data, + } + if hdr != nil || data != nil { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + return err + } + } + return t.controlBuf.put(df) +} + +func (t *http2Client) getStream(f http2.Frame) *Stream { + t.mu.Lock() + s := t.activeStreams[f.Header().StreamID] + t.mu.Unlock() + return s +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Client) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateWindow adjusts the inbound quota for the stream. +// Window updates will be sent out when the cumulative quota +// exceeds the corresponding threshold. +func (t *http2Client) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Client) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() + updateIWS := func(interface{}) bool { + t.initialWindowSize = int32(n) + return true + } + t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) +} + +func (t *http2Client) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. 
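+ // Once the stream is located below, per-stream accounting applies on top
+ // of the connection-level accounting above (simplified model, numbers
+ // made up): with a 64KB stream window, s.fc.onData(16384) debits the
+ // window as frames arrive, and once the application has consumed roughly
+ // a quarter of the limit, s.fc.onRead returns a non-zero increment that
+ // is queued as a stream-level WINDOW_UPDATE.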
+ s := t.getStream(f) + if s == nil { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. + if f.StreamEnded() { + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) + } +} + +func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { + s := t.getStream(f) + if s == nil { + return + } + if f.ErrCode == http2.ErrCodeRefusedStream { + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + } + statusCode, ok := http2ErrConvTab[f.ErrCode] + if !ok { + if logger.V(logLevel) { + logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + } + statusCode = codes.Unknown + } + if statusCode == codes.Canceled { + if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { + // Our deadline was already exceeded, and that was likely the cause + // of this cancelation. Alter the status code accordingly. + statusCode = codes.DeadlineExceeded + } + } + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) +} + +func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { + if f.IsAck() { + return + } + var maxStreams *uint32 + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxConcurrentStreams: + maxStreams = new(uint32) + *maxStreams = s.Val + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + if isFirst && maxStreams == nil { + maxStreams = new(uint32) + *maxStreams = math.MaxUint32 + } + sf := &incomingSettings{ + ss: ss, + } + if maxStreams != nil { + updateStreamQuota := func() { + delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) + t.maxConcurrentStreams = *maxStreams + t.streamQuota += delta + if delta > 0 && t.waitingStreams > 0 { + close(t.streamsQuotaAvailable) // wake all of them up. + t.streamsQuotaAvailable = make(chan struct{}, 1) + } + } + updateFuncs = append(updateFuncs, updateStreamQuota) + } + t.controlBuf.executeAndPut(func(interface{}) bool { + for _, f := range updateFuncs { + f() + } + return true + }, sf) +} + +func (t *http2Client) handlePing(f *http2.PingFrame) { + if f.IsAck() { + // Maybe it's a BDP ping. 
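+ // bdpPing carries a fixed 8-byte payload, so the estimator can tell its
+ // own ack apart from keepalive acks by value, roughly (a sketch):
+ //
+ //	if f.Data == bdpPing.data { /* measure RTT, maybe grow windows */ }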
+ if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) +} + +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return + } + if f.ErrCode == http2.ErrCodeEnhanceYourCalm { + if logger.V(logLevel) { + logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + } + } + id := f.LastStreamID + if id > 0 && id%2 == 0 { + t.mu.Unlock() + t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) + return + } + // A client can receive multiple GoAways from the server (see + // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first + // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be + // sent after an RTT delay with the ID of the last stream the server will + // process. + // + // Therefore, when we get the first GoAway we don't necessarily close any + // streams. On a second GoAway, however, we close all streams created after + // the GoAwayId. This way streams that were in-flight while the GoAway from + // server was being sent don't get killed. + select { + case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways). + // If there are multiple GoAways the first one should always have an ID greater than the following ones. + if id > t.prevGoAwayID { + t.mu.Unlock() + t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) + return + } + default: + t.setGoAwayReason(f) + close(t.goAway) + t.controlBuf.put(&incomingGoAway{}) + // Notify the clientconn about the GOAWAY before we set the state to + // draining, to allow the client to stop attempting to create streams + // before disallowing new streams on this connection. + t.onGoAway(t.goAwayReason) + t.state = draining + } + // All streams with IDs greater than the GoAwayId + // and smaller than the previous GoAway ID should be killed. + upperLimit := t.prevGoAwayID + if upperLimit == 0 { // This is the first GoAway Frame. + upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. + } + for streamID, stream := range t.activeStreams { + if streamID > id && streamID <= upperLimit { + // The stream was unprocessed by the server. + atomic.StoreUint32(&stream.unprocessed, 1) + t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + } + } + t.prevGoAwayID = id + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + } +} + +// setGoAwayReason sets the value of t.goAwayReason based +// on the GoAway frame received. +// It expects a lock on the transport's mutex to be held by +// the caller.
+func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { + t.goAwayReason = GoAwayNoReason + switch f.ErrCode { + case http2.ErrCodeEnhanceYourCalm: + if string(f.DebugData()) == "too_many_pings" { + t.goAwayReason = GoAwayTooManyPings + } + } + if len(f.DebugData()) == 0 { + t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode) + } else { + t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData())) + } +} + +func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) { + t.mu.Lock() + defer t.mu.Unlock() + return t.goAwayReason, t.goAwayDebugMessage +} + +func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { + t.controlBuf.put(&incomingWindowUpdate{ + streamID: f.Header().StreamID, + increment: f.Increment, + }) +} + +// operateHeaders takes action on the decoded headers. +func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + s := t.getStream(frame) + if s == nil { + return + } + endStream := frame.StreamEnded() + atomic.StoreUint32(&s.bytesReceived, 1) + initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 + + if !initialHeader && !endStream { + // As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set. + st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream") + t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false) + return + } + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + se := status.New(codes.Internal, "peer header list size exceeded limit") + t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream) + return + } + + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. 
+ isGRPC = !initialHeader + mdata = make(map[string][]string) + contentTypeErr = "malformed header: missing HTTP content-type" + grpcMessage string + statusGen *status.Status + recvCompress string + httpStatusCode *int + httpStatusErr string + rawStatusCode = codes.Unknown + // headerError is set if an error is encountered while parsing the headers + headerError string + ) + + if initialHeader { + httpStatusErr = "malformed header: missing HTTP status" + } + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType { + contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value) + break + } + contentTypeErr = "" + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + isGRPC = true + case "grpc-encoding": + recvCompress = hf.Value + case "grpc-status": + code, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + rawStatusCode = codes.Code(uint32(code)) + case "grpc-message": + grpcMessage = decodeGrpcMessage(hf.Value) + case "grpc-status-details-bin": + var err error + statusGen, err = decodeGRPCStatusDetails(hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) + } + case ":status": + if hf.Value == "200" { + httpStatusErr = "" + statusCode := 200 + httpStatusCode = &statusCode + break + } + + c, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + statusCode := int(c) + httpStatusCode = &statusCode + + httpStatusErr = fmt.Sprintf( + "unexpected HTTP status code received from server: %d (%s)", + statusCode, + http.StatusText(statusCode), + ) + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err) + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + if !isGRPC || httpStatusErr != "" { + var code = codes.Internal // when header does not include HTTP status, return INTERNAL + + if httpStatusCode != nil { + var ok bool + code, ok = HTTPStatusConvTab[*httpStatusCode] + if !ok { + code = codes.Unknown + } + } + var errs []string + if httpStatusErr != "" { + errs = append(errs, httpStatusErr) + } + if contentTypeErr != "" { + errs = append(errs, contentTypeErr) + } + // Verify the HTTP response is a 200. + se := status.New(code, strings.Join(errs, "; ")) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + if headerError != "" { + se := status.New(codes.Internal, headerError) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + isHeader := false + + // If headerChan hasn't been closed yet + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true + if !endStream { + // HEADERS frame block carries a Response-Headers. 
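+ // The two shapes distinguished here, as they appear on the wire
+ // (illustrative):
+ //
+ //	Response-Headers: HEADERS(:status=200, content-type=application/grpc)
+ //	                  ... DATA ... HEADERS(grpc-status=0) +END_STREAM
+ //	Trailers-Only:    HEADERS(:status=200, content-type=application/grpc,
+ //	                  grpc-status=12) +END_STREAM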
+ isHeader = true + // These values can be set without any synchronization because + // stream goroutine will read it only after seeing a closed + // headerChan which we'll close after setting this. + s.recvCompress = recvCompress + if len(mdata) > 0 { + s.header = mdata + } + } else { + // HEADERS frame block carries a Trailers-Only. + s.noHeaders = true + } + close(s.headerChan) + } + + if t.statsHandler != nil { + if isHeader { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + Header: metadata.MD(mdata).Copy(), + Compression: s.recvCompress, + } + t.statsHandler.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + Trailer: metadata.MD(mdata).Copy(), + } + t.statsHandler.HandleRPC(s.ctx, inTrailer) + } + } + + if !endStream { + return + } + + if statusGen == nil { + statusGen = status.New(rawStatusCode, grpcMessage) + } + + // if client received END_STREAM from server while stream was still active, send RST_STREAM + rst := s.getState() == streamActive + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) +} + +// reader runs as a separate goroutine in charge of reading data from network +// connection. +// +// TODO(zhaoq): currently one reader per transport. Investigate whether this is +// optimal. +// TODO(zhaoq): Check the validity of the incoming frame sequence. +func (t *http2Client) reader() { + defer close(t.readerDone) + // Check the validity of server preface. + frame, err := t.framer.fr.ReadFrame() + if err != nil { + err = connectionErrorf(true, err, "error reading server preface: %v", err) + t.Close(err) // this kicks off resetTransport, so must be last before return + return + } + t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + // this kicks off resetTransport, so must be last before return + t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)) + return + } + t.onPrefaceReceipt() + t.handleSettings(sf, true) + + // loop to keep reading incoming messages on this transport. + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + if err != nil { + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + // use error detail to provide better err message + code := http2ErrConvTab[se.Code] + errorDetail := t.framer.fr.ErrorDetail() + var msg string + if errorDetail != nil { + msg = errorDetail.Error() + } else { + msg = "received invalid frame" + } + t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) + } + continue + } else { + // Transport error. 
+ t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) + return + } + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + t.operateHeaders(frame) + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame, false) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.GoAwayFrame: + t.handleGoAway(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + default: + if logger.V(logLevel) { + logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + } + } + } +} + +func minTime(a, b time.Duration) time.Duration { + if a < b { + return a + } + return b +} + +// keepalive running in a separate goroutine makes sure the connection is alive by sending pings. +func (t *http2Client) keepalive() { + p := &ping{data: [8]byte{}} + // True iff a ping has been sent, and no data has been received since then. + outstandingPing := false + // Amount of time remaining before which we should receive an ACK for the + // last sent ping. + timeoutLeft := time.Duration(0) + // Records the last value of t.lastRead before we go block on the timer. + // This is required to check for read activity since then. + prevNano := time.Now().UnixNano() + timer := time.NewTimer(t.kp.Time) + for { + select { + case <-timer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were here. + outstandingPing = false + // Next timer should fire at kp.Time seconds from lastRead time. + timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead + continue + } + if outstandingPing && timeoutLeft <= 0 { + t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) + return + } + t.mu.Lock() + if t.state == closing { + // If the transport is closing, we should exit from the + // keepalive goroutine here. If not, we could have a race + // between the call to Signal() from Close() and the call to + // Wait() here, whereby the keepalive goroutine ends up + // blocking on the condition variable which will never be + // signalled again. + t.mu.Unlock() + return + } + if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { + // If a ping was sent out previously (because there were active + // streams at that point) which wasn't acked and its timeout + // hadn't fired, but we got here and are about to go dormant, + // we should make sure that we unconditionally send a ping once + // we awaken. + outstandingPing = false + t.kpDormant = true + t.kpDormancyCond.Wait() + } + t.kpDormant = false + t.mu.Unlock() + + // We get here either because we were dormant and a new stream was + // created which unblocked the Wait() call, or because the + // keepalive timer expired. In both cases, we need to send a ping. + if !outstandingPing { + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + timeoutLeft = t.kp.Timeout + outstandingPing = true + } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). 
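+ // Worked example (parameters made up): with kp.Time = 2m and
+ // kp.Timeout = 20s, the timer after sending a ping is reset to
+ // min(2m, 20s) = 20s, so a dead connection is detected after the 20s
+ // timeout instead of after a full 2m keepalive interval.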
+ sleepDuration := minTime(t.kp.Time, timeoutLeft) + timeoutLeft -= sleepDuration + timer.Reset(sleepDuration) + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + } +} + +func (t *http2Client) Error() <-chan struct{} { + return t.ctx.Done() +} + +func (t *http2Client) GoAway() <-chan struct{} { + return t.goAway +} + +func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } + +func (t *http2Client) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Client) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Client) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.ctxDone: + return -1 + case <-timer.C: + return -2 + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go new file mode 100644 index 000000000..2c6eaf0e5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -0,0 +1,1430 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpcutil" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +var ( + // ErrIllegalHeaderWrite indicates that setting header is illegal because of + // the stream's state. + ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + // ErrHeaderListSizeLimitViolation indicates that the header list size is larger + // than the limit set by peer. + ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") +) + +// serverConnectionCounter counts the number of connections a server has seen +// (equal to the number of http2Servers created). Must be accessed atomically. +var serverConnectionCounter uint64 + +// http2Server implements the ServerTransport interface with HTTP2. +type http2Server struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + remoteAddr net.Addr + localAddr net.Addr + authInfo credentials.AuthInfo // auth info about the connection + inTapHandle tap.ServerInHandle + framer *framer + // The max number of concurrent streams. + maxStreams uint32 + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow + stats stats.Handler + // Keepalive and max-age parameters for the server. + kp keepalive.ServerParameters + // Keepalive enforcement policy. + kep keepalive.EnforcementPolicy + // The time instance last ping was received. + lastPingAt time.Time + // Number of times the client has violated keepalive ping policy so far. + pingStrikes uint8 + // Flag to signify that number of ping strikes should be reset to 0. + // This is set whenever data or header frames are sent. + // 1 means yes. + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator + maxSendHeaderListSize *uint32 + + mu sync.Mutex // guard the following + + // drainChan is initialized when Drain() is called the first time. + // After which the server writes out the first GoAway(with ID 2^31-1) frame. + // Then an independent goroutine will be launched to later send the second GoAway. + // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. + // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is + // already underway. + drainChan chan struct{} + state transportState + activeStreams map[uint32]*Stream + // idle is the time instant when the connection went idle. + // This is either the beginning of the connection or when the number of + // RPCs go down to 0. + // When the connection is busy, this value is set to 0. 
+ idle time.Time + + // Fields below are for channelz metric collection. + channelzID int64 // channelz unique identification number + czData *channelzData + bufferPool *bufferPool + + connectionID uint64 + + // maxStreamMu guards the maximum stream ID + // This lock may not be taken if mu is already held. + maxStreamMu sync.Mutex + maxStreamID uint32 // max stream ID ever seen +} + +// NewServerTransport creates a http2 transport with conn and configuration +// options from config. +// +// It returns a non-nil transport and a nil error on success. On failure, it +// returns a nil transport and a non-nil error. For a special case where the +// underlying conn gets closed before the client preface could be read, it +// returns a nil transport and a nil error. +func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + var authInfo credentials.AuthInfo + rawConn := conn + if config.Credentials != nil { + var err error + conn, authInfo, err = config.Credentials.ServerHandshake(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away + // from gRPC; those connections should be left open. io.EOF means + // the connection was closed before handshaking completed, which can + // happen naturally from probers. Return these errors directly. + if err == credentials.ErrConnDispatched || err == io.EOF { + return nil, err + } + return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + } + } + writeBufSize := config.WriteBufferSize + readBufSize := config.ReadBufferSize + maxHeaderListSize := defaultServerMaxHeaderListSize + if config.MaxHeaderListSize != nil { + maxHeaderListSize = *config.MaxHeaderListSize + } + framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + // Send initial settings as connection preface to client. + isettings := []http2.Setting{{ + ID: http2.SettingMaxFrameSize, + Val: http2MaxFrameLen, + }} + // TODO(zhaoq): Have a better way to signal "no limit" because 0 is + // permitted in the HTTP2 spec. + maxStreams := config.MaxStreams + if maxStreams == 0 { + maxStreams = math.MaxUint32 + } else { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, + Val: maxStreams, + }) + } + dynamicWindow := true + iwz := int32(initialWindowSize) + if config.InitialWindowSize >= defaultWindowSize { + iwz = config.InitialWindowSize + dynamicWindow = false + } + icwz := int32(initialWindowSize) + if config.InitialConnWindowSize >= defaultWindowSize { + icwz = config.InitialConnWindowSize + dynamicWindow = false + } + if iwz != defaultWindowSize { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(iwz)}) + } + if config.MaxHeaderListSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *config.MaxHeaderListSize, + }) + } + if config.HeaderTableSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingHeaderTableSize, + Val: *config.HeaderTableSize, + }) + } + if err := framer.fr.WriteSettings(isettings...); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + // Adjust the connection flow control window if needed. 
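+ // SETTINGS_INITIAL_WINDOW_SIZE only resizes per-stream windows, so the
+ // connection-level window has to be grown with an explicit WINDOW_UPDATE
+ // on stream 0. Worked example (made-up config): InitialConnWindowSize =
+ // 1MB gives delta = 1MB - 64KB, where 64KB (65535 bytes) is the HTTP/2
+ // default window.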
+ if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + } + kp := config.KeepaliveParams + if kp.MaxConnectionIdle == 0 { + kp.MaxConnectionIdle = defaultMaxConnectionIdle + } + if kp.MaxConnectionAge == 0 { + kp.MaxConnectionAge = defaultMaxConnectionAge + } + // Add a jitter to MaxConnectionAge. + kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) + if kp.MaxConnectionAgeGrace == 0 { + kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace + } + if kp.Time == 0 { + kp.Time = defaultServerKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultServerKeepaliveTimeout + } + kep := config.KeepalivePolicy + if kep.MinTime == 0 { + kep.MinTime = defaultKeepalivePolicyMinTime + } + + done := make(chan struct{}) + t := &http2Server{ + ctx: setConnection(context.Background(), rawConn), + done: done, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + framer: framer, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + maxStreams: maxStreams, + inTapHandle: config.InTapHandle, + fc: &trInFlow{limit: uint32(icwz)}, + state: reachable, + activeStreams: make(map[uint32]*Stream), + stats: config.StatsHandler, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, + czData: new(channelzData), + bufferPool: newBufferPool(), + } + t.controlBuf = newControlBuffer(t.done) + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + if t.stats != nil { + t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{} + t.stats.HandleConn(t.ctx, connBegin) + } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + } + + t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) + + t.framer.writer.Flush() + + defer func() { + if err != nil { + t.Close() + } + }() + + // Check the validity of client preface. + preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + // In deployments where a gRPC server runs behind a cloud load balancer + // which performs regular TCP level health checks, the connection is + // closed immediately by the latter. Returning io.EOF here allows the + // grpc server implementation to recognize this scenario and suppress + // logging to reduce spam. 
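+ // The preface being validated is fixed by RFC 7540 §3.5:
+ //
+ //	clientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" // 24 bytes
+ //
+ // A TCP health checker that connects and immediately disconnects sends
+ // none of these bytes, so ReadFull fails with io.EOF.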
+ if err == io.EOF { + return nil, io.EOF + } + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } + if !bytes.Equal(preface, clientPreface) { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + } + + frame, err := t.framer.fr.ReadFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, err + } + if err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) + } + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + } + t.handleSettings(sf) + + go func() { + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) + t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler + if err := t.loopy.run(); err != nil { + if logger.V(logLevel) { + logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) + } + } + t.conn.Close() + t.controlBuf.finish() + close(t.writerDone) + }() + go t.keepalive() + return t, nil +} + +// operateHeader takes action on the decoded headers. +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { + // Acquire max stream ID lock for entire duration + t.maxStreamMu.Lock() + defer t.maxStreamMu.Unlock() + + streamID := frame.Header().StreamID + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeFrameSize, + onWrite: func() {}, + }) + return false + } + + if streamID%2 != 1 || streamID <= t.maxStreamID { + // illegal gRPC stream id. + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + } + return true + } + t.maxStreamID = streamID + + buf := newRecvBuffer() + s := &Stream{ + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + } + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. + isGRPC = false + mdata = make(map[string][]string) + httpMethod string + // headerError is set if an error is encountered while parsing the headers + headerError bool + + timeoutSet bool + timeout time.Duration + ) + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) + if !validContentType { + break + } + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + s.contentSubtype = contentSubtype + isGRPC = true + case "grpc-encoding": + s.recvCompress = hf.Value + case ":method": + httpMethod = hf.Value + case ":path": + s.method = hf.Value + case "grpc-timeout": + timeoutSet = true + var err error + if timeout, err = decodeTimeout(hf.Value); err != nil { + headerError = true + } + // "Transports must consider requests containing the Connection header + // as malformed." 
+ // - A41 + case "connection": + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec") + } + headerError = true + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = true + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + // "If multiple Host headers or multiple :authority headers are present, the + // request must be rejected with an HTTP status code 400 as required by Host + // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM + // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is an HTTP/2 + // error, this takes precedence over a client not speaking gRPC. + if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { + errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) + if logger.V(logLevel) { + logger.Errorf("transport: %v", errMsg) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 400, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + }) + return false + } + + if !isGRPC || headerError { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + return false + } + + // "If :authority is missing, Host must be renamed to :authority." - A41 + if len(mdata[":authority"]) == 0 { + // No-op if host isn't present; an RPC with no eventual :authority header + // is still valid. + if host, ok := mdata["host"]; ok { + mdata[":authority"] = host + delete(mdata, "host") + } + } else { + // "If :authority is present, Host must be discarded" - A41 + delete(mdata, "host") + } + + if frame.StreamEnded() { + // s is just created by the caller. No lock needed. + s.state = streamReadDone + } + if timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) + } else { + s.ctx, s.cancel = context.WithCancel(t.ctx) + } + pr := &peer.Peer{ + Addr: t.remoteAddr, + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + s.ctx = peer.NewContext(s.ctx, pr) + // Attach the received metadata to the context.
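+ // Downstream handlers read this through the public API, e.g. (a sketch):
+ //
+ //	if md, ok := metadata.FromIncomingContext(ctx); ok {
+ //		_ = md.Get("user-agent") // keys are already lowercase
+ //	}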
+ if len(mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, mdata) + if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { + s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) + } + if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { + s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) + } + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + s.cancel() + return false + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + s.cancel() + return false + } + if httpMethod != http.MethodPost { + t.mu.Unlock() + if logger.V(logLevel) { + logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) + } + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + s.cancel() + return false + } + if t.inTapHandle != nil { + var err error + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + t.mu.Unlock() + if logger.V(logLevel) { + logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + } + stat, ok := status.FromError(err) + if !ok { + stat = status.New(codes.PermissionDenied, err.Error()) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 200, + streamID: s.id, + contentSubtype: s.contentSubtype, + status: stat, + }) + return false + } + } + t.activeStreams[streamID] = s + if len(t.activeStreams) == 1 { + t.idle = time.Time{} + } + t.mu.Unlock() + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + s.ctx = traceCtx(s.ctx, s.method) + if t.stats != nil { + s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: s.recvCompress, + WireLength: int(frame.Header().Length), + Header: metadata.MD(mdata).Copy(), + } + t.stats.HandleRPC(s.ctx, inHeader) + } + s.ctxDone = s.ctx.Done() + s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + // Register the stream with loopy. + t.controlBuf.put(®isterStream{ + streamID: s.id, + wq: s.wq, + }) + handle(s) + return false +} + +// HandleStreams receives incoming streams using the given handler. This is +// typically run in a separate goroutine. +// traceCtx attaches trace to ctx and returns the new context. 
+func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { + defer close(t.readerDone) + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + if err != nil { + if se, ok := err.(http2.StreamError); ok { + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + } + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + t.closeStream(s, true, se.Code, false) + } else { + t.controlBuf.put(&cleanupStream{ + streamID: se.StreamID, + rst: true, + rstCode: se.Code, + onWrite: func() {}, + }) + } + continue + } + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } + if logger.V(logLevel) { + logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + } + t.Close() + return + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + if t.operateHeaders(frame, handle, traceCtx) { + t.Close() + break + } + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + case *http2.GoAwayFrame: + // TODO: Handle GoAway from the client appropriately. + default: + if logger.V(logLevel) { + logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + } + } + } +} + +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. + return nil, false + } + s, ok := t.activeStreams[f.Header().StreamID] + if !ok { + // The stream is already done. + return nil, false + } + return s, true +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Server) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } + +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Server) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, + increment: w, + }) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Server) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: t.fc.newLimit(n), + }) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) + +} + +func (t *http2Server) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. 
+ // Such a restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active (fast) streams from starving in the presence of slow or + // inactive streams. + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if s.getState() == streamReadDone { + t.closeStream(s, true, http2.ErrCodeStreamClosed, false) + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, true, http2.ErrCodeFlowControl, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + if f.StreamEnded() { + // Received the end of stream from the client. + s.compareAndSwapState(streamActive, streamReadDone) + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { + // If the stream is not deleted from the transport's active streams map, then do a regular close stream. + if s, ok := t.getStream(f); ok { + t.closeStream(s, false, 0, false) + return + } + // If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map. + t.controlBuf.put(&cleanupStream{ + streamID: f.Header().StreamID, + rst: false, + rstCode: 0, + onWrite: func() {}, + }) +} + +func (t *http2Server) handleSettings(f *http2.SettingsFrame) { + if f.IsAck() { + return + } + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + t.controlBuf.executeAndPut(func(interface{}) bool { + for _, f := range updateFuncs { + f() + } + return true + }, &incomingSettings{ + ss: ss, + }) +} + +const ( + maxPingStrikes = 2 + defaultPingTimeout = 2 * time.Hour +) + +func (t *http2Server) handlePing(f *http2.PingFrame) { + if f.IsAck() { + if f.Data == goAwayPing.data && t.drainChan != nil { + close(t.drainChan) + return + } + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) + + now := time.Now() + defer func() { + t.lastPingAt = now + }() + // A pending reset of ping strikes means that we don't need to check for a + // policy violation for this ping, and the pingStrikes counter should be + // set to 0.
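+ // Worked example (timings made up): with kep.MinTime = 5m, a client that
+ // pings every 10s while streams are active earns a strike per early ping;
+ // once pingStrikes exceeds maxPingStrikes (2), the server responds with
+ // GOAWAY ENHANCE_YOUR_CALM and debug data "too_many_pings" and closes
+ // the connection.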
+ if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) { + t.pingStrikes = 0 + return + } + t.mu.Lock() + ns := len(t.activeStreams) + t.mu.Unlock() + if ns < 1 && !t.kep.PermitWithoutStream { + // Keepalive shouldn't be active thus, this new ping should + // have come after at least defaultPingTimeout. + if t.lastPingAt.Add(defaultPingTimeout).After(now) { + t.pingStrikes++ + } + } else { + // Check if keepalive policy is respected. + if t.lastPingAt.Add(t.kep.MinTime).After(now) { + t.pingStrikes++ + } + } + + if t.pingStrikes > maxPingStrikes { + // Send goaway and close the connection. + if logger.V(logLevel) { + logger.Errorf("transport: Got too many pings from the client, closing the connection.") + } + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + } +} + +func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { + t.controlBuf.put(&incomingWindowUpdate{ + streamID: f.Header().StreamID, + increment: f.Increment, + }) +} + +func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField { + for k, vv := range md { + if isReservedHeader(k) { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + return headerFields +} + +func (t *http2Server) checkForHeaderListSize(it interface{}) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + if logger.V(logLevel) { + logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + } + return false + } + } + return true +} + +// WriteHeader sends the header metadata md back to the client. +func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { + if s.updateHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + if md.Len() > 0 { + if s.header.Len() > 0 { + s.header = metadata.Join(s.header, md) + } else { + s.header = md + } + } + if err := t.writeHeaderLocked(s); err != nil { + s.hdrMu.Unlock() + return err + } + s.hdrMu.Unlock() + return nil +} + +func (t *http2Server) setResetPingStrikes() { + atomic.StoreUint32(&t.resetPingStrikes, 1) +} + +func (t *http2Server) writeHeaderLocked(s *Stream) error { + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. 
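+ // For the later checkForHeaderListSize pass, each field costs
+ // len(name)+len(value)+32 bytes (RFC 7541 §4.1); e.g. {":status", "200"}
+ // counts as 7+3+32 = 42 bytes against the peer's
+ // SETTINGS_MAX_HEADER_LIST_SIZE.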
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
+	if s.sendCompress != "" {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
+	}
+	headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
+	success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
+		streamID:  s.id,
+		hf:        headerFields,
+		endStream: false,
+		onWrite:   t.setResetPingStrikes,
+	})
+	if !success {
+		if err != nil {
+			return err
+		}
+		t.closeStream(s, true, http2.ErrCodeInternal, false)
+		return ErrHeaderListSizeLimitViolation
+	}
+	if t.stats != nil {
+		// Note: Headers are compressed with hpack after this call returns.
+		// No WireLength field is set here.
+		outHeader := &stats.OutHeader{
+			Header:      s.header.Copy(),
+			Compression: s.sendCompress,
+		}
+		t.stats.HandleRPC(s.Context(), outHeader)
+	}
+	return nil
+}
+
+// WriteStatus sends stream status to the client and terminates the stream.
+// No further I/O operations can be performed on this stream.
+// TODO(zhaoq): Now it indicates the end of the entire stream. Revisit if early
+// OK is adopted.
+func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
+	if s.getState() == streamDone {
+		return nil
+	}
+	s.hdrMu.Lock()
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+	// first and create a slice of that exact size.
+	headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
+	if !s.updateHeaderSent() { // No headers have been sent.
+		if len(s.header) > 0 { // Send a separate header frame.
+			if err := t.writeHeaderLocked(s); err != nil {
+				s.hdrMu.Unlock()
+				return err
+			}
+		} else { // Send a trailer-only response.
+			headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+			headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
+		}
+	}
+	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
+
+	if p := st.Proto(); p != nil && len(p.Details) > 0 {
+		stBytes, err := proto.Marshal(p)
+		if err != nil {
+			// TODO: return error instead, when callers are able to handle it.
+			logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
+		} else {
+			headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
+		}
+	}
+
+	// Attach the trailer metadata.
+	headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer)
+	trailingHeader := &headerFrame{
+		streamID:  s.id,
+		hf:        headerFields,
+		endStream: true,
+		onWrite:   t.setResetPingStrikes,
+	}
+	s.hdrMu.Unlock()
+	success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
+	if !success {
+		if err != nil {
+			return err
+		}
+		t.closeStream(s, true, http2.ErrCodeInternal, false)
+		return ErrHeaderListSizeLimitViolation
+	}
+	// Send a RST_STREAM after the trailers if the client has not already half-closed.
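+	// (streamActive below means no END_STREAM has been received from the
+	// client yet, i.e. it may still be sending DATA frames; the RST_STREAM
+	// tells it to stop. Editorial note, inferred from the streamState
+	// definitions in transport.go.)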
+	rst := s.getState() == streamActive
+	t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
+	if t.stats != nil {
+		// Note: The trailer fields are compressed with hpack after this call returns.
+		// No WireLength field is set here.
+		t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
+			Trailer: s.trailer.Copy(),
+		})
+	}
+	return nil
+}
+
+// Write converts the data into an HTTP2 data frame and sends it out. A non-nil
+// error is returned if it fails (e.g., framing error, transport error).
+func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+	if !s.isHeaderSent() { // Headers haven't been written yet.
+		if err := t.WriteHeader(s, nil); err != nil {
+			if _, ok := err.(ConnectionError); ok {
+				return err
+			}
+			// TODO(mmukhi, dfawley): Make sure this is the right code to return.
+			return status.Errorf(codes.Internal, "transport: %v", err)
+		}
+	} else {
+		// Writing headers checks for this condition.
+		if s.getState() == streamDone {
+			// TODO(mmukhi, dfawley): Should the server write also return io.EOF?
+			s.cancel()
+			select {
+			case <-t.done:
+				return ErrConnClosing
+			default:
+			}
+			return ContextErr(s.ctx.Err())
+		}
+	}
+	df := &dataFrame{
+		streamID:    s.id,
+		h:           hdr,
+		d:           data,
+		onEachWrite: t.setResetPingStrikes,
+	}
+	if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+		select {
+		case <-t.done:
+			return ErrConnClosing
+		default:
+		}
+		return ContextErr(s.ctx.Err())
+	}
+	return t.controlBuf.put(df)
+}
+
+// keepalive running in a separate goroutine does the following:
+// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
+// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
+// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
+// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection
+// after an additional duration of keepalive.Timeout.
+func (t *http2Server) keepalive() {
+	p := &ping{}
+	// True iff a ping has been sent, and no data has been received since then.
+	outstandingPing := false
+	// Amount of time remaining before which we should receive an ACK for the
+	// last sent ping.
+	kpTimeoutLeft := time.Duration(0)
+	// Records the last value of t.lastRead before we block on the timer.
+	// This is required to check for read activity since then.
+	prevNano := time.Now().UnixNano()
+	// Initialize the different timers to their default values.
+	idleTimer := time.NewTimer(t.kp.MaxConnectionIdle)
+	ageTimer := time.NewTimer(t.kp.MaxConnectionAge)
+	kpTimer := time.NewTimer(t.kp.Time)
+	defer func() {
+		// We need to drain the underlying channel in these timers after a call
+		// to Stop(), only if we are interested in resetting them. Clearly we
+		// are not interested in resetting them here.
+		idleTimer.Stop()
+		ageTimer.Stop()
+		kpTimer.Stop()
+	}()
+
+	for {
+		select {
+		case <-idleTimer.C:
+			t.mu.Lock()
+			idle := t.idle
+			if idle.IsZero() { // The connection is non-idle.
+				t.mu.Unlock()
+				idleTimer.Reset(t.kp.MaxConnectionIdle)
+				continue
+			}
+			val := t.kp.MaxConnectionIdle - time.Since(idle)
+			t.mu.Unlock()
+			if val <= 0 {
+				// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
+				// Gracefully close the connection.
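+				// Editorial sketch of how a server opts in to the branches of
+				// this loop; grpc.KeepaliveParams and keepalive.ServerParameters
+				// are the public API, the values are illustrative only:
+				//
+				//	srv := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
+				//		MaxConnectionIdle:     15 * time.Minute, // arms idleTimer
+				//		MaxConnectionAge:      30 * time.Minute, // arms ageTimer
+				//		MaxConnectionAgeGrace: 5 * time.Minute,
+				//		Time:                  2 * time.Hour,    // ping interval (kpTimer)
+				//		Timeout:               20 * time.Second, // ping ack deadline
+				//	}))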
+ t.Drain() + return + } + idleTimer.Reset(val) + case <-ageTimer.C: + t.Drain() + ageTimer.Reset(t.kp.MaxConnectionAgeGrace) + select { + case <-ageTimer.C: + // Close the connection after grace period. + if logger.V(logLevel) { + logger.Infof("transport: closing server transport due to maximum connection age.") + } + t.Close() + case <-t.done: + } + return + case <-kpTimer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were + // here. Setup the timer to fire at kp.Time seconds from + // lastRead time and continue. + outstandingPing = false + kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead + continue + } + if outstandingPing && kpTimeoutLeft <= 0 { + if logger.V(logLevel) { + logger.Infof("transport: closing server transport due to idleness.") + } + t.Close() + return + } + if !outstandingPing { + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + kpTimeoutLeft = t.kp.Timeout + outstandingPing = true + } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). + sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + kpTimeoutLeft -= sleepDuration + kpTimer.Reset(sleepDuration) + case <-t.done: + return + } + } +} + +// Close starts shutting down the http2Server transport. +// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This +// could cause some resource issue. Revisit this later. +func (t *http2Server) Close() { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.controlBuf.finish() + close(t.done) + if err := t.conn.Close(); err != nil && logger.V(logLevel) { + logger.Infof("transport: error closing conn during Close: %v", err) + } + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } + // Cancel all active streams. + for _, s := range streams { + s.cancel() + } + if t.stats != nil { + connEnd := &stats.ConnEnd{} + t.stats.HandleConn(t.ctx, connEnd) + } +} + +// deleteStream deletes the stream s from transport's active streams. +func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + + t.mu.Lock() + if _, ok := t.activeStreams[s.id]; ok { + delete(t.activeStreams, s.id) + if len(t.activeStreams) == 0 { + t.idle = time.Now() + } + } + t.mu.Unlock() + + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } +} + +// finishStream closes the stream and puts the trailing headerFrame into controlbuf. +func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + oldState := s.swapState(streamDone) + if oldState == streamDone { + // If the stream was already done, return. 
+ return + } + + hdr.cleanup = &cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() { + t.deleteStream(s, eosReceived) + }, + } + t.controlBuf.put(hdr) +} + +// closeStream clears the footprint of a stream when the stream is not needed any more. +func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + s.swapState(streamDone) + t.deleteStream(s, eosReceived) + + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() {}, + }) +} + +func (t *http2Server) RemoteAddr() net.Addr { + return t.remoteAddr +} + +func (t *http2Server) Drain() { + t.mu.Lock() + defer t.mu.Unlock() + if t.drainChan != nil { + return + } + t.drainChan = make(chan struct{}) + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) +} + +var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} + +// Handles outgoing GoAway and returns true if loopy needs to put itself +// in draining mode. +func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.maxStreamMu.Lock() + t.mu.Lock() + if t.state == closing { // TODO(mmukhi): This seems unnecessary. + t.mu.Unlock() + t.maxStreamMu.Unlock() + // The transport is closing. + return false, ErrConnClosing + } + if !g.headsUp { + // Stop accepting more streams now. + t.state = draining + sid := t.maxStreamID + if len(t.activeStreams) == 0 { + g.closeConn = true + } + t.mu.Unlock() + t.maxStreamMu.Unlock() + if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { + return false, err + } + if g.closeConn { + // Abruptly close the connection following the GoAway (via + // loopywriter). But flush out what's inside the buffer first. + t.framer.writer.Flush() + return false, fmt.Errorf("transport: Connection closing") + } + return true, nil + } + t.mu.Unlock() + t.maxStreamMu.Unlock() + // For a graceful close, send out a GoAway with stream ID of MaxUInt32, + // Follow that with a ping and wait for the ack to come back or a timer + // to expire. During this time accept new streams since they might have + // originated before the GoAway reaches the client. + // After getting the ack or timer expiration send out another GoAway this + // time with an ID of the max stream server intends to process. 
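+	// Editorial summary of the handshake implemented below (client behavior
+	// inferred from RFC 7540, not from this file):
+	//
+	//	server: GOAWAY(last-stream-id = math.MaxUint32) // streams still accepted
+	//	server: PING(goAwayPing)
+	//	client: PING ack                                // closes t.drainChan
+	//	server: GOAWAY(last-stream-id = t.maxStreamID)  // via controlBuf, below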
+ if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + return false, err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return false, err + } + go func() { + timer := time.NewTimer(time.Minute) + defer timer.Stop() + select { + case <-t.drainChan: + case <-timer.C: + case <-t.done: + return + } + t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData}) + }() + return false, nil +} + +func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Server) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Server) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Server) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.done: + return -1 + case <-timer.C: + return -2 + } +} + +func getJitter(v time.Duration) time.Duration { + if v == infinity { + return 0 + } + // Generate a jitter between +/- 10% of the value. + r := int64(v / 10) + j := grpcrand.Int63n(2*r) - r + return time.Duration(j) +} + +type connectionKey struct{} + +// GetConnection gets the connection from the context. +func GetConnection(ctx context.Context) net.Conn { + conn, _ := ctx.Value(connectionKey{}).(net.Conn) + return conn +} + +// SetConnection adds the connection to the context to be able to get +// information about the destination ip and port for an incoming RPC. This also +// allows any unary or streaming interceptors to see the connection. +func setConnection(ctx context.Context, conn net.Conn) context.Context { + return context.WithValue(ctx, connectionKey{}, conn) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go new file mode 100644 index 000000000..d8247bcdf --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -0,0 +1,424 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bufio" + "bytes" + "encoding/base64" + "fmt" + "io" + "math" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +const ( + // http2MaxFrameLen specifies the max length of a HTTP2 frame. + http2MaxFrameLen = 16384 // 16KB frame + // http://http2.github.io/http2-spec/#SettingValues + http2InitHeaderTableSize = 4096 + // baseContentType is the base content-type for gRPC. This is a valid + // content-type on it's own, but can also include a content-subtype such as + // "proto" as a suffix after "+" or ";". See + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests + // for more details. + +) + +var ( + clientPreface = []byte(http2.ClientPreface) + http2ErrConvTab = map[http2.ErrCode]codes.Code{ + http2.ErrCodeNo: codes.Internal, + http2.ErrCodeProtocol: codes.Internal, + http2.ErrCodeInternal: codes.Internal, + http2.ErrCodeFlowControl: codes.ResourceExhausted, + http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeStreamClosed: codes.Internal, + http2.ErrCodeFrameSize: codes.Internal, + http2.ErrCodeRefusedStream: codes.Unavailable, + http2.ErrCodeCancel: codes.Canceled, + http2.ErrCodeCompression: codes.Internal, + http2.ErrCodeConnect: codes.Internal, + http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, + http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + http2.ErrCodeHTTP11Required: codes.Internal, + } + // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table. + HTTPStatusConvTab = map[int]codes.Code{ + // 400 Bad Request - INTERNAL. + http.StatusBadRequest: codes.Internal, + // 401 Unauthorized - UNAUTHENTICATED. + http.StatusUnauthorized: codes.Unauthenticated, + // 403 Forbidden - PERMISSION_DENIED. + http.StatusForbidden: codes.PermissionDenied, + // 404 Not Found - UNIMPLEMENTED. + http.StatusNotFound: codes.Unimplemented, + // 429 Too Many Requests - UNAVAILABLE. + http.StatusTooManyRequests: codes.Unavailable, + // 502 Bad Gateway - UNAVAILABLE. + http.StatusBadGateway: codes.Unavailable, + // 503 Service Unavailable - UNAVAILABLE. + http.StatusServiceUnavailable: codes.Unavailable, + // 504 Gateway timeout - UNAVAILABLE. + http.StatusGatewayTimeout: codes.Unavailable, + } + logger = grpclog.Component("transport") +) + +// isReservedHeader checks whether hdr belongs to HTTP2 headers +// reserved by gRPC protocol. Any other headers are classified as the +// user-specified metadata. 
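+//
+// For example (editorial, derived from the switch below):
+//
+//	isReservedHeader(":authority")   == true  // pseudo-header
+//	isReservedHeader("grpc-status")  == true
+//	isReservedHeader("x-request-id") == false // plain user metadata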
+func isReservedHeader(hdr string) bool { + if hdr != "" && hdr[0] == ':' { + return true + } + switch hdr { + case "content-type", + "user-agent", + "grpc-message-type", + "grpc-encoding", + "grpc-message", + "grpc-status", + "grpc-timeout", + "grpc-status-details-bin", + // Intentionally exclude grpc-previous-rpc-attempts and + // grpc-retry-pushback-ms, which are "reserved", but their API + // intentionally works via metadata. + "te": + return true + default: + return false + } +} + +// isWhitelistedHeader checks whether hdr should be propagated into metadata +// visible to users, even though it is classified as "reserved", above. +func isWhitelistedHeader(hdr string) bool { + switch hdr { + case ":authority", "user-agent": + return true + default: + return false + } +} + +const binHdrSuffix = "-bin" + +func encodeBinHeader(v []byte) string { + return base64.RawStdEncoding.EncodeToString(v) +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +func encodeMetadataHeader(k, v string) string { + if strings.HasSuffix(k, binHdrSuffix) { + return encodeBinHeader(([]byte)(v)) + } + return v +} + +func decodeMetadataHeader(k, v string) (string, error) { + if strings.HasSuffix(k, binHdrSuffix) { + b, err := decodeBinHeader(v) + return string(b), err + } + return v, nil +} + +func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { + v, err := decodeBinHeader(rawDetails) + if err != nil { + return nil, err + } + st := &spb.Status{} + if err = proto.Unmarshal(v, st); err != nil { + return nil, err + } + return status.FromProto(st), nil +} + +type timeoutUnit uint8 + +const ( + hour timeoutUnit = 'H' + minute timeoutUnit = 'M' + second timeoutUnit = 'S' + millisecond timeoutUnit = 'm' + microsecond timeoutUnit = 'u' + nanosecond timeoutUnit = 'n' +) + +func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { + switch u { + case hour: + return time.Hour, true + case minute: + return time.Minute, true + case second: + return time.Second, true + case millisecond: + return time.Millisecond, true + case microsecond: + return time.Microsecond, true + case nanosecond: + return time.Nanosecond, true + default: + } + return +} + +func decodeTimeout(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("transport: timeout string is too short: %q", s) + } + if size > 9 { + // Spec allows for 8 digits plus the unit. + return 0, fmt.Errorf("transport: timeout string is too long: %q", s) + } + unit := timeoutUnit(s[size-1]) + d, ok := timeoutUnitToDuration(unit) + if !ok { + return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + const maxHours = math.MaxInt64 / int64(time.Hour) + if d == time.Hour && t > maxHours { + // This timeout would overflow math.MaxInt64; clamp it. + return time.Duration(math.MaxInt64), nil + } + return d * time.Duration(t), nil +} + +const ( + spaceByte = ' ' + tildeByte = '~' + percentByte = '%' +) + +// encodeGrpcMessage is used to encode status code in header field +// "grpc-message". It does percent encoding and also replaces invalid utf-8 +// characters with Unicode replacement character. +// +// It checks to see if each individual byte in msg is an allowable byte, and +// then either percent encoding or passing it through. 
When percent encoding, +// the byte is converted into hexadecimal notation with a '%' prepended. +func encodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if !(c >= spaceByte && c <= tildeByte && c != percentByte) { + return encodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func encodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + for len(msg) > 0 { + r, size := utf8.DecodeRuneInString(msg) + for _, b := range []byte(string(r)) { + if size > 1 { + // If size > 1, r is not ascii. Always do percent encoding. + buf.WriteString(fmt.Sprintf("%%%02X", b)) + continue + } + + // The for loop is necessary even if size == 1. r could be + // utf8.RuneError. + // + // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". + if b >= spaceByte && b <= tildeByte && b != percentByte { + buf.WriteByte(b) + } else { + buf.WriteString(fmt.Sprintf("%%%02X", b)) + } + } + msg = msg[size:] + } + return buf.String() +} + +// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. +func decodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + if msg[i] == percentByte && i+2 < lenMsg { + return decodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func decodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) + if err != nil { + buf.WriteByte(c) + } else { + buf.WriteByte(byte(parsed)) + i += 2 + } + } else { + buf.WriteByte(c) + } + } + return buf.String() +} + +type bufWriter struct { + buf []byte + offset int + batchSize int + conn net.Conn + err error + + onFlush func() +} + +func newBufWriter(conn net.Conn, batchSize int) *bufWriter { + return &bufWriter{ + buf: make([]byte, batchSize*2), + batchSize: batchSize, + conn: conn, + } +} + +func (w *bufWriter) Write(b []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + if w.batchSize == 0 { // Buffer has been disabled. + return w.conn.Write(b) + } + for len(b) > 0 { + nn := copy(w.buf[w.offset:], b) + b = b[nn:] + w.offset += nn + n += nn + if w.offset >= w.batchSize { + err = w.Flush() + } + } + return n, err +} + +func (w *bufWriter) Flush() error { + if w.err != nil { + return w.err + } + if w.offset == 0 { + return nil + } + if w.onFlush != nil { + w.onFlush() + } + _, w.err = w.conn.Write(w.buf[:w.offset]) + w.offset = 0 + return w.err +} + +type framer struct { + writer *bufWriter + fr *http2.Framer +} + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { + if writeBufferSize < 0 { + writeBufferSize = 0 + } + var r io.Reader = conn + if readBufferSize > 0 { + r = bufio.NewReaderSize(r, readBufferSize) + } + w := newBufWriter(conn, writeBufferSize) + f := &framer{ + writer: w, + fr: http2.NewFramer(w, r), + } + f.fr.SetMaxReadFrameSize(http2MaxFrameLen) + // Opt-in to Frame reuse API on framer to reduce garbage. + // Frames aren't safe to read from after a subsequent call to ReadFrame. + f.fr.SetReuseFrames() + f.fr.MaxHeaderListSize = maxHeaderListSize + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) + return f +} + +// parseDialTarget returns the network and address to pass to dialer. 
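+//
+// For example (editorial, behavior traced from the code below):
+//
+//	parseDialTarget("unix:/var/run/app.sock")   -> ("unix", "/var/run/app.sock")
+//	parseDialTarget("unix:///var/run/app.sock") -> ("unix", "/var/run/app.sock")
+//	parseDialTarget("example.com:443")          -> ("tcp", "example.com:443")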
+func parseDialTarget(target string) (string, string) { + net := "tcp" + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + return n, target[m1+1:] + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr := t.Path + if scheme == "unix" { + if addr == "" { + addr = t.Host + } + return scheme, addr + } + } + return net, target +} diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go new file mode 100644 index 000000000..c11b52782 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go @@ -0,0 +1,46 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package networktype declares the network type to be used in the default +// dialer. Attribute of a resolver.Address. +package networktype + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.internal.transport.networktype") + +// Set returns a copy of the provided address with attributes containing networkType. +func Set(address resolver.Address, networkType string) resolver.Address { + address.Attributes = address.Attributes.WithValue(key, networkType) + return address +} + +// Get returns the network type in the resolver.Address and true, or "", false +// if not present. +func Get(address resolver.Address) (string, bool) { + v := address.Attributes.Value(key) + if v == nil { + return "", false + } + return v.(string), true +} diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go new file mode 100644 index 000000000..415961987 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -0,0 +1,142 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bufio" + "context" + "encoding/base64" + "fmt" + "io" + "net" + "net/http" + "net/http/httputil" + "net/url" +) + +const proxyAuthHeaderKey = "Proxy-Authorization" + +var ( + // The following variable will be overwritten in the tests. 
+ httpProxyFromEnvironment = http.ProxyFromEnvironment +) + +func mapAddress(address string) (*url.URL, error) { + req := &http.Request{ + URL: &url.URL{ + Scheme: "https", + Host: address, + }, + } + url, err := httpProxyFromEnvironment(req) + if err != nil { + return nil, err + } + return url, nil +} + +// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. +// It's possible that this reader reads more than what's need for the response and stores +// those bytes in the buffer. +// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the +// bytes in the buffer. +type bufConn struct { + net.Conn + r io.Reader +} + +func (c *bufConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) { + defer func() { + if err != nil { + conn.Close() + } + }() + + req := &http.Request{ + Method: http.MethodConnect, + URL: &url.URL{Host: backendAddr}, + Header: map[string][]string{"User-Agent": {grpcUA}}, + } + if t := proxyURL.User; t != nil { + u := t.Username() + p, _ := t.Password() + req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p)) + } + + if err := sendHTTPRequest(ctx, req, conn); err != nil { + return nil, fmt.Errorf("failed to write the HTTP request: %v", err) + } + + r := bufio.NewReader(conn) + resp, err := http.ReadResponse(r, req) + if err != nil { + return nil, fmt.Errorf("reading server HTTP response: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status) + } + return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) + } + + return &bufConn{Conn: conn, r: r}, nil +} + +// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy +// is necessary, dials, does the HTTP CONNECT handshake, and returns the +// connection. +func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { + newAddr := addr + proxyURL, err := mapAddress(addr) + if err != nil { + return nil, err + } + if proxyURL != nil { + newAddr = proxyURL.Host + } + + conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr) + if err != nil { + return + } + if proxyURL != nil { + // proxy is disabled if proxyURL is nil. + conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + } + return +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go new file mode 100644 index 000000000..0c43efaa6 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -0,0 +1,812 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package transport defines and implements message oriented communication +// channel to complete various transactions (e.g., an RPC). It is meant for +// grpc-internal usage and is not intended to be imported directly by users. +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const logLevel = 2 + +type bufferPool struct { + pool sync.Pool +} + +func newBufferPool() *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + } +} + +func (p *bufferPool) get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +func (p *bufferPool) put(b *bytes.Buffer) { + p.pool.Put(b) +} + +// recvMsg represents the received msg from the transport. All transport +// protocol specific info has been removed. +type recvMsg struct { + buffer *bytes.Buffer + // nil: received some data + // io.EOF: stream is completed. data is nil. + // other non-nil error: transport failure. data is nil. + err error +} + +// recvBuffer is an unbounded channel of recvMsg structs. +// +// Note: recvBuffer differs from buffer.Unbounded only in the fact that it +// holds a channel of recvMsg structs instead of objects implementing "item" +// interface. recvBuffer is written to much more often and using strict recvMsg +// structs helps avoid allocation in "recvBuffer.put" +type recvBuffer struct { + c chan recvMsg + mu sync.Mutex + backlog []recvMsg + err error +} + +func newRecvBuffer() *recvBuffer { + b := &recvBuffer{ + c: make(chan recvMsg, 1), + } + return b +} + +func (b *recvBuffer) put(r recvMsg) { + b.mu.Lock() + if b.err != nil { + b.mu.Unlock() + // An error had occurred earlier, don't accept more + // data or errors. + return + } + b.err = r.err + if len(b.backlog) == 0 { + select { + case b.c <- r: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, r) + b.mu.Unlock() +} + +func (b *recvBuffer) load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = recvMsg{} + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// get returns the channel that receives a recvMsg in the buffer. +// +// Upon receipt of a recvMsg, the caller should call load to send another +// recvMsg onto the channel if there is any. +func (b *recvBuffer) get() <-chan recvMsg { + return b.c +} + +// recvBufferReader implements io.Reader interface to read the data from +// recvBuffer. +type recvBufferReader struct { + closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata. + ctx context.Context + ctxDone <-chan struct{} // cache of ctx.Done() (for performance). + recv *recvBuffer + last *bytes.Buffer // Stores the remaining data in the previous calls. 
+	err        error
+	freeBuffer func(*bytes.Buffer)
+}
+
+// Read reads the next len(p) bytes from last. If last is drained, it tries to
+// read additional data from recv. It blocks if there is no additional data
+// available in recv. If Read returns any non-nil error, it will continue to
+// return that error.
+func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	if r.last != nil {
+		// Read remaining data left in last call.
+		copied, _ := r.last.Read(p)
+		if r.last.Len() == 0 {
+			r.freeBuffer(r.last)
+			r.last = nil
+		}
+		return copied, nil
+	}
+	if r.closeStream != nil {
+		n, r.err = r.readClient(p)
+	} else {
+		n, r.err = r.read(p)
+	}
+	return n, r.err
+}
+
+func (r *recvBufferReader) read(p []byte) (n int, err error) {
+	select {
+	case <-r.ctxDone:
+		return 0, ContextErr(r.ctx.Err())
+	case m := <-r.recv.get():
+		return r.readAdditional(m, p)
+	}
+}
+
+func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
+	// If the context is canceled, the stream is closed with nil metadata.
+	// closeStream writes its error parameter to r.recv as a recvMsg.
+	// r.readAdditional acts on that message and returns the necessary error.
+	select {
+	case <-r.ctxDone:
+		// Note that this adds the ctx error to the end of recv buffer, and
+		// reads from the head. This will delay the error until recv buffer is
+		// empty, thus will delay ctx cancellation in Recv().
+		//
+		// It's done this way to fix a race between ctx cancel and trailer. The
+		// race was, stream.Recv() may return ctx error if ctxDone wins the
+		// race, but stream.Trailer() may return a non-nil md because the stream
+		// was not marked as done when trailer is received. This closeStream
+		// call will mark the stream as done, thus fixing the race.
+		//
+		// TODO: delaying ctx error seems like an unnecessary side effect. What
+		// we really want is to mark the stream as done, and return ctx error
+		// faster.
+		r.closeStream(ContextErr(r.ctx.Err()))
+		m := <-r.recv.get()
+		return r.readAdditional(m, p)
+	case m := <-r.recv.get():
+		return r.readAdditional(m, p)
+	}
+}
+
+func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
+	r.recv.load()
+	if m.err != nil {
+		return 0, m.err
+	}
+	copied, _ := m.buffer.Read(p)
+	if m.buffer.Len() == 0 {
+		r.freeBuffer(m.buffer)
+		r.last = nil
+	} else {
+		r.last = m.buffer
+	}
+	return copied, nil
+}
+
+type streamState uint32
+
+const (
+	streamActive    streamState = iota
+	streamWriteDone // EndStream sent
+	streamReadDone  // EndStream received
+	streamDone      // the entire stream is finished.
+)
+
+// Stream represents an RPC in the transport layer.
+type Stream struct {
+	id           uint32
+	st           ServerTransport    // nil for client side Stream
+	ct           *http2Client       // nil for server side Stream
+	ctx          context.Context    // the associated context of the stream
+	cancel       context.CancelFunc // always nil for client side Stream
+	done         chan struct{}      // closed at the end of stream to unblock writers. On the client side.
+	doneFunc     func()             // invoked at the end of stream on client side.
+	ctxDone      <-chan struct{}    // same as done chan but for server side. Cache of ctx.Done() (for performance)
+	method       string             // the associated RPC method of the stream
+	recvCompress string
+	sendCompress string
+	buf          *recvBuffer
+	trReader     io.Reader
+	fc           *inFlow
+	wq           *writeQuota
+
+	// Callback to state the application's intentions to read data. This
+	// is used to adjust flow control, if needed.
+	requestRead func(int)
+
+	headerChan       chan struct{} // closed to indicate the end of header metadata.
+	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+	// headerValid indicates whether a valid header was received. Only
+	// meaningful after headerChan is closed (always call waitOnHeader() before
+	// reading its value). Not valid on server side.
+	headerValid bool
+
+	// hdrMu protects header and trailer metadata on the server-side.
+	hdrMu sync.Mutex
+	// On client side, header keeps the received header metadata.
+	//
+	// On server side, header keeps the header set by SetHeader(). The complete
+	// header will be merged into this after t.WriteHeader() is called.
+	header  metadata.MD
+	trailer metadata.MD // the key-value map of trailer metadata.
+
+	noHeaders bool // set if the client never received headers (set only after the stream is done).
+
+	// On the server-side, headerSent is atomically set to 1 when the headers are sent out.
+	headerSent uint32
+
+	state streamState
+
+	// On client-side it is the status error received from the server.
+	// On server-side it is unused.
+	status *status.Status
+
+	bytesReceived uint32 // indicates whether any bytes have been received on this stream
+	unprocessed   uint32 // set if the server sends a refused stream or GOAWAY including this stream
+
+	// contentSubtype is the content-subtype for requests.
+	// This must be lowercase or the behavior is undefined.
+	contentSubtype string
+}
+
+// isHeaderSent is only valid on the server-side.
+func (s *Stream) isHeaderSent() bool {
+	return atomic.LoadUint32(&s.headerSent) == 1
+}
+
+// updateHeaderSent updates headerSent and returns true
+// if it was already set. It is valid only on server-side.
+func (s *Stream) updateHeaderSent() bool {
+	return atomic.SwapUint32(&s.headerSent, 1) == 1
+}
+
+func (s *Stream) swapState(st streamState) streamState {
+	return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
+}
+
+func (s *Stream) compareAndSwapState(oldState, newState streamState) bool {
+	return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState))
+}
+
+func (s *Stream) getState() streamState {
+	return streamState(atomic.LoadUint32((*uint32)(&s.state)))
+}
+
+func (s *Stream) waitOnHeader() {
+	if s.headerChan == nil {
+		// On the server headerChan is always nil since a stream originates
+		// only after having received headers.
+		return
+	}
+	select {
+	case <-s.ctx.Done():
+		// Close the stream to prevent headers/trailers from changing after
+		// this function returns.
+		s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
+		// headerChan could possibly not be closed yet if closeStream raced
+		// with operateHeaders; wait until it is closed explicitly here.
+		<-s.headerChan
+	case <-s.headerChan:
+	}
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is an empty string if there is no compression applied.
+func (s *Stream) RecvCompress() string {
+	s.waitOnHeader()
+	return s.recvCompress
+}
+
+// SetSendCompress sets the compression algorithm for the stream.
+func (s *Stream) SetSendCompress(str string) {
+	s.sendCompress = str
+}
+
+// Done returns a channel which is closed when it receives the final status
+// from the server.
+func (s *Stream) Done() <-chan struct{} {
+	return s.done
+}
+
+// Header returns the header metadata of the stream.
+//
+// On client side, it acquires the key-value pairs of header metadata once it is
+// available.
+// It blocks until i) the metadata is ready, or ii) there is no header
+// metadata, or iii) the stream is canceled/expired.
+//
+// On server side, it returns the out header after t.WriteHeader is called. It
+// does not block and must not be called until after WriteHeader.
+func (s *Stream) Header() (metadata.MD, error) {
+	if s.headerChan == nil {
+		// On server side, return the header in stream. It will be the out
+		// header after t.WriteHeader is called.
+		return s.header.Copy(), nil
+	}
+	s.waitOnHeader()
+	if !s.headerValid {
+		return nil, s.status.Err()
+	}
+	return s.header.Copy(), nil
+}
+
+// TrailersOnly blocks until a header or trailers-only frame is received and
+// then returns true if the stream was trailers-only. If the stream ends
+// before headers are received, it returns true. Client-side only.
+func (s *Stream) TrailersOnly() bool {
+	s.waitOnHeader()
+	return s.noHeaders
+}
+
+// Trailer returns the cached trailer metadata. Note that if it is not called
+// after the entire stream is done, it could return an empty MD. Client
+// side only.
+// It can be read safely only after the stream has ended, that is, after Read
+// or Write has returned io.EOF.
+func (s *Stream) Trailer() metadata.MD {
+	c := s.trailer.Copy()
+	return c
+}
+
+// ContentSubtype returns the content-subtype for a request. For example, a
+// content-subtype of "proto" will result in a content-type of
+// "application/grpc+proto". This will always be lowercase. See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+func (s *Stream) ContentSubtype() string {
+	return s.contentSubtype
+}
+
+// Context returns the context of the stream.
+func (s *Stream) Context() context.Context {
+	return s.ctx
+}
+
+// Method returns the method for the stream.
+func (s *Stream) Method() string {
+	return s.method
+}
+
+// Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, after Done() is closed.
+func (s *Stream) Status() *status.Status {
+	return s.status
+}
+
+// SetHeader sets the header metadata. This can be called multiple times.
+// Server side only.
+// This should not be called in parallel with other data writes.
+func (s *Stream) SetHeader(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.isHeaderSent() || s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.header = metadata.Join(s.header, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+// SendHeader sends the given header metadata. The given metadata is
+// combined with any metadata set by previous calls to SetHeader and
+// then written to the transport stream.
+func (s *Stream) SendHeader(md metadata.MD) error {
+	return s.st.WriteHeader(s, md)
+}
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. This can be called multiple times. Server side only.
+// This should not be called in parallel with other data writes.
+func (s *Stream) SetTrailer(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.trailer = metadata.Join(s.trailer, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+func (s *Stream) write(m recvMsg) {
+	s.buf.put(m)
+}
+
+// Read reads all p bytes from the wire for this stream.
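+//
+// Editorial sketch of the caller's pattern: the gRPC layer reads the 5-byte
+// message prefix (1-byte compression flag, 4-byte big-endian length) and then
+// the payload, e.g.
+//
+//	var hdr [5]byte
+//	if _, err := s.Read(hdr[:]); err != nil { /* handle */ }
+//	msg := make([]byte, binary.BigEndian.Uint32(hdr[1:]))
+//	if _, err := s.Read(msg); err != nil { /* handle */ }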
+func (s *Stream) Read(p []byte) (n int, err error) {
+	// Don't request a read if there was an error earlier
+	if er := s.trReader.(*transportReader).er; er != nil {
+		return 0, er
+	}
+	s.requestRead(len(p))
+	return io.ReadFull(s.trReader, p)
+}
+
+// transportReader reads all the data available for this Stream from the transport and
+// passes them into the decoder, which converts them into a gRPC message stream.
+// The error is io.EOF when the stream is done or another non-nil error if
+// the stream broke.
+type transportReader struct {
+	reader io.Reader
+	// The handler to control the window update procedure for both this
+	// particular stream and the associated transport.
+	windowHandler func(int)
+	er            error
+}
+
+func (t *transportReader) Read(p []byte) (n int, err error) {
+	n, err = t.reader.Read(p)
+	if err != nil {
+		t.er = err
+		return
+	}
+	t.windowHandler(n)
+	return
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *Stream) BytesReceived() bool {
+	return atomic.LoadUint32(&s.bytesReceived) == 1
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *Stream) Unprocessed() bool {
+	return atomic.LoadUint32(&s.unprocessed) == 1
+}
+
+// GoString is implemented by Stream so context.String() won't
+// race when printing %#v.
+func (s *Stream) GoString() string {
+	return fmt.Sprintf("<stream: %p, %v>", s, s.method)
+}
+
+// state of transport
+type transportState int
+
+const (
+	reachable transportState = iota
+	closing
+	draining
+)
+
+// ServerConfig consists of all the configurations to establish a server transport.
+type ServerConfig struct {
+	MaxStreams            uint32
+	ConnectionTimeout     time.Duration
+	Credentials           credentials.TransportCredentials
+	InTapHandle           tap.ServerInHandle
+	StatsHandler          stats.Handler
+	KeepaliveParams       keepalive.ServerParameters
+	KeepalivePolicy       keepalive.EnforcementPolicy
+	InitialWindowSize     int32
+	InitialConnWindowSize int32
+	WriteBufferSize       int
+	ReadBufferSize        int
+	ChannelzParentID      int64
+	MaxHeaderListSize     *uint32
+	HeaderTableSize       *uint32
+}
+
+// ConnectOptions covers all relevant options for communicating with the server.
+type ConnectOptions struct {
+	// UserAgent is the application user agent.
+	UserAgent string
+	// Dialer specifies how to dial a network address.
+	Dialer func(context.Context, string) (net.Conn, error)
+	// FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
+	FailOnNonTempDialError bool
+	// PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
+	PerRPCCredentials []credentials.PerRPCCredentials
+	// TransportCredentials stores the Authenticator required to setup a client
+	// connection. Only one of TransportCredentials and CredsBundle is non-nil.
+	TransportCredentials credentials.TransportCredentials
+	// CredsBundle is the credentials bundle to be used. Only one of
+	// TransportCredentials and CredsBundle is non-nil.
+	CredsBundle credentials.Bundle
+	// KeepaliveParams stores the keepalive parameters.
+	KeepaliveParams keepalive.ClientParameters
+	// StatsHandler stores the handler for stats.
+	StatsHandler stats.Handler
+	// InitialWindowSize sets the initial window size for a stream.
+	InitialWindowSize int32
+	// InitialConnWindowSize sets the initial window size for a connection.
+ InitialConnWindowSize int32 + // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. + WriteBufferSize int + // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. + ReadBufferSize int + // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. + ChannelzParentID int64 + // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. + MaxHeaderListSize *uint32 + // UseProxy specifies if a proxy should be used. + UseProxy bool +} + +// NewClientTransport establishes the transport with the required ConnectOptions +// and returns it to the caller. +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose) +} + +// Options provides additional hints and information for message +// transmission. +type Options struct { + // Last indicates whether this write is the last piece for + // this stream. + Last bool +} + +// CallHdr carries the information of a particular RPC. +type CallHdr struct { + // Host specifies the peer's host. + Host string + + // Method specifies the operation to perform. + Method string + + // SendCompress specifies the compression algorithm applied on + // outbound message. + SendCompress string + + // Creds specifies credentials.PerRPCCredentials for a call. + Creds credentials.PerRPCCredentials + + // ContentSubtype specifies the content-subtype for a request. For example, a + // content-subtype of "proto" will result in a content-type of + // "application/grpc+proto". The value of ContentSubtype must be all + // lowercase, otherwise the behavior is undefined. See + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests + // for more details. + ContentSubtype string + + PreviousAttempts int // value of grpc-previous-rpc-attempts header to set + + DoneFunc func() // called when the stream is finished +} + +// ClientTransport is the common interface for all gRPC client-side transport +// implementations. +type ClientTransport interface { + // Close tears down this transport. Once it returns, the transport + // should not be accessed any more. The caller must make sure this + // is called only once. + Close(err error) + + // GracefulClose starts to tear down the transport: the transport will stop + // accepting new RPCs and NewStream will return error. Once all streams are + // finished, the transport will close. + // + // It does not block. + GracefulClose() + + // Write sends the data for the given stream. A nil stream indicates + // the write is to be performed on the transport as a whole. + Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // NewStream creates a Stream for an RPC. + NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) + + // CloseStream clears the footprint of a stream when the stream is + // not needed any more. The err indicates the error incurred when + // CloseStream is called. Must be called when a stream is finished + // unless the associated transport is closing. + CloseStream(stream *Stream, err error) + + // Error returns a channel that is closed when some I/O error + // happens. 
+	// Typically the caller should have a goroutine to monitor
+	// this in order to take action (e.g., close the current transport
+	// and create a new one) in the error case. It should not return nil
+	// once the transport is initiated.
+	Error() <-chan struct{}
+
+	// GoAway returns a channel that is closed when ClientTransport
+	// receives the draining signal from the server (e.g., GOAWAY frame in
+	// HTTP/2).
+	GoAway() <-chan struct{}
+
+	// GetGoAwayReason returns the reason why the GoAway frame was received,
+	// along with a human-readable string containing debug info.
+	GetGoAwayReason() (GoAwayReason, string)
+
+	// RemoteAddr returns the remote network address.
+	RemoteAddr() net.Addr
+
+	// IncrMsgSent increments the number of messages sent through this transport.
+	IncrMsgSent()
+
+	// IncrMsgRecv increments the number of messages received through this transport.
+	IncrMsgRecv()
+}
+
+// ServerTransport is the common interface for all gRPC server-side transport
+// implementations.
+//
+// Methods may be called concurrently from multiple goroutines, but
+// Write methods for a given Stream will be called serially.
+type ServerTransport interface {
+	// HandleStreams receives incoming streams using the given handler.
+	HandleStreams(func(*Stream), func(context.Context, string) context.Context)
+
+	// WriteHeader sends the header metadata for the given stream.
+	// WriteHeader may not be called on all streams.
+	WriteHeader(s *Stream, md metadata.MD) error
+
+	// Write sends the data for the given stream.
+	// Write may not be called on all streams.
+	Write(s *Stream, hdr []byte, data []byte, opts *Options) error
+
+	// WriteStatus sends the status of a stream to the client. WriteStatus is
+	// the final call made on a stream and always occurs.
+	WriteStatus(s *Stream, st *status.Status) error
+
+	// Close tears down the transport. Once it is called, the transport
+	// should not be accessed any more. All the pending streams and their
+	// handlers will be terminated asynchronously.
+	Close()
+
+	// RemoteAddr returns the remote network address.
+	RemoteAddr() net.Addr
+
+	// Drain notifies the client that this ServerTransport stops accepting new RPCs.
+	Drain()
+
+	// IncrMsgSent increments the number of messages sent through this transport.
+	IncrMsgSent()
+
+	// IncrMsgRecv increments the number of messages received through this transport.
+	IncrMsgRecv()
+}
+
+// connectionErrorf creates a ConnectionError with the specified error description.
+func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError {
+	return ConnectionError{
+		Desc: fmt.Sprintf(format, a...),
+		temp: temp,
+		err:  e,
+	}
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection and the retry of all the active streams.
+type ConnectionError struct {
+	Desc string
+	temp bool
+	err  error
+}
+
+func (e ConnectionError) Error() string {
+	return fmt.Sprintf("connection error: desc = %q", e.Desc)
+}
+
+// Temporary indicates if this connection error is temporary or fatal.
+func (e ConnectionError) Temporary() bool {
+	return e.temp
+}
+
+// Origin returns the original error of this connection error.
+func (e ConnectionError) Origin() error {
+	// Never return nil error here.
+	// If the original error is nil, return itself.
+	if e.err == nil {
+		return e
+	}
+	return e.err
+}
+
+// Unwrap returns the original error of this connection error or nil when the
+// origin is nil.
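+// Implementing Unwrap lets callers inspect the underlying cause with the
+// errors package, e.g. errors.Is(err, io.EOF) (editorial example).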
+func (e ConnectionError) Unwrap() error {
+	return e.err
+}
+
+var (
+	// ErrConnClosing indicates that the transport is closing.
+	ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
+	// errStreamDrain indicates that the stream is rejected because the
+	// connection is draining. This could be caused by goaway or balancer
+	// removing the address.
+	errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
+	// errStreamDone is returned from write at the client side to indicate
+	// an error at the application layer.
+	errStreamDone = errors.New("the stream is done")
+	// statusGoAway indicates that the server sent a GOAWAY that included this
+	// stream's ID in unprocessed RPCs.
+	statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
+)
+
+// GoAwayReason contains the reason for the GoAway frame received.
+type GoAwayReason uint8
+
+const (
+	// GoAwayInvalid indicates that no GoAway frame is received.
+	GoAwayInvalid GoAwayReason = 0
+	// GoAwayNoReason is the default value when a GoAway frame is received.
+	GoAwayNoReason GoAwayReason = 1
+	// GoAwayTooManyPings indicates that a GoAway frame with
+	// ErrCodeEnhanceYourCalm was received and that the debug data said
+	// "too_many_pings".
+	GoAwayTooManyPings GoAwayReason = 2
+)
+
+// channelzData is used to store channelz related data for http2Client and http2Server.
+// These fields cannot be embedded in the original structs (e.g. http2Client), since to do an
+// atomic operation on an int64 variable on a 32-bit machine, the user is responsible for
+// enforcing memory alignment. Here, by grouping those int64 fields inside a struct, we are
+// enforcing the alignment.
+type channelzData struct {
+	kpCount int64
+	// The number of streams that have started, including already finished ones.
+	streamsStarted int64
+	// Client side: The number of streams that have ended successfully by receiving
+	// a frame with the EoS bit set from the server.
+	// Server side: The number of streams that have ended successfully by sending
+	// a frame with the EoS bit set.
+	streamsSucceeded int64
+	streamsFailed    int64
+	// lastStreamCreatedTime stores the timestamp at which the last stream was created. It is of
+	// int64 type instead of time.Time since it's more costly to atomically update a time.Time
+	// variable than an int64 variable. The same goes for lastMsgSentTime and lastMsgRecvTime.
+	lastStreamCreatedTime int64
+	msgSent               int64
+	msgRecv               int64
+	lastMsgSentTime       int64
+	lastMsgRecvTime       int64
+}
+
+// ContextErr converts the error from the context package into a status error.
+func ContextErr(err error) error {
+	switch err {
+	case context.DeadlineExceeded:
+		return status.Error(codes.DeadlineExceeded, err.Error())
+	case context.Canceled:
+		return status.Error(codes.Canceled, err.Error())
+	}
+	return status.Errorf(codes.Internal, "Unexpected error from context package: %v", err)
+}
diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
new file mode 100644
index 000000000..e8b492774
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package internal + +import ( + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/resolver" +) + +// handshakeClusterNameKey is the type used as the key to store cluster name in +// the Attributes field of resolver.Address. +type handshakeClusterNameKey struct{} + +// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field +// is updated with the cluster name. +func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName) + return addr +} + +// GetXDSHandshakeClusterName returns cluster name stored in attr. +func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) { + v := attr.Value(handshakeClusterNameKey{}) + name, ok := v.(string) + return name, ok +} diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go new file mode 100644 index 000000000..34d31b5e7 --- /dev/null +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -0,0 +1,85 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package keepalive defines configurable parameters for point-to-point +// healthcheck. +package keepalive + +import ( + "time" +) + +// ClientParameters is used to set keepalive parameters on the client-side. +// These configure how the client will actively probe to notice when a +// connection is broken and send pings so intermediaries will be aware of the +// liveness of the connection. Make sure these parameters are set in +// coordination with the keepalive policy on the server, as incompatible +// settings can result in closing of connection. +type ClientParameters struct { + // After a duration of this time if the client doesn't see any activity it + // pings the server to see if the transport is still alive. + // If set below 10s, a minimum value of 10s will be used instead. + Time time.Duration // The current default value is infinity. + // After having pinged for keepalive check, the client waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. + Timeout time.Duration // The current default value is 20 seconds. + // If true, client sends keepalive pings even with no active RPCs. If false, + // when there are no active RPCs, Time and Timeout will be ignored and no + // keepalive pings will be sent. + PermitWithoutStream bool // false by default. 
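	// As a dialing sketch (the endpoint is hypothetical; the option
	// constructor grpc.WithKeepaliveParams lives in the parent grpc package):
	//
	//	conn, err := grpc.Dial("example.local:50051",
	//		grpc.WithInsecure(), // plaintext, for illustration only
	//		grpc.WithKeepaliveParams(keepalive.ClientParameters{
	//			Time:                30 * time.Second, // ping after 30s idle
	//			Timeout:             10 * time.Second, // close if no ack in 10s
	//			PermitWithoutStream: true,             // ping even with no RPCs
	//		}),
	//	)
	//
	// The server's EnforcementPolicy (defined below) must tolerate this ping
	// rate, or the server answers with a GOAWAY whose debug data says
	// "too_many_pings" (see GoAwayTooManyPings in the transport package above).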
+}
+
+// ServerParameters is used to set keepalive and max-age parameters on the
+// server-side.
+type ServerParameters struct {
+	// MaxConnectionIdle is a duration for the amount of time after which an
+	// idle connection would be closed by sending a GoAway. Idleness duration is
+	// defined since the most recent time the number of outstanding RPCs became
+	// zero or the connection establishment.
+	MaxConnectionIdle time.Duration // The current default value is infinity.
+	// MaxConnectionAge is a duration for the maximum amount of time a
+	// connection may exist before it will be closed by sending a GoAway. A
+	// random jitter of +/-10% will be added to MaxConnectionAge to spread out
+	// connection storms.
+	MaxConnectionAge time.Duration // The current default value is infinity.
+	// MaxConnectionAgeGrace is an additive period after MaxConnectionAge after
+	// which the connection will be forcibly closed.
+	MaxConnectionAgeGrace time.Duration // The current default value is infinity.
+	// After a duration of this time, if the server doesn't see any activity, it
+	// pings the client to see if the transport is still alive.
+	// If set below 1s, a minimum value of 1s will be used instead.
+	Time time.Duration // The current default value is 2 hours.
+	// After having pinged for a keepalive check, the server waits for a duration
+	// of Timeout, and if no activity is seen even after that, the connection is
+	// closed.
+	Timeout time.Duration // The current default value is 20 seconds.
+}
+
+// EnforcementPolicy is used to set the keepalive enforcement policy on the
+// server-side. The server will close the connection with a client that
+// violates this policy.
+type EnforcementPolicy struct {
+	// MinTime is the minimum amount of time a client should wait before sending
+	// a keepalive ping.
+	MinTime time.Duration // The current default value is 5 minutes.
+	// If true, the server allows keepalive pings even when there are no active
+	// streams (RPCs). If false, and the client sends pings when there are no
+	// active streams, the server will send a GOAWAY and close the connection.
+	PermitWithoutStream bool // false by default.
+}
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
new file mode 100644
index 000000000..3604c7819
--- /dev/null
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -0,0 +1,247 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata defines the structure of the metadata supported by the gRPC library.
+// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
+// for more information about custom-metadata.
+package metadata // import "google.golang.org/grpc/metadata"
+
+import (
+	"context"
+	"fmt"
+	"strings"
+)
+
+// DecodeKeyValue returns k, v, nil.
+//
+// Deprecated: use k and v directly instead.
+func DecodeKeyValue(k, v string) (string, string, error) { + return k, v, nil +} + +// MD is a mapping from metadata keys to values. Users should use the following +// two convenience functions New and Pairs to generate MD. +type MD map[string][]string + +// New creates an MD from a given key-value map. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func New(m map[string]string) MD { + md := MD{} + for k, val := range m { + key := strings.ToLower(k) + md[key] = append(md[key], val) + } + return md +} + +// Pairs returns an MD formed by the mapping of key, value ... +// Pairs panics if len(kv) is odd. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func Pairs(kv ...string) MD { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) + } + md := MD{} + for i := 0; i < len(kv); i += 2 { + key := strings.ToLower(kv[i]) + md[key] = append(md[key], kv[i+1]) + } + return md +} + +// Len returns the number of items in md. +func (md MD) Len() int { + return len(md) +} + +// Copy returns a copy of md. +func (md MD) Copy() MD { + return Join(md) +} + +// Get obtains the values for a given key. +// +// k is converted to lowercase before searching in md. +func (md MD) Get(k string) []string { + k = strings.ToLower(k) + return md[k] +} + +// Set sets the value of a given key with a slice of values. +// +// k is converted to lowercase before storing in md. +func (md MD) Set(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = vals +} + +// Append adds the values to key k, not overwriting what was already stored at +// that key. +// +// k is converted to lowercase before storing in md. +func (md MD) Append(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = append(md[k], vals...) +} + +// Delete removes the values for a given key k which is converted to lowercase +// before removing it from md. +func (md MD) Delete(k string) { + k = strings.ToLower(k) + delete(md, k) +} + +// Join joins any number of mds into a single MD. +// +// The order of values for each key is determined by the order in which the mds +// containing those values are presented to Join. +func Join(mds ...MD) MD { + out := MD{} + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return out +} + +type mdIncomingKey struct{} +type mdOutgoingKey struct{} + +// NewIncomingContext creates a new context with incoming md attached. +func NewIncomingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdIncomingKey{}, md) +} + +// NewOutgoingContext creates a new context with outgoing md attached. If used +// in conjunction with AppendToOutgoingContext, NewOutgoingContext will +// overwrite any previously-appended metadata. 
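A short usage sketch of the MD helpers above; the key names and token value are invented. The points worth noting are the key-casing normalization and the accumulation semantics of Append and Join:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Keys are lowercased on creation, so "Request-ID" and "request-id" merge.
	md := metadata.Pairs("Request-ID", "42", "request-id", "43")
	md.Append("authorization", "Bearer placeholder-token")

	fmt.Println(md.Get("request-id")) // [42 43]
	fmt.Println(md.Len())             // 2 (distinct keys)

	// Join appends values in argument order.
	merged := metadata.Join(md, metadata.Pairs("request-id", "44"))
	fmt.Println(merged.Get("request-id")) // [42 43 44]

	// Attach to an outgoing RPC context; a server handler reads the same
	// metadata back with metadata.FromIncomingContext(ctx).
	_ = metadata.NewOutgoingContext(context.Background(), md)
}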
+func NewOutgoingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) +} + +// AppendToOutgoingContext returns a new context with the provided kv merged +// with any existing metadata in the context. Please refer to the documentation +// of Pairs for a description of kv. +func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) + } + md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) + added := make([][]string, len(md.added)+1) + copy(added, md.added) + added[len(added)-1] = make([]string, len(kv)) + copy(added[len(added)-1], kv) + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) +} + +// FromIncomingContext returns the incoming metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. +func FromIncomingContext(ctx context.Context) (MD, bool) { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil, false + } + out := MD{} + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = v + } + return out, true +} + +// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. +// +// Remember to perform strings.ToLower on the keys, for both the returned MD (MD +// is a map, there's no guarantee it's created using our helper functions) and +// the extra kv pairs (AppendToOutgoingContext doesn't turn them into +// lowercase). +// +// This is intended for gRPC-internal use ONLY. Users should use +// FromOutgoingContext instead. +func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, nil, false + } + + return raw.md, raw.added, true +} + +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. +func FromOutgoingContext(ctx context.Context) (MD, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, false + } + + out := MD{} + for k, v := range raw.md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = v + } + for _, added := range raw.added { + if len(added)%2 == 1 { + panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added))) + } + + for i := 0; i < len(added); i += 2 { + key := strings.ToLower(added[i]) + out[key] = append(out[key], added[i+1]) + } + } + return out, ok +} + +type rawMD struct { + md MD + added [][]string +} diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go new file mode 100644 index 000000000..e01d219ff --- /dev/null +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package peer defines various peer information associated with RPCs and
+// corresponding utils.
+package peer
+
+import (
+	"context"
+	"net"
+
+	"google.golang.org/grpc/credentials"
+)
+
+// Peer contains the information of the peer for an RPC, such as the address
+// and authentication information.
+type Peer struct {
+	// Addr is the peer address.
+	Addr net.Addr
+	// AuthInfo is the authentication information of the transport.
+	// It is nil if there is no transport security being used.
+	AuthInfo credentials.AuthInfo
+}
+
+type peerKey struct{}
+
+// NewContext creates a new context with peer information attached.
+func NewContext(ctx context.Context, p *Peer) context.Context {
+	return context.WithValue(ctx, peerKey{}, p)
+}
+
+// FromContext returns the peer information in ctx if it exists.
+func FromContext(ctx context.Context) (p *Peer, ok bool) {
+	p, ok = ctx.Value(peerKey{}).(*Peer)
+	return
+}
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
new file mode 100644
index 000000000..e8367cb89
--- /dev/null
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -0,0 +1,177 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"io"
+	"sync"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/status"
+)
+
+// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
+// actions and unblocks when there's a picker update.
+type pickerWrapper struct {
+	mu         sync.Mutex
+	done       bool
+	blockingCh chan struct{}
+	picker     balancer.Picker
+}
+
+func newPickerWrapper() *pickerWrapper {
+	return &pickerWrapper{blockingCh: make(chan struct{})}
+}
+
+// updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
+func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
+	pw.mu.Lock()
+	if pw.done {
+		pw.mu.Unlock()
+		return
+	}
+	pw.picker = p
+	// pw.blockingCh should never be nil.
+ close(pw.blockingCh) + pw.blockingCh = make(chan struct{}) + pw.mu.Unlock() +} + +func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { + acw.mu.Lock() + ac := acw.ac + acw.mu.Unlock() + ac.incrCallsStarted() + return func(b balancer.DoneInfo) { + if b.Err != nil && b.Err != io.EOF { + ac.incrCallsFailed() + } else { + ac.incrCallsSucceeded() + } + if done != nil { + done(b) + } + } +} + +// pick returns the transport that will be used for the RPC. +// It may block in the following cases: +// - there's no picker +// - the current picker returns ErrNoSubConnAvailable +// - the current picker returns other errors and failfast is false. +// - the subConn returned by the current picker is not READY +// When one of these situations happens, pick blocks until the picker gets updated. +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) { + var ch chan struct{} + + var lastPickErr error + for { + pw.mu.Lock() + if pw.done { + pw.mu.Unlock() + return nil, nil, ErrClientConnClosing + } + + if pw.picker == nil { + ch = pw.blockingCh + } + if ch == pw.blockingCh { + // This could happen when either: + // - pw.picker is nil (the previous if condition), or + // - has called pick on the current picker. + pw.mu.Unlock() + select { + case <-ctx.Done(): + var errStr string + if lastPickErr != nil { + errStr = "latest balancer error: " + lastPickErr.Error() + } else { + errStr = ctx.Err().Error() + } + switch ctx.Err() { + case context.DeadlineExceeded: + return nil, nil, status.Error(codes.DeadlineExceeded, errStr) + case context.Canceled: + return nil, nil, status.Error(codes.Canceled, errStr) + } + case <-ch: + } + continue + } + + ch = pw.blockingCh + p := pw.picker + pw.mu.Unlock() + + pickResult, err := p.Pick(info) + + if err != nil { + if err == balancer.ErrNoSubConnAvailable { + continue + } + if _, ok := status.FromError(err); ok { + // Status error: end the RPC unconditionally with this status. + return nil, nil, err + } + // For all other errors, wait for ready RPCs should block and other + // RPCs should fail with unavailable. + if !failfast { + lastPickErr = err + continue + } + return nil, nil, status.Error(codes.Unavailable, err.Error()) + } + + acw, ok := pickResult.SubConn.(*acBalancerWrapper) + if !ok { + logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) + continue + } + if t := acw.getAddrConn().getReadyTransport(); t != nil { + if channelz.IsOn() { + return t, doneChannelzWrapper(acw, pickResult.Done), nil + } + return t, pickResult.Done, nil + } + if pickResult.Done != nil { + // Calling done with nil error, no bytes sent and no bytes received. + // DoneInfo with default value works. + pickResult.Done(balancer.DoneInfo{}) + } + logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + // If ok == false, ac.state is not READY. + // A valid picker always returns READY subConn. This means the state of ac + // just changed, and picker will be updated shortly. + // continue back to the beginning of the for loop to repick. 
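		// From the caller's side this blocking is controlled by the
		// WaitForReady call option. A hedged sketch, assuming an established
		// *grpc.ClientConn named conn and a hypothetical method name:
		//
		//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		//	defer cancel()
		//	err := conn.Invoke(ctx, "/demo.Echo/Ping", req, resp, grpc.WaitForReady(true))
		//	// If no transport becomes READY before the deadline, the select
		//	// above maps ctx.Err() to a DeadlineExceeded status.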
+ } +} + +func (pw *pickerWrapper) close() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.done = true + close(pw.blockingCh) +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go new file mode 100644 index 000000000..5168b62b0 --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -0,0 +1,155 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" +) + +// PickFirstBalancerName is the name of the pick_first balancer. +const PickFirstBalancerName = "pick_first" + +func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +} + +type pickfirstBuilder struct{} + +func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &pickfirstBalancer{cc: cc} +} + +func (*pickfirstBuilder) Name() string { + return PickFirstBalancerName +} + +type pickfirstBalancer struct { + state connectivity.State + cc balancer.ClientConn + sc balancer.SubConn +} + +func (b *pickfirstBalancer) ResolverError(err error) { + switch b.state { + case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: + // Set a failing picker if we don't have a good picker. 
+ b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) + } + if logger.V(2) { + logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) + } +} + +func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { + if len(cs.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + if b.sc == nil { + var err error + b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + } + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState + } + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) + b.sc.Connect() + } else { + b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses) + b.sc.Connect() + } + return nil +} + +func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { + if logger.V(2) { + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) + } + if b.sc != sc { + if logger.V(2) { + logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + } + return + } + b.state = s.ConnectivityState + if s.ConnectivityState == connectivity.Shutdown { + b.sc = nil + return + } + + switch s.ConnectivityState { + case connectivity.Ready: + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + case connectivity.Connecting: + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + case connectivity.Idle: + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}}) + case connectivity.TransientFailure: + b.cc.UpdateState(balancer.State{ + ConnectivityState: s.ConnectivityState, + Picker: &picker{err: s.ConnectionError}, + }) + } +} + +func (b *pickfirstBalancer) Close() { +} + +func (b *pickfirstBalancer) ExitIdle() { + if b.sc != nil && b.state == connectivity.Idle { + b.sc.Connect() + } +} + +type picker struct { + result balancer.PickResult + err error +} + +func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err +} + +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. +type idlePicker struct { + sc balancer.SubConn +} + +func (i *idlePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + i.sc.Connect() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + +func init() { + balancer.Register(newPickfirstBuilder()) +} diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go new file mode 100644 index 000000000..0a1e975ad --- /dev/null +++ b/vendor/google.golang.org/grpc/preloader.go @@ -0,0 +1,67 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// PreparedMsg is responsible for creating a marshalled and compressed object.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type PreparedMsg struct {
+	// Fields for preparing the msg before sending it.
+	encodedData []byte
+	hdr         []byte
+	payload     []byte
+}
+
+// Encode marshals and compresses the message using the codec and compressor for the stream.
+func (p *PreparedMsg) Encode(s Stream, msg interface{}) error {
+	ctx := s.Context()
+	rpcInfo, ok := rpcInfoFromContext(ctx)
+	if !ok {
+		return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo")
+	}
+
+	// Check if the context has the relevant information to prepare the msg.
+	if rpcInfo.preloaderInfo == nil {
+		return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil")
+	}
+	if rpcInfo.preloaderInfo.codec == nil {
+		return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil")
+	}
+
+	// Prepare the msg.
+	data, err := encode(rpcInfo.preloaderInfo.codec, msg)
+	if err != nil {
+		return err
+	}
+	p.encodedData = data
+	compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp)
+	if err != nil {
+		return err
+	}
+	p.hdr, p.payload = msgHeader(data, compData)
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh
new file mode 100644
index 000000000..978b89f37
--- /dev/null
+++ b/vendor/google.golang.org/grpc/regenerate.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+# Copyright 2020 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eu -o pipefail
+
+WORKDIR=$(mktemp -d)
+
+function finish {
+  rm -rf "$WORKDIR"
+}
+trap finish EXIT
+
+export GOBIN=${WORKDIR}/bin
+export PATH=${GOBIN}:${PATH}
+mkdir -p ${GOBIN}
+
+echo "remove existing generated files"
+# grpc_testing_not_regenerate/*.pb.go is not re-generated,
+# see grpc_testing_not_regenerate/README.md for details.
+rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate')
+
+echo "go install google.golang.org/protobuf/cmd/protoc-gen-go"
+(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go)
+
+echo "go install cmd/protoc-gen-go-grpc"
+(cd cmd/protoc-gen-go-grpc && go install .)
+ +echo "git clone https://github.com/grpc/grpc-proto" +git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto + +echo "git clone https://github.com/protocolbuffers/protobuf" +git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf + +# Pull in code.proto as a proto dependency +mkdir -p ${WORKDIR}/googleapis/google/rpc +echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" +curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto + +mkdir -p ${WORKDIR}/out + +# Generates sources without the embed requirement +LEGACY_SOURCES=( + ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto + ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto + ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto + ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto + profiling/proto/service.proto + reflection/grpc_reflection_v1alpha/reflection.proto +) + +# Generates only the new gRPC Service symbols +SOURCES=( + $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$') + ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto + ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto + ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto + ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto + ${WORKDIR}/grpc-proto/grpc/testing/*.proto + ${WORKDIR}/grpc-proto/grpc/core/*.proto +) + +# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an +# import path of 'bar' in the generated code when 'foo.proto' is imported in +# one of the sources. +# +# Note that the protos listed here are all for testing purposes. All protos to +# be used externally should have a go_package option (and they don't need to be +# listed here). +OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\ +Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing + +for src in ${SOURCES[@]}; do + echo "protoc ${src}" + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \ + -I"." \ + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + -I${WORKDIR}/protobuf/src \ + ${src} +done + +for src in ${LEGACY_SOURCES[@]}; do + echo "protoc ${src}" + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ + -I"." \ + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + -I${WORKDIR}/protobuf/src \ + ${src} +done + +# The go_package option in grpc/lookup/v1/rls.proto doesn't match the +# current location. Move it into the right place. 
+mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 +mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 + +# grpc_testing_not_regenerate/*.pb.go are not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go + +# grpc/service_config/service_config.proto does not have a go_package option. +mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config + +# grpc/testing does not have a go_package option. +mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ +mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ + +cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go new file mode 100644 index 000000000..e87ecd0ee --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -0,0 +1,109 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package resolver + +type addressMapEntry struct { + addr Address + value interface{} +} + +// AddressMap is a map of addresses to arbitrary values taking into account +// Attributes. BalancerAttributes are ignored, as are Metadata and Type. +// Multiple accesses may not be performed concurrently. Must be created via +// NewAddressMap; do not construct directly. +type AddressMap struct { + m map[string]addressMapEntryList +} + +type addressMapEntryList []*addressMapEntry + +// NewAddressMap creates a new AddressMap. +func NewAddressMap() *AddressMap { + return &AddressMap{m: make(map[string]addressMapEntryList)} +} + +// find returns the index of addr in the addressMapEntry slice, or -1 if not +// present. +func (l addressMapEntryList) find(addr Address) int { + if len(l) == 0 { + return -1 + } + for i, entry := range l { + if entry.addr.ServerName == addr.ServerName && + entry.addr.Attributes.Equal(addr.Attributes) { + return i + } + } + return -1 +} + +// Get returns the value for the address in the map, if present. +func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { + entryList := a.m[addr.Addr] + if entry := entryList.find(addr); entry != -1 { + return entryList[entry].value, true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (a *AddressMap) Set(addr Address, value interface{}) { + entryList := a.m[addr.Addr] + if entry := entryList.find(addr); entry != -1 { + a.m[addr.Addr][entry].value = value + return + } + a.m[addr.Addr] = append(a.m[addr.Addr], &addressMapEntry{addr: addr, value: value}) +} + +// Delete removes addr from the map. 
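The Get/Set/Delete operations above key on the Addr string plus ServerName and Attributes. A small sketch with invented addresses and values; resolver.NewAddressMap and the methods are the API defined in this file:

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	m := resolver.NewAddressMap()
	// Same Addr string, different ServerName: stored as two distinct entries.
	a := resolver.Address{Addr: "10.0.0.1:443", ServerName: "backend-a"}
	b := resolver.Address{Addr: "10.0.0.1:443", ServerName: "backend-b"}

	m.Set(a, "conn-a")
	m.Set(b, "conn-b")

	if v, ok := m.Get(a); ok {
		fmt.Println(v) // conn-a
	}
	fmt.Println(m.Len()) // 2

	m.Delete(b)
	fmt.Println(m.Len()) // 1
}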
+func (a *AddressMap) Delete(addr Address) {
+	entryList := a.m[addr.Addr]
+	entry := entryList.find(addr)
+	if entry == -1 {
+		return
+	}
+	if len(entryList) == 1 {
+		entryList = nil
+	} else {
+		copy(entryList[entry:], entryList[entry+1:])
+		entryList = entryList[:len(entryList)-1]
+	}
+	a.m[addr.Addr] = entryList
+}
+
+// Len returns the number of entries in the map.
+func (a *AddressMap) Len() int {
+	ret := 0
+	for _, entryList := range a.m {
+		ret += len(entryList)
+	}
+	return ret
+}
+
+// Keys returns a slice of all current map keys.
+func (a *AddressMap) Keys() []Address {
+	ret := make([]Address, 0, a.Len())
+	for _, entryList := range a.m {
+		for _, entry := range entryList {
+			ret = append(ret, entry.addr)
+		}
+	}
+	return ret
+}
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
new file mode 100644
index 000000000..e28b68026
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -0,0 +1,286 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package resolver defines APIs for name resolution in gRPC.
+// All APIs in this package are experimental.
+package resolver
+
+import (
+	"context"
+	"net"
+	"net/url"
+
+	"google.golang.org/grpc/attributes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	// m is a map from scheme to resolver builder.
+	m = make(map[string]Builder)
+	// defaultScheme is the default scheme to use.
+	defaultScheme = "passthrough"
+)
+
+// TODO(bar) install dns resolver in init(){}.
+
+// Register registers the resolver builder to the resolver map. b.Scheme will be
+// used as the scheme registered with this builder.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Resolvers are
+// registered with the same name, the one registered last will take effect.
+func Register(b Builder) {
+	m[b.Scheme()] = b
+}
+
+// Get returns the resolver builder registered with the given scheme.
+//
+// If no builder is registered with the scheme, nil will be returned.
+func Get(scheme string) Builder {
+	if b, ok := m[scheme]; ok {
+		return b
+	}
+	return nil
+}
+
+// SetDefaultScheme sets the default scheme that will be used. The initial
+// default scheme is "passthrough".
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. The scheme set last overrides
+// previously set values.
+func SetDefaultScheme(scheme string) {
+	defaultScheme = scheme
+}
+
+// GetDefaultScheme gets the default scheme that will be used.
+func GetDefaultScheme() string {
+	return defaultScheme
+}
+
+// AddressType indicates the address type returned by name resolution.
+//
+// Deprecated: use Attributes in Address instead.
+type AddressType uint8
+
+const (
+	// Backend indicates the address is for a backend server.
+	//
+	// Deprecated: use Attributes in Address instead.
+	Backend AddressType = iota
+	// GRPCLB indicates the address is for a grpclb load balancer.
+	//
+	// Deprecated: to select the GRPCLB load balancing policy, use a service
+	// config with a corresponding loadBalancingConfig. To supply balancer
+	// addresses to the GRPCLB load balancing policy, set State.Attributes
+	// using balancer/grpclb/state.Set.
+	GRPCLB
+)
+
+// Address represents a server the client connects to.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type Address struct {
+	// Addr is the server address on which a connection will be established.
+	Addr string
+
+	// ServerName is the name of this address.
+	// If non-empty, the ServerName is used as the transport certification authority for
+	// the address, instead of the hostname from the Dial target string. In most cases,
+	// this should not be set.
+	//
+	// If Type is GRPCLB, ServerName should be the name of the remote load
+	// balancer, not the name of the backend.
+	//
+	// WARNING: ServerName must only be populated with trusted values. It
+	// is insecure to populate it with data from untrusted inputs since untrusted
+	// values could be used to bypass the authority checks performed by TLS.
+	ServerName string
+
+	// Attributes contains arbitrary data about this address intended for
+	// consumption by the SubConn.
+	Attributes *attributes.Attributes
+
+	// BalancerAttributes contains arbitrary data about this address intended
+	// for consumption by the LB policy. These attributes do not affect SubConn
+	// creation, connection establishment, handshaking, etc.
+	BalancerAttributes *attributes.Attributes
+
+	// Type is the type of this address.
+	//
+	// Deprecated: use Attributes instead.
+	Type AddressType
+
+	// Metadata is the information associated with Addr, which may be used
+	// to make load balancing decisions.
+	//
+	// Deprecated: use Attributes instead.
+	Metadata interface{}
+}
+
+// Equal returns whether a and o are identical. Metadata is compared directly,
+// not with any recursive introspection.
+func (a *Address) Equal(o Address) bool {
+	return a.Addr == o.Addr && a.ServerName == o.ServerName &&
+		a.Attributes.Equal(o.Attributes) &&
+		a.BalancerAttributes.Equal(o.BalancerAttributes) &&
+		a.Type == o.Type && a.Metadata == o.Metadata
+}
+
+// BuildOptions includes additional information for the builder to create
+// the resolver.
+type BuildOptions struct {
+	// DisableServiceConfig indicates whether a resolver implementation should
+	// fetch service config data.
+	DisableServiceConfig bool
+	// DialCreds is the transport credentials used by the ClientConn for
+	// communicating with the target gRPC service (set via
+	// WithTransportCredentials). In cases where a name resolution service
+	// requires the same credentials, the resolver may use this field. In most
+	// cases though, it is not appropriate, and this field may be ignored.
+	DialCreds credentials.TransportCredentials
+	// CredsBundle is the credentials bundle used by the ClientConn for
+	// communicating with the target gRPC service (set via
+	// WithCredentialsBundle). In cases where a name resolution service
+	// requires the same credentials, the resolver may use this field. In most
+	// cases though, it is not appropriate, and this field may be ignored.
+	CredsBundle credentials.Bundle
+	// Dialer is the custom dialer used by the ClientConn for dialling the
+	// target gRPC service (set via WithDialer).
In cases where a name + // resolution service requires the same dialer, the resolver may use this + // field. In most cases though, it is not appropriate, and this field may + // be ignored. + Dialer func(context.Context, string) (net.Conn, error) +} + +// State contains the current Resolver state relevant to the ClientConn. +type State struct { + // Addresses is the latest set of resolved addresses for the target. + Addresses []Address + + // ServiceConfig contains the result from parsing the latest service + // config. If it is nil, it indicates no service config is present or the + // resolver does not provide service configs. + ServiceConfig *serviceconfig.ParseResult + + // Attributes contains arbitrary data about the resolver intended for + // consumption by the load balancing policy. + Attributes *attributes.Attributes +} + +// ClientConn contains the callbacks for resolver to notify any updates +// to the gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // UpdateState updates the state of the ClientConn appropriately. + UpdateState(State) error + // ReportError notifies the ClientConn that the Resolver encountered an + // error. The ClientConn will notify the load balancer and begin calling + // ResolveNow on the Resolver with exponential backoff. + ReportError(error) + // NewAddress is called by resolver to notify ClientConn a new list + // of resolved addresses. + // The address list should be the complete list of resolved addresses. + // + // Deprecated: Use UpdateState instead. + NewAddress(addresses []Address) + // NewServiceConfig is called by resolver to notify ClientConn a new + // service config. The service config should be provided as a json string. + // + // Deprecated: Use UpdateState instead. + NewServiceConfig(serviceConfig string) + // ParseServiceConfig parses the provided service config and returns an + // object that provides the parsed config. + ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult +} + +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// It is parsed from the target string that gets passed into Dial or DialContext +// by the user. And gRPC passes it to the resolver and the balancer. +// +// If the target follows the naming spec, and the parsed scheme is registered +// with gRPC, we will parse the target string according to the spec. If the +// target does not contain a scheme or if the parsed scheme is not registered +// (i.e. no corresponding resolver available to resolve the endpoint), we will +// apply the default scheme, and will attempt to reparse it. +// +// Examples: +// +// - "dns://some_authority/foo.bar" +// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// - "foo.bar" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} +// - "unknown_scheme://authority/endpoint" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} +type Target struct { + // Deprecated: use URL.Scheme instead. + Scheme string + // Deprecated: use URL.Host instead. + Authority string + // Deprecated: use URL.Path or URL.Opaque instead. The latter is set when + // the former is empty. 
+ Endpoint string + // URL contains the parsed dial target with an optional default scheme added + // to it if the original dial target contained no scheme or contained an + // unregistered scheme. Any query params specified in the original dial + // target can be accessed from here. + URL url.URL +} + +// Builder creates a resolver that will be used to watch name resolution updates. +type Builder interface { + // Build creates a new resolver for the given target. + // + // gRPC dial calls Build synchronously, and fails if the returned error is + // not nil. + Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) + // Scheme returns the scheme supported by this resolver. + // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + Scheme() string +} + +// ResolveNowOptions includes additional information for ResolveNow. +type ResolveNowOptions struct{} + +// Resolver watches for the updates on the specified target. +// Updates include address updates and service config updates. +type Resolver interface { + // ResolveNow will be called by gRPC to try to resolve the target name + // again. It's just a hint, resolver can ignore this if it's not necessary. + // + // It could be called multiple times concurrently. + ResolveNow(ResolveNowOptions) + // Close closes the resolver. + Close() +} + +// UnregisterForTesting removes the resolver builder with the given scheme from the +// resolver map. +// This function is for testing only. +func UnregisterForTesting(scheme string) { + delete(m, scheme) +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go new file mode 100644 index 000000000..2c47cd54f --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -0,0 +1,187 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "strings" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + cc *ClientConn + resolverMu sync.Mutex + resolver resolver.Resolver + done *grpcsync.Event + curState resolver.State + + incomingMu sync.Mutex // Synchronizes all the incoming calls. +} + +// newCCResolverWrapper uses the resolver.Builder to build a Resolver and +// returns a ccResolverWrapper object which wraps the newly built resolver. 
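The Builder and Resolver interfaces above are the extension point for custom name resolution. Below is a minimal static-list resolver, a sketch under invented names (the "static" scheme, staticBuilder/staticResolver, and the addresses are all hypothetical):

package staticresolver

import (
	"google.golang.org/grpc/resolver"
)

// staticBuilder resolves every "static:///..." target to a fixed address list.
type staticBuilder struct{}

func (staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	// Push the initial state; gRPC hands these addresses to the balancer.
	err := cc.UpdateState(resolver.State{Addresses: []resolver.Address{
		{Addr: "10.0.0.1:50051"},
		{Addr: "10.0.0.2:50051"},
	}})
	return &staticResolver{cc: cc}, err
}

func (staticBuilder) Scheme() string { return "static" }

type staticResolver struct{ cc resolver.ClientConn }

// ResolveNow is only a hint; a fixed list has nothing to refresh.
func (*staticResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (*staticResolver) Close()                                {}

func init() {
	// Register must run at initialization time, per the note above.
	resolver.Register(staticBuilder{})
}

A client importing this package for its side effects could then dial a target such as "static:///any-name" and reach the fixed backends.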
+func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { + ccr := &ccResolverWrapper{ + cc: cc, + done: grpcsync.NewEvent(), + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + rbo := resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + } + + var err error + // We need to hold the lock here while we assign to the ccr.resolver field + // to guard against a data race caused by the following code path, + // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up + // accessing ccr.resolver which is being assigned here. + ccr.resolverMu.Lock() + defer ccr.resolverMu.Unlock() + ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) + if err != nil { + return nil, err + } + return ccr, nil +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.resolverMu.Lock() + if !ccr.done.HasFired() { + ccr.resolver.ResolveNow(o) + } + ccr.resolverMu.Unlock() +} + +func (ccr *ccResolverWrapper) close() { + ccr.resolverMu.Lock() + ccr.resolver.Close() + ccr.done.Fire() + ccr.resolverMu.Unlock() +} + +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() + if ccr.done.HasFired() { + return nil + } + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) + if channelz.IsOn() { + ccr.addChannelzTraceEvent(s) + } + ccr.curState = s + if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { + return balancer.ErrBadResolverState + } + return nil +} + +func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() + if ccr.done.HasFired() { + return + } + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverState(resolver.State{}, err) +} + +// NewAddress is called by the resolver implementation to send addresses to gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() + if ccr.done.HasFired() { + return + } + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) + if channelz.IsOn() { + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) + } + ccr.curState.Addresses = addrs + ccr.cc.updateResolverState(ccr.curState, nil) +} + +// NewServiceConfig is called by the resolver implementation to send service +// configs to gRPC. 
+func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() + if ccr.done.HasFired() { + return + } + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) + if ccr.cc.dopts.disableServiceConfig { + channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + return + } + if channelz.IsOn() { + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) + } + ccr.curState.ServiceConfig = scpr + ccr.cc.updateResolverState(ccr.curState, nil) +} + +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) +} + +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), + Severity: channelz.CtInfo, + }) +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go new file mode 100644 index 000000000..5d407b004 --- /dev/null +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -0,0 +1,914 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "math" + "strings" + "sync" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// Compressor defines the interface gRPC uses to compress a message. +// +// Deprecated: use package encoding. +type Compressor interface { + // Do compresses p into w. + Do(w io.Writer, p []byte) error + // Type returns the compression algorithm the Compressor uses. 
+	Type() string
+}
+
+type gzipCompressor struct {
+	pool sync.Pool
+}
+
+// NewGZIPCompressor creates a Compressor based on GZIP.
+//
+// Deprecated: use package encoding/gzip.
+func NewGZIPCompressor() Compressor {
+	c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression)
+	return c
+}
+
+// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead
+// of assuming DefaultCompression.
+//
+// The error returned will be nil if the level is valid.
+//
+// Deprecated: use package encoding/gzip.
+func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
+	if level < gzip.DefaultCompression || level > gzip.BestCompression {
+		return nil, fmt.Errorf("grpc: invalid compression level: %d", level)
+	}
+	return &gzipCompressor{
+		pool: sync.Pool{
+			New: func() interface{} {
+				w, err := gzip.NewWriterLevel(ioutil.Discard, level)
+				if err != nil {
+					panic(err)
+				}
+				return w
+			},
+		},
+	}, nil
+}
+
+func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
+	z := c.pool.Get().(*gzip.Writer)
+	defer c.pool.Put(z)
+	z.Reset(w)
+	if _, err := z.Write(p); err != nil {
+		return err
+	}
+	return z.Close()
+}
+
+func (c *gzipCompressor) Type() string {
+	return "gzip"
+}
+
+// Decompressor defines the interface gRPC uses to decompress a message.
+//
+// Deprecated: use package encoding.
+type Decompressor interface {
+	// Do reads the data from r and uncompresses it.
+	Do(r io.Reader) ([]byte, error)
+	// Type returns the compression algorithm the Decompressor uses.
+	Type() string
+}
+
+type gzipDecompressor struct {
+	pool sync.Pool
+}
+
+// NewGZIPDecompressor creates a Decompressor based on GZIP.
+//
+// Deprecated: use package encoding/gzip.
+func NewGZIPDecompressor() Decompressor {
+	return &gzipDecompressor{}
+}
+
+func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
+	var z *gzip.Reader
+	switch maybeZ := d.pool.Get().(type) {
+	case nil:
+		newZ, err := gzip.NewReader(r)
+		if err != nil {
+			return nil, err
+		}
+		z = newZ
+	case *gzip.Reader:
+		z = maybeZ
+		if err := z.Reset(r); err != nil {
+			d.pool.Put(z)
+			return nil, err
+		}
+	}
+
+	defer func() {
+		z.Close()
+		d.pool.Put(z)
+	}()
+	return ioutil.ReadAll(z)
+}
+
+func (d *gzipDecompressor) Type() string {
+	return "gzip"
+}
+
+// callInfo contains all related configuration and information about an RPC.
+type callInfo struct {
+	compressorType        string
+	failFast              bool
+	maxReceiveMessageSize *int
+	maxSendMessageSize    *int
+	creds                 credentials.PerRPCCredentials
+	contentSubtype        string
+	codec                 baseCodec
+	maxRetryRPCBufferSize int
+}
+
+func defaultCallInfo() *callInfo {
+	return &callInfo{
+		failFast:              true,
+		maxRetryRPCBufferSize: 256 * 1024, // 256KB
+	}
+}
+
+// CallOption configures a Call before it starts or extracts information from
+// a Call after it completes.
+type CallOption interface {
+	// before is called before the call is sent to any server. If before
+	// returns a non-nil error, the RPC fails with that error.
+	before(*callInfo) error
+
+	// after is called after the call has completed. after cannot return an
+	// error, so any failures should be reported via output parameters.
+	after(*callInfo, *csAttempt)
+}
+
+// EmptyCallOption does not alter the Call configuration.
+// It can be embedded in another structure to carry satellite data for use
+// by interceptors.
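+// Editor's illustrative sketch (not upstream code): the embedding pattern the
+// comment above describes, from a user's package. The hintOption type and its
+// helper are hypothetical.
+//
+//	type hintOption struct {
+//		grpc.EmptyCallOption // provides no-op before/after hooks
+//		hint string
+//	}
+//
+//	// An interceptor can later recover the hint from the call's options:
+//	func hintFromOptions(opts []grpc.CallOption) (string, bool) {
+//		for _, o := range opts {
+//			if h, ok := o.(hintOption); ok {
+//				return h.hint, true
+//			}
+//		}
+//		return "", false
+//	}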
+type EmptyCallOption struct{}
+
+func (EmptyCallOption) before(*callInfo) error      { return nil }
+func (EmptyCallOption) after(*callInfo, *csAttempt) {}
+
+// Header returns a CallOption that retrieves the header metadata
+// for a unary RPC.
+func Header(md *metadata.MD) CallOption {
+	return HeaderCallOption{HeaderAddr: md}
+}
+
+// HeaderCallOption is a CallOption for collecting response header metadata.
+// The metadata field will be populated *after* the RPC completes.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type HeaderCallOption struct {
+	HeaderAddr *metadata.MD
+}
+
+func (o HeaderCallOption) before(c *callInfo) error { return nil }
+func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) {
+	*o.HeaderAddr, _ = attempt.s.Header()
+}
+
+// Trailer returns a CallOption that retrieves the trailer metadata
+// for a unary RPC.
+func Trailer(md *metadata.MD) CallOption {
+	return TrailerCallOption{TrailerAddr: md}
+}
+
+// TrailerCallOption is a CallOption for collecting response trailer metadata.
+// The metadata field will be populated *after* the RPC completes.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type TrailerCallOption struct {
+	TrailerAddr *metadata.MD
+}
+
+func (o TrailerCallOption) before(c *callInfo) error { return nil }
+func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) {
+	*o.TrailerAddr = attempt.s.Trailer()
+}
+
+// Peer returns a CallOption that retrieves peer information for a unary RPC.
+// The peer field will be populated *after* the RPC completes.
+func Peer(p *peer.Peer) CallOption {
+	return PeerCallOption{PeerAddr: p}
+}
+
+// PeerCallOption is a CallOption for collecting the identity of the remote
+// peer. The peer field will be populated *after* the RPC completes.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type PeerCallOption struct {
+	PeerAddr *peer.Peer
+}
+
+func (o PeerCallOption) before(c *callInfo) error { return nil }
+func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) {
+	if x, ok := peer.FromContext(attempt.s.Context()); ok {
+		*o.PeerAddr = *x
+	}
+}
+
+// WaitForReady configures the action to take when an RPC is attempted on broken
+// connections or unreachable servers. If waitForReady is false and the
+// connection is in the TRANSIENT_FAILURE state, the RPC will fail
+// immediately. Otherwise, the RPC client will block the call until a
+// connection is available (or the call is canceled or times out) and will
+// retry the call if it fails due to a transient error. gRPC will not retry if
+// data was written to the wire unless the server indicates it did not process
+// the data. Please refer to
+// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
+//
+// By default, RPCs don't "wait for ready".
+func WaitForReady(waitForReady bool) CallOption {
+	return FailFastCallOption{FailFast: !waitForReady}
+}
+
+// FailFast is the opposite of WaitForReady.
+//
+// Deprecated: use WaitForReady.
+func FailFast(failFast bool) CallOption {
+	return FailFastCallOption{FailFast: failFast}
+}
+
+// FailFastCallOption is a CallOption for indicating whether an RPC should fail
+// fast or not.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
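+// Editor's illustrative sketch (not upstream code): combining the call options
+// defined above on a unary invocation. The method name is hypothetical.
+//
+//	var hdr, trl metadata.MD
+//	err := conn.Invoke(ctx, "/echo.Echo/UnaryEcho", req, resp,
+//		grpc.WaitForReady(true), // equivalent to the deprecated grpc.FailFast(false)
+//		grpc.Header(&hdr),       // response headers, filled in after the RPC
+//		grpc.Trailer(&trl),      // response trailers, filled in after the RPC
+//	)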
+type FailFastCallOption struct { + FailFast bool +} + +func (o FailFastCallOption) before(c *callInfo) error { + c.failFast = o.FailFast + return nil +} +func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} + +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can receive. +func MaxCallRecvMsgSize(bytes int) CallOption { + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} +} + +// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message +// size in bytes the client can receive. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxRecvMsgSizeCallOption struct { + MaxRecvMsgSize int +} + +func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { + c.maxReceiveMessageSize = &o.MaxRecvMsgSize + return nil +} +func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can send. +func MaxCallSendMsgSize(bytes int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} +} + +// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message +// size in bytes the client can send. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxSendMsgSizeCallOption struct { + MaxSendMsgSize int +} + +func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { + c.maxSendMessageSize = &o.MaxSendMsgSize + return nil +} +func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return PerRPCCredsCallOption{Creds: creds} +} + +// PerRPCCredsCallOption is a CallOption that indicates the per-RPC +// credentials to use for the call. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type PerRPCCredsCallOption struct { + Creds credentials.PerRPCCredentials +} + +func (o PerRPCCredsCallOption) before(c *callInfo) error { + c.creds = o.Creds + return nil +} +func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} + +// UseCompressor returns a CallOption which sets the compressor used when +// sending the request. If WithCompressor is also set, UseCompressor has +// higher priority. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func UseCompressor(name string) CallOption { + return CompressorCallOption{CompressorType: name} +} + +// CompressorCallOption is a CallOption that indicates the compressor to use. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type CompressorCallOption struct { + CompressorType string +} + +func (o CompressorCallOption) before(c *callInfo) error { + c.compressorType = o.CompressorType + return nil +} +func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} + +// CallContentSubtype returns a CallOption that will set the content-subtype +// for a call. For example, if content-subtype is "json", the Content-Type over +// the wire will be "application/grpc+json". The content-subtype is converted +// to lowercase before being included in Content-Type. 
See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If ForceCodec is not also used, the content-subtype will be used to look up +// the Codec to use in the registry controlled by RegisterCodec. See the +// documentation on RegisterCodec for details on registration. The lookup of +// content-subtype is case-insensitive. If no such Codec is found, the call +// will result in an error with code codes.Internal. +// +// If ForceCodec is also used, that Codec will be used for all request and +// response messages, with the content-subtype set to the given contentSubtype +// here for requests. +func CallContentSubtype(contentSubtype string) CallOption { + return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)} +} + +// ContentSubtypeCallOption is a CallOption that indicates the content-subtype +// used for marshaling messages. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ContentSubtypeCallOption struct { + ContentSubtype string +} + +func (o ContentSubtypeCallOption) before(c *callInfo) error { + c.contentSubtype = o.ContentSubtype + return nil +} +func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// ForceCodec returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodec(codec encoding.Codec) CallOption { + return ForceCodecCallOption{Codec: codec} +} + +// ForceCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecCallOption struct { + Codec encoding.Codec +} + +func (o ForceCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} + +// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of +// an encoding.Codec. +// +// Deprecated: use ForceCodec instead. +func CallCustomCodec(codec Codec) CallOption { + return CustomCodecCallOption{Codec: codec} +} + +// CustomCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type CustomCodecCallOption struct { + Codec Codec +} + +func (o CustomCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} + +// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory +// used for buffering this RPC's requests for retry purposes. 
+// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func MaxRetryRPCBufferSize(bytes int) CallOption { + return MaxRetryRPCBufferSizeCallOption{bytes} +} + +// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of +// memory to be used for caching this RPC for retry purposes. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxRetryRPCBufferSizeCallOption struct { + MaxRetryRPCBufferSize int +} + +func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { + c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize + return nil +} +func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// The format of the payload: compressed or not? +type payloadFormat uint8 + +const ( + compressionNone payloadFormat = 0 // no compression + compressionMade payloadFormat = 1 // compressed +) + +// parser reads complete gRPC messages from the underlying reader. +type parser struct { + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r io.Reader + + // The header of a gRPC message. Find more detail at + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md + header [5]byte +} + +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// * io.EOF, when no messages remain +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * an error from the status package +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. +func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := p.r.Read(p.header[:]); err != nil { + return 0, nil, err + } + + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) + + if length == 0 { + return pf, nil, nil + } + if int64(length) > int64(maxInt) { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) + } + if int(length) > maxReceiveMessageSize { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + } + // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead + // of making it for each message: + msg = make([]byte, int(length)) + if _, err := p.r.Read(msg); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, nil, err + } + return pf, msg, nil +} + +// encode serializes msg and returns a buffer containing the message, or an +// error if it is too large to be transmitted by grpc. If msg is nil, it +// generates an empty message. +func encode(c baseCodec, msg interface{}) ([]byte, error) { + if msg == nil { // NOTE: typed nils will not be caught by this check + return nil, nil + } + b, err := c.Marshal(msg) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) + } + if uint(len(b)) > math.MaxUint32 { + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } + return b, nil +} + +// compress returns the input bytes compressed by compressor or cp. 
If both +// compressors are nil, returns nil. +// +// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. +func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { + if compressor == nil && cp == nil { + return nil, nil + } + wrapErr := func(err error) error { + return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + } + cbuf := &bytes.Buffer{} + if compressor != nil { + z, err := compressor.Compress(cbuf) + if err != nil { + return nil, wrapErr(err) + } + if _, err := z.Write(in); err != nil { + return nil, wrapErr(err) + } + if err := z.Close(); err != nil { + return nil, wrapErr(err) + } + } else { + if err := cp.Do(cbuf, in); err != nil { + return nil, wrapErr(err) + } + } + return cbuf.Bytes(), nil +} + +const ( + payloadLen = 1 + sizeLen = 4 + headerLen = payloadLen + sizeLen +) + +// msgHeader returns a 5-byte header for the message being transmitted and the +// payload, which is compData if non-nil or data otherwise. +func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { + hdr = make([]byte, headerLen) + if compData != nil { + hdr[0] = byte(compressionMade) + data = compData + } else { + hdr[0] = byte(compressionNone) + } + + // Write length of payload into buf + binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) + return hdr, data +} + +func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { + return &stats.OutPayload{ + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + SentTime: t, + } +} + +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { + switch pf { + case compressionNone: + case compressionMade: + if recvCompress == "" || recvCompress == encoding.Identity { + return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") + } + if !haveCompressor { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + default: + return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +type payloadInfo struct { + wireLength int // The compressed length got from wire. + uncompressedBytes []byte +} + +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { + pf, d, err := p.recvMsg(maxReceiveMessageSize) + if err != nil { + return nil, err + } + if payInfo != nil { + payInfo.wireLength = len(d) + } + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { + return nil, st.Err() + } + + var size int + if pf == compressionMade { + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, + // use this decompressor as the default. + if dc != nil { + d, err = dc.Do(bytes.NewReader(d)) + size = len(d) + } else { + d, size, err = decompress(compressor, d, maxReceiveMessageSize) + } + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + if size > maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. 
%d)", size, maxReceiveMessageSize) + } + } + return d, nil +} + +// Using compressor, decompress d, returning data and size. +// Optionally, if data will be over maxReceiveMessageSize, just return the size. +func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { + dcReader, err := compressor.Decompress(bytes.NewReader(d)) + if err != nil { + return nil, 0, err + } + if sizer, ok := compressor.(interface { + DecompressedSize(compressedBytes []byte) int + }); ok { + if size := sizer.DecompressedSize(d); size >= 0 { + if size > maxReceiveMessageSize { + return nil, size, nil + } + // size is used as an estimate to size the buffer, but we + // will read more data if available. + // +MinRead so ReadFrom will not reallocate if size is correct. + buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return buf.Bytes(), int(bytesRead), err + } + } + // Read from LimitReader with limit max+1. So if the underlying + // reader is over limit, the result will be bigger than max. + d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return d, len(d), err +} + +// For the two compressor parameters, both should not be set, but if they are, +// dc takes precedence over compressor. +// TODO(dfawley): wrap the old compressor/decompressor using the new API? +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) + if err != nil { + return err + } + if err := c.Unmarshal(d, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + } + if payInfo != nil { + payInfo.uncompressedBytes = d + } + return nil +} + +// Information about RPC +type rpcInfo struct { + failfast bool + preloaderInfo *compressorInfo +} + +// Information about Preloader +// Responsible for storing codec, and compressors +// If stream (s) has context s.Context which stores rpcInfo that has non nil +// pointers to codec, and compressors, then we can use preparedMsg for Async message prep +// and reuse marshalled bytes +type compressorInfo struct { + codec baseCodec + cp Compressor + comp encoding.Compressor +} + +type rpcInfoContextKey struct{} + +func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ + failfast: failfast, + preloaderInfo: &compressorInfo{ + codec: codec, + cp: cp, + comp: comp, + }, + }) +} + +func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { + s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo) + return +} + +// Code returns the error code for err if it was produced by the rpc system. +// Otherwise, it returns codes.Unknown. +// +// Deprecated: use status.Code instead. +func Code(err error) codes.Code { + return status.Code(err) +} + +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +// +// Deprecated: use status.Convert and Message method instead. 
+func ErrorDesc(err error) string { + return status.Convert(err).Message() +} + +// Errorf returns an error containing an error code and a description; +// Errorf returns nil if c is OK. +// +// Deprecated: use status.Errorf instead. +func Errorf(c codes.Code, format string, a ...interface{}) error { + return status.Errorf(c, format, a...) +} + +// toRPCErr converts an error into an error from the status package. +func toRPCErr(err error) error { + switch err { + case nil, io.EOF: + return err + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + case io.ErrUnexpectedEOF: + return status.Error(codes.Internal, err.Error()) + } + + switch e := err.(type) { + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + case *transport.NewStreamError: + return toRPCErr(e.Err) + } + + if _, ok := status.FromError(err); ok { + return err + } + + return status.Error(codes.Unknown, err.Error()) +} + +// setCallInfoCodec should only be called after CallOptions have been applied. +func setCallInfoCodec(c *callInfo) error { + if c.codec != nil { + // codec was already set by a CallOption; use it, but set the content + // subtype if it is not set. + if c.contentSubtype == "" { + // c.codec is a baseCodec to hide the difference between grpc.Codec and + // encoding.Codec (Name vs. String method name). We only support + // setting content subtype from encoding.Codec to avoid a behavior + // change with the deprecated version. + if ec, ok := c.codec.(encoding.Codec); ok { + c.contentSubtype = strings.ToLower(ec.Name()) + } + } + return nil + } + + if c.contentSubtype == "" { + // No codec specified in CallOptions; use proto by default. + c.codec = encoding.GetCodec(proto.Name) + return nil + } + + // c.contentSubtype is already lowercased in CallContentSubtype + c.codec = encoding.GetCodec(c.contentSubtype) + if c.codec == nil { + return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) + } + return nil +} + +// channelzData is used to store channelz related data for ClientConn, addrConn and Server. +// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic +// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. +// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. +type channelzData struct { + callsStarted int64 + callsFailed int64 + callsSucceeded int64 + // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of + // time.Time since it's more costly to atomically update time.Time variable than int64 variable. + lastCallStartedTime int64 +} + +// The SupportPackageIsVersion variables are referenced from generated protocol +// buffer files to ensure compatibility with the gRPC version used. The latest +// support package version is 7. +// +// Older versions are kept for compatibility. +// +// These constants should not be referenced from any other code. 
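+// Editor's note (illustrative): generated stubs reference these constants via
+// a blank identifier; for example, protoc-gen-go-grpc emits:
+//
+//	// Requires gRPC-Go v1.32.0 or later.
+//	const _ = grpc.SupportPackageIsVersion7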
+const ( + SupportPackageIsVersion3 = true + SupportPackageIsVersion4 = true + SupportPackageIsVersion5 = true + SupportPackageIsVersion6 = true + SupportPackageIsVersion7 = true +) + +const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go new file mode 100644 index 000000000..b24b6d539 --- /dev/null +++ b/vendor/google.golang.org/grpc/server.go @@ -0,0 +1,1870 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/trace" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = math.MaxInt32 + + // Server transports are tracked in a map which is keyed on listener + // address. For regular gRPC traffic, connections are accepted in Serve() + // through a call to Accept(), and we use the actual listener address as key + // when we add it to the map. But for connections received through + // ServeHTTP(), we do not have a listener and hence use this dummy value. + listenerAddressForServeHTTP = "listenerAddressForServeHTTP" +) + +func init() { + internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { + return srv.opts.creds + } + internal.DrainServerTransports = func(srv *Server, addr string) { + srv.drainServerTransports(addr) + } +} + +var statusOK = status.New(codes.OK, "") +var logger = grpclog.Component("core") + +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) + +// MethodDesc represents an RPC service's method specification. +type MethodDesc struct { + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. +type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. + HandlerType interface{} + Methods []MethodDesc + Streams []StreamDesc + Metadata interface{} +} + +// serviceInfo wraps information about a service. 
It is very similar to +// ServiceDesc and is constructed from it for internal purposes. +type serviceInfo struct { + // Contains the implementation for the methods in this service. + serviceImpl interface{} + methods map[string]*MethodDesc + streams map[string]*StreamDesc + mdata interface{} +} + +type serverWorkerData struct { + st transport.ServerTransport + wg *sync.WaitGroup + stream *transport.Stream +} + +// Server is a gRPC server to serve RPC requests. +type Server struct { + opts serverOptions + + mu sync.Mutex // guards following + lis map[net.Listener]bool + // conns contains all active server transports. It is a map keyed on a + // listener address with the value being the set of active transports + // belonging to that listener. + conns map[string]map[transport.ServerTransport]bool + serve bool + drain bool + cv *sync.Cond // signaled when connections close for GracefulStop + services map[string]*serviceInfo // service name -> service info + events trace.EventLog + + quit *grpcsync.Event + done *grpcsync.Event + channelzRemoveOnce sync.Once + serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + + channelzID int64 // channelz unique identification number + czData *channelzData + + serverWorkerChannels []chan *serverWorkerData +} + +type serverOptions struct { + creds credentials.TransportCredentials + codec baseCodec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + chainUnaryInts []UnaryServerInterceptor + chainStreamInts []StreamServerInterceptor + inTapHandle tap.ServerInHandle + statsHandler stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int + connectionTimeout time.Duration + maxHeaderListSize *uint32 + headerTableSize *uint32 + numServerWorkers uint32 +} + +var defaultServerOptions = serverOptions{ + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, + connectionTimeout: 120 * time.Second, + writeBufferSize: defaultWriteBufSize, + readBufferSize: defaultReadBufSize, +} + +// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. +type ServerOption interface { + apply(*serverOptions) +} + +// EmptyServerOption does not alter the server configuration. It can be embedded +// in another structure to build custom server options. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type EmptyServerOption struct{} + +func (EmptyServerOption) apply(*serverOptions) {} + +// funcServerOption wraps a function that modifies serverOptions into an +// implementation of the ServerOption interface. +type funcServerOption struct { + f func(*serverOptions) +} + +func (fdo *funcServerOption) apply(do *serverOptions) { + fdo.f(do) +} + +func newFuncServerOption(f func(*serverOptions)) *funcServerOption { + return &funcServerOption{ + f: f, + } +} + +// WriteBufferSize determines how much data can be batched before doing a write on the wire. +// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. +// The default value for this buffer is 32KB. +// Zero will disable the write buffer such that each write will be on underlying connection. 
+// Note: A Send call may not directly translate to a write.
+func WriteBufferSize(s int) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.writeBufferSize = s
+	})
+}
+
+// ReadBufferSize lets you set the size of the read buffer; this determines how
+// much data can be read at most for one read syscall.
+// The default value for this buffer is 32KB.
+// Zero will disable the read buffer for a connection so the data framer can
+// access the underlying conn directly.
+func ReadBufferSize(s int) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.readBufferSize = s
+	})
+}
+
+// InitialWindowSize returns a ServerOption that sets the window size for a stream.
+// The lower bound for window size is 64K and any value smaller than that will be ignored.
+func InitialWindowSize(s int32) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.initialWindowSize = s
+	})
+}
+
+// InitialConnWindowSize returns a ServerOption that sets the window size for a connection.
+// The lower bound for window size is 64K and any value smaller than that will be ignored.
+func InitialConnWindowSize(s int32) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.initialConnWindowSize = s
+	})
+}
+
+// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
+func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
+	if kp.Time > 0 && kp.Time < time.Second {
+		logger.Warning("Adjusting keepalive ping interval to minimum period of 1s")
+		kp.Time = time.Second
+	}
+
+	return newFuncServerOption(func(o *serverOptions) {
+		o.keepaliveParams = kp
+	})
+}
+
+// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server.
+func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.keepalivePolicy = kep
+	})
+}
+
+// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
+//
+// This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
+//
+// Deprecated: register codecs using encoding.RegisterCodec. The server will
+// automatically use registered codecs based on the incoming requests' headers.
+// See also
+// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
+// Will be supported throughout 1.x.
+func CustomCodec(codec Codec) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.codec = codec
+	})
+}
+
+// ForceServerCodec returns a ServerOption that sets a codec for message
+// marshaling and unmarshaling.
+//
+// This will override any lookups by content-subtype for Codecs registered
+// with RegisterCodec.
+//
+// See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details. Also see the documentation on RegisterCodec and
+// CallContentSubtype for more details on the interaction between encoding.Codec
+// and content-subtype.
+//
+// This function is provided for advanced users; prefer to register codecs
+// using encoding.RegisterCodec.
+// The server will automatically use registered codecs based on the incoming
+// requests' headers. See also
+// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
+// Will be supported throughout 1.x.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
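+// Editor's illustrative sketch (not upstream code): a minimal pass-through
+// codec a server could force. rawCodec and its "raw" name are hypothetical,
+// and the Marshal assertion assumes callers only pass []byte messages.
+//
+//	type rawCodec struct{}
+//
+//	func (rawCodec) Marshal(v interface{}) ([]byte, error) { return v.([]byte), nil }
+//	func (rawCodec) Unmarshal(data []byte, v interface{}) error {
+//		*(v.(*[]byte)) = data
+//		return nil
+//	}
+//	func (rawCodec) Name() string { return "raw" }
+//
+//	srv := grpc.NewServer(grpc.ForceServerCodec(rawCodec{}))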
+func ForceServerCodec(codec encoding.Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codec + }) +} + +// RPCCompressor returns a ServerOption that sets a compressor for outbound +// messages. For backward compatibility, all outbound messages will be sent +// using this compressor, regardless of incoming message compression. By +// default, server messages will be sent using the same compressor with which +// request messages were sent. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func RPCCompressor(cp Compressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.cp = cp + }) +} + +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound +// messages. It has higher priority than decompressors registered via +// encoding.RegisterCompressor. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func RPCDecompressor(dc Decompressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.dc = dc + }) +} + +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. +// +// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x. +func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB. +func MaxRecvMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxReceiveMessageSize = m + }) +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default `math.MaxInt32`. +func MaxSendMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxSendMessageSize = m + }) +} + +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number +// of concurrent streams to each ServerTransport. +func MaxConcurrentStreams(n uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxConcurrentStreams = n + }) +} + +// Creds returns a ServerOption that sets credentials for server connections. +func Creds(c credentials.TransportCredentials) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.creds = c + }) +} + +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. Only one unary interceptor can be installed. The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. +func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.unaryInt != nil { + panic("The unary server interceptor was already set and may not be reset.") + } + o.unaryInt = i + }) +} + +// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor +// for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All unary interceptors added by this method will be chained. +func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) 
+ }) +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. Only one stream interceptor can be installed. +func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.streamInt != nil { + panic("The stream server interceptor was already set and may not be reset.") + } + o.streamInt = i + }) +} + +// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor +// for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All stream interceptors added by this method will be chained. +func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + +// InTapHandle returns a ServerOption that sets the tap handle for all the server +// transport to be created. Only one can be installed. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InTapHandle(h tap.ServerInHandle) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.inTapHandle != nil { + panic("The tap handle was already set and may not be reset.") + } + o.inTapHandle = h + }) +} + +// StatsHandler returns a ServerOption that sets the stats handler for the server. +func StatsHandler(h stats.Handler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.statsHandler = h + }) +} + +// UnknownServiceHandler returns a ServerOption that allows for adding a custom +// unknown service handler. The provided method is a bidi-streaming RPC service +// handler that will be invoked instead of returning the "unimplemented" gRPC +// error whenever a request is received for an unregistered service or method. +// The handling function and stream interceptor (if set) have full access to +// the ServerStream, including its Context. +func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.unknownStreamDesc = &StreamDesc{ + StreamName: "unknown_service_handler", + Handler: streamHandler, + // We need to assume that the users of the streamHandler will want to use both. + ClientStreams: true, + ServerStreams: true, + } + }) +} + +// ConnectionTimeout returns a ServerOption that sets the timeout for +// connection establishment (up to and including HTTP/2 handshaking) for all +// new connections. If this is not set, the default is 120 seconds. A zero or +// negative value will result in an immediate timeout. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ConnectionTimeout(d time.Duration) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.connectionTimeout = d + }) +} + +// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size +// of header list that the server is prepared to accept. +func MaxHeaderListSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxHeaderListSize = &s + }) +} + +// HeaderTableSize returns a ServerOption that sets the size of dynamic +// header table for stream. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
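+// Editor's illustrative sketch (not upstream code) for ChainStreamInterceptor
+// above: a timing interceptor with the required StreamServerInterceptor shape.
+//
+//	func timed(srv interface{}, ss grpc.ServerStream,
+//		info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+//		start := time.Now()
+//		err := handler(srv, ss)
+//		log.Printf("%s took %v err=%v", info.FullMethod, time.Since(start), err)
+//		return err
+//	}
+//
+//	srv := grpc.NewServer(grpc.ChainStreamInterceptor(timed))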
+func HeaderTableSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.headerTableSize = &s + }) +} + +// NumStreamWorkers returns a ServerOption that sets the number of worker +// goroutines that should be used to process incoming streams. Setting this to +// zero (default) will disable workers and spawn a new goroutine for each +// stream. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NumStreamWorkers(numServerWorkers uint32) ServerOption { + // TODO: If/when this API gets stabilized (i.e. stream workers become the + // only way streams are processed), change the behavior of the zero value to + // a sane default. Preliminary experiments suggest that a value equal to the + // number of CPUs available is most performant; requires thorough testing. + return newFuncServerOption(func(o *serverOptions) { + o.numServerWorkers = numServerWorkers + }) +} + +// serverWorkerResetThreshold defines how often the stack must be reset. Every +// N requests, by spawning a new goroutine in its place, a worker can reset its +// stack so that large stacks don't live in memory forever. 2^16 should allow +// each goroutine stack to live for at least a few seconds in a typical +// workload (assuming a QPS of a few thousand requests/sec). +const serverWorkerResetThreshold = 1 << 16 + +// serverWorkers blocks on a *transport.Stream channel forever and waits for +// data to be fed by serveStreams. This allows different requests to be +// processed by the same goroutine, removing the need for expensive stack +// re-allocations (see the runtime.morestack problem [1]). +// +// [1] https://github.com/golang/go/issues/18138 +func (s *Server) serverWorker(ch chan *serverWorkerData) { + // To make sure all server workers don't reset at the same time, choose a + // random number of iterations before resetting. + threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) + for completed := 0; completed < threshold; completed++ { + data, ok := <-ch + if !ok { + return + } + s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) + data.wg.Done() + } + go s.serverWorker(ch) +} + +// initServerWorkers creates worker goroutines and channels to process incoming +// connections to reduce the time spent overall on runtime.morestack. +func (s *Server) initServerWorkers() { + s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + s.serverWorkerChannels[i] = make(chan *serverWorkerData) + go s.serverWorker(s.serverWorkerChannels[i]) + } +} + +func (s *Server) stopServerWorkers() { + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + close(s.serverWorkerChannels[i]) + } +} + +// NewServer creates a gRPC server which has no service registered and has not +// started to accept requests yet. 
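+// Editor's illustrative sketch (not upstream code): a typical construction,
+// combining a few of the options defined above; the values are arbitrary.
+//
+//	srv := grpc.NewServer(
+//		grpc.MaxRecvMsgSize(8<<20),                      // 8 MiB instead of the 4 MiB default
+//		grpc.ConnectionTimeout(30*time.Second),          // down from the 120s default
+//		grpc.NumStreamWorkers(uint32(runtime.NumCPU())), // see the TODO above
+//	)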
+func NewServer(opt ...ServerOption) *Server { + opts := defaultServerOptions + for _, o := range opt { + o.apply(&opts) + } + s := &Server{ + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[string]map[transport.ServerTransport]bool), + services: make(map[string]*serviceInfo), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + czData: new(channelzData), + } + chainUnaryServerInterceptors(s) + chainStreamServerInterceptors(s) + s.cv = sync.NewCond(&s.mu) + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + + if s.opts.numServerWorkers > 0 { + s.initServerWorkers() + } + + if channelz.IsOn() { + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + } + return s +} + +// printf records an event in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...interface{}) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) errorf(format string, a ...interface{}) { + if s.events != nil { + s.events.Errorf(format, a...) + } +} + +// ServiceRegistrar wraps a single method that supports service registration. It +// enables users to pass concrete types other than grpc.Server to the service +// registration methods exported by the IDL generated code. +type ServiceRegistrar interface { + // RegisterService registers a service and its implementation to the + // concrete type implementing this interface. It may not be called + // once the server has started serving. + // desc describes the service and its methods and handlers. impl is the + // service implementation which is passed to the method handlers. + RegisterService(desc *ServiceDesc, impl interface{}) +} + +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before +// invoking Serve. If ss is non-nil (for legacy code), its type is checked to +// ensure it implements sd.HandlerType. +func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { + if ss != nil { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } + } + s.register(sd, ss) +} + +func (s *Server) register(sd *ServiceDesc, ss interface{}) { + s.mu.Lock() + defer s.mu.Unlock() + s.printf("RegisterService(%q)", sd.ServiceName) + if s.serve { + logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + } + if _, ok := s.services[sd.ServiceName]; ok { + logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + } + info := &serviceInfo{ + serviceImpl: ss, + methods: make(map[string]*MethodDesc), + streams: make(map[string]*StreamDesc), + mdata: sd.Metadata, + } + for i := range sd.Methods { + d := &sd.Methods[i] + info.methods[d.MethodName] = d + } + for i := range sd.Streams { + d := &sd.Streams[i] + info.streams[d.StreamName] = d + } + s.services[sd.ServiceName] = info +} + +// MethodInfo contains the information of an RPC including its method name and type. +type MethodInfo struct { + // Name is the method name only, without the service name or package name. 
+	Name string
+	// IsClientStream indicates whether the RPC is a client streaming RPC.
+	IsClientStream bool
+	// IsServerStream indicates whether the RPC is a server streaming RPC.
+	IsServerStream bool
+}
+
+// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service.
+type ServiceInfo struct {
+	Methods []MethodInfo
+	// Metadata is the metadata specified in ServiceDesc when registering service.
+	Metadata interface{}
+}
+
+// GetServiceInfo returns a map from service names to ServiceInfo.
+// Service names include the package names, in the form of <package>.<service>.
+func (s *Server) GetServiceInfo() map[string]ServiceInfo {
+	ret := make(map[string]ServiceInfo)
+	for n, srv := range s.services {
+		methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams))
+		for m := range srv.methods {
+			methods = append(methods, MethodInfo{
+				Name:           m,
+				IsClientStream: false,
+				IsServerStream: false,
+			})
+		}
+		for m, d := range srv.streams {
+			methods = append(methods, MethodInfo{
+				Name:           m,
+				IsClientStream: d.ClientStreams,
+				IsServerStream: d.ServerStreams,
+			})
+		}
+
+		ret[n] = ServiceInfo{
+			Methods:  methods,
+			Metadata: srv.mdata,
+		}
+	}
+	return ret
+}
+
+// ErrServerStopped indicates that the operation is now illegal because of
+// the server being stopped.
+var ErrServerStopped = errors.New("grpc: the server has been stopped")
+
+type listenSocket struct {
+	net.Listener
+	channelzID int64
+}
+
+func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
+	return &channelz.SocketInternalMetric{
+		SocketOptions: channelz.GetSocketOption(l.Listener),
+		LocalAddr:     l.Listener.Addr(),
+	}
+}
+
+func (l *listenSocket) Close() error {
+	err := l.Listener.Close()
+	if channelz.IsOn() {
+		channelz.RemoveEntry(l.channelzID)
+	}
+	return err
+}
+
+// Serve accepts incoming connections on the listener lis, creating a new
+// ServerTransport and service goroutine for each. The service goroutines
+// read gRPC requests and then call the registered handlers to reply to them.
+// Serve returns when lis.Accept fails with fatal errors. lis will be closed when
+// this method returns.
+// Serve will return a non-nil error unless Stop or GracefulStop is called.
+func (s *Server) Serve(lis net.Listener) error {
+	s.mu.Lock()
+	s.printf("serving")
+	s.serve = true
+	if s.lis == nil {
+		// Serve called after Stop or GracefulStop.
+		s.mu.Unlock()
+		lis.Close()
+		return ErrServerStopped
+	}
+
+	s.serveWG.Add(1)
+	defer func() {
+		s.serveWG.Done()
+		if s.quit.HasFired() {
+			// Stop or GracefulStop called; block until done and return nil.
+ <-s.done.Done() + } + }() + + ls := &listenSocket{Listener: lis} + s.lis[ls] = true + + if channelz.IsOn() { + ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + } + s.mu.Unlock() + + defer func() { + s.mu.Lock() + if s.lis != nil && s.lis[ls] { + ls.Close() + delete(s.lis, ls) + } + s.mu.Unlock() + }() + + var tempDelay time.Duration // how long to sleep on accept failure + + for { + rawConn, err := lis.Accept() + if err != nil { + if ne, ok := err.(interface { + Temporary() bool + }); ok && ne.Temporary() { + if tempDelay == 0 { + tempDelay = 5 * time.Millisecond + } else { + tempDelay *= 2 + } + if max := 1 * time.Second; tempDelay > max { + tempDelay = max + } + s.mu.Lock() + s.printf("Accept error: %v; retrying in %v", err, tempDelay) + s.mu.Unlock() + timer := time.NewTimer(tempDelay) + select { + case <-timer.C: + case <-s.quit.Done(): + timer.Stop() + return nil + } + continue + } + s.mu.Lock() + s.printf("done serving; Accept = %v", err) + s.mu.Unlock() + + if s.quit.HasFired() { + return nil + } + return err + } + tempDelay = 0 + // Start a new goroutine to deal with rawConn so we don't stall this Accept + // loop goroutine. + // + // Make sure we account for the goroutine so GracefulStop doesn't nil out + // s.conns before this conn can be added. + s.serveWG.Add(1) + go func() { + s.handleRawConn(lis.Addr().String(), rawConn) + s.serveWG.Done() + }() + } +} + +// handleRawConn forks a goroutine to handle a just-accepted connection that +// has not had any I/O performed on it yet. +func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { + if s.quit.HasFired() { + rawConn.Close() + return + } + rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) + + // Finish handshaking (HTTP2) + st := s.newHTTP2Transport(rawConn) + rawConn.SetDeadline(time.Time{}) + if st == nil { + return + } + + if !s.addConn(lisAddr, st) { + return + } + go func() { + s.serveStreams(st) + s.removeConn(lisAddr, st) + }() +} + +func (s *Server) drainServerTransports(addr string) { + s.mu.Lock() + conns := s.conns[addr] + for st := range conns { + st.Drain() + } + s.mu.Unlock() +} + +// newHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go). +func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { + config := &transport.ServerConfig{ + MaxStreams: s.opts.maxConcurrentStreams, + ConnectionTimeout: s.opts.connectionTimeout, + Credentials: s.opts.creds, + InTapHandle: s.opts.inTapHandle, + StatsHandler: s.opts.statsHandler, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, + ChannelzParentID: s.channelzID, + MaxHeaderListSize: s.opts.maxHeaderListSize, + HeaderTableSize: s.opts.headerTableSize, + } + st, err := transport.NewServerTransport(c, config) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + // Don't log on ErrConnDispatched and io.EOF to prevent log spam. 
+ if err != io.EOF { + channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + } + c.Close() + } + return nil + } + + return st +} + +func (s *Server) serveStreams(st transport.ServerTransport) { + defer st.Close() + var wg sync.WaitGroup + + var roundRobinCounter uint32 + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) + if s.opts.numServerWorkers > 0 { + data := &serverWorkerData{st: st, wg: &wg, stream: stream} + select { + case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: + default: + // If all stream workers are busy, fallback to the default code path. + go func() { + s.handleStream(st, stream, s.traceInfo(st, stream)) + wg.Done() + }() + } + } else { + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + } + }, func(ctx context.Context, method string) context.Context { + if !EnableTracing { + return ctx + } + tr := trace.New("grpc.Recv."+methodFamily(method), method) + return trace.NewContext(ctx, tr) + }) + wg.Wait() +} + +var _ http.Handler = (*Server)(nil) + +// ServeHTTP implements the Go standard library's http.Handler +// interface by responding to the gRPC request r, by looking up +// the requested gRPC method in the gRPC server s. +// +// The provided HTTP request must have arrived on an HTTP/2 +// connection. When using the Go standard library's server, +// practically this means that the Request must also have arrived +// over TLS. +// +// To share one port (such as 443 for https) between gRPC and an +// existing http.Handler, use a root http.Handler such as: +// +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } +// +// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally +// separate from grpc-go's HTTP/2 server. Performance and features may vary +// between the two paths. ServeHTTP does not support some gRPC features +// available through grpc-go's HTTP/2 server. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if !s.addConn(listenerAddressForServeHTTP, st) { + return + } + defer s.removeConn(listenerAddressForServeHTTP, st) + s.serveStreams(st) +} + +// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +// If tracing is not enabled, it returns nil. +func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + if !EnableTracing { + return nil + } + tr, ok := trace.FromContext(stream.Context()) + if !ok { + return nil + } + + trInfo = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: st.RemoteAddr(), + }, + } + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = time.Until(dl) + } + return trInfo +} + +func (s *Server) addConn(addr string, st transport.ServerTransport) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + st.Close() + return false + } + if s.drain { + // Transport added after we drained our existing conns: drain it + // immediately. 
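+		// Draining notifies the client (via GOAWAY on HTTP/2) that new
+		// RPCs should go elsewhere while in-flight RPCs finish.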
+ st.Drain() + } + + if s.conns[addr] == nil { + // Create a map entry if this is the first connection on this listener. + s.conns[addr] = make(map[transport.ServerTransport]bool) + } + s.conns[addr][st] = true + return true +} + +func (s *Server) removeConn(addr string, st transport.ServerTransport) { + s.mu.Lock() + defer s.mu.Unlock() + + conns := s.conns[addr] + if conns != nil { + delete(conns, st) + if len(conns) == 0 { + // If the last connection for this address is being removed, also + // remove the map entry corresponding to the address. This is used + // in GracefulStop() when waiting for all connections to be closed. + delete(s.conns, addr) + } + s.cv.Broadcast() + } +} + +func (s *Server) channelzMetric() *channelz.ServerInternalMetric { + return &channelz.ServerInternalMetric{ + CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), + } +} + +func (s *Server) incrCallsStarted() { + atomic.AddInt64(&s.czData.callsStarted, 1) + atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (s *Server) incrCallsSucceeded() { + atomic.AddInt64(&s.czData.callsSucceeded, 1) +} + +func (s *Server) incrCallsFailed() { + atomic.AddInt64(&s.czData.callsFailed, 1) +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { + data, err := encode(s.getCodec(stream.ContentSubtype()), msg) + if err != nil { + channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) + return err + } + compData, err := compress(data, cp, comp) + if err != nil { + channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err) + return err + } + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, payload, opts) + if err == nil && s.opts.statsHandler != nil { + s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } + return err +} + +// chainUnaryServerInterceptors chains all unary server interceptors into one. +func chainUnaryServerInterceptors(s *Server) { + // Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainUnaryInts + if s.opts.unaryInt != nil { + interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...) + } + + var chainedInt UnaryServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = chainUnaryInterceptors(interceptors) + } + + s.opts.unaryInt = chainedInt +} + +func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + // the struct ensures the variables are allocated together, rather than separately, since we + // know they should be garbage collected together. 
This saves 1 allocation and decreases + // time/call by about 10% on the microbenchmark. + var state struct { + i int + next UnaryHandler + } + state.next = func(ctx context.Context, req interface{}) (interface{}, error) { + if state.i == len(interceptors)-1 { + return interceptors[state.i](ctx, req, info, handler) + } + state.i++ + return interceptors[state.i-1](ctx, req, info, state.next) + } + return state.next(ctx, req) + } +} + +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { + sh := s.opts.statsHandler + if sh != nil || trInfo != nil || channelz.IsOn() { + if channelz.IsOn() { + s.incrCallsStarted() + } + var statsBegin *stats.Begin + if sh != nil { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + IsClientStream: false, + IsServerStream: false, + } + sh.HandleRPC(stream.Context(), statsBegin) + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + // The deferred error handling for tracing, stats handler and channelz are + // combined into one function to reduce stack usage -- a defer takes ~56-64 + // bytes on the stack, so overflowing the stack will require a stack + // re-allocation, which is expensive. + // + // To maintain behavior similar to separate deferred statements, statements + // should be executed in the reverse order. That is, tracing first, stats + // handler second, and channelz last. Note that panics *within* defers will + // lead to different behavior, but that's an acceptable compromise; that + // would be undefined behavior territory anyway. + defer func() { + if trInfo != nil { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + trInfo.tr.Finish() + } + + if sh != nil { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + + binlog := binarylog.GetMethodLogger(stream.Method()) + if binlog != nil { + ctx := stream.Context() + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ctx); ok { + logEntry.PeerAddr = peer.Addr + } + binlog.Log(logEntry) + } + + // comp and cp are used for compression. decomp and dc are used for + // decompression. If comp and decomp are both set, they are the same; + // however they are kept separate to ensure that at most one of the + // compressor/decompressor variable pairs are set for use later. + var comp, decomp encoding.Compressor + var cp Compressor + var dc Decompressor + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. 
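+	// Registered compressors are those installed via
+	// encoding.RegisterCompressor, e.g. by importing
+	// "google.golang.org/grpc/encoding/gzip" for gzip support.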
+ if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + decomp = encoding.GetCompressor(rc) + if decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(stream, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + cp = s.opts.cp + stream.SetSendCompress(cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + comp = encoding.GetCompressor(rc) + if comp != nil { + stream.SetSendCompress(rc) + } + } + + var payInfo *payloadInfo + if sh != nil || binlog != nil { + payInfo = &payloadInfo{} + } + d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + if err != nil { + if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) + } + return err + } + if channelz.IsOn() { + t.IncrMsgRecv() + } + df := func(v interface{}) error { + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + if sh != nil { + sh.HandleRPC(stream.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: v, + WireLength: payInfo.wireLength + headerLen, + Data: d, + Length: len(d), + }) + } + if binlog != nil { + binlog.Log(&binarylog.ClientMessage{ + Message: d, + }) + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + if binlog != nil { + if h, _ := stream.Header(); h.Len() > 0 { + // Only log serverHeader if there was header. Otherwise it can + // be trailer only. + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + } + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{Last: true} + + if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if sts, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, sts); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
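+				// The connection is already broken, so attempting
+				// to write a status would fail as well.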
+ default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) + } + } + if binlog != nil { + h, _ := stream.Header() + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return err + } + if binlog != nil { + h, _ := stream.Header() + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + binlog.Log(&binarylog.ServerMessage{ + Message: reply, + }) + } + if channelz.IsOn() { + t.IncrMsgSent() + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? + err = t.WriteStatus(stream, statusOK) + if binlog != nil { + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return err +} + +// chainStreamServerInterceptors chains all stream server interceptors into one. +func chainStreamServerInterceptors(s *Server) { + // Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainStreamInts + if s.opts.streamInt != nil { + interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...) + } + + var chainedInt StreamServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = chainStreamInterceptors(interceptors) + } + + s.opts.streamInt = chainedInt +} + +func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { + return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + // the struct ensures the variables are allocated together, rather than separately, since we + // know they should be garbage collected together. This saves 1 allocation and decreases + // time/call by about 10% on the microbenchmark. + var state struct { + i int + next StreamHandler + } + state.next = func(srv interface{}, ss ServerStream) error { + if state.i == len(interceptors)-1 { + return interceptors[state.i](srv, ss, info, handler) + } + state.i++ + return interceptors[state.i-1](srv, ss, info, state.next) + } + return state.next(srv, ss) + } +} + +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + } + sh := s.opts.statsHandler + var statsBegin *stats.Begin + if sh != nil { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + sh.HandleRPC(stream.Context(), statsBegin) + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ss := &serverStream{ + ctx: ctx, + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.getCodec(stream.ContentSubtype()), + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, + statsHandler: sh, + } + + if sh != nil || trInfo != nil || channelz.IsOn() { + // See comment in processUnaryRPC on defers. 
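+		// (As there: tracing finishes first, then the stats handler
+		// runs, then the channelz counters are updated.)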
+ defer func() { + if trInfo != nil { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + } + + if sh != nil { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + + ss.binlog = binarylog.GetMethodLogger(stream.Method()) + if ss.binlog != nil { + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ss.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + ss.binlog.Log(logEntry) + } + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + ss.dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + ss.decomp = encoding.GetCompressor(rc) + if ss.decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(ss.s, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + ss.cp = s.opts.cp + stream.SetSendCompress(s.opts.cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + ss.comp = encoding.GetCompressor(rc) + if ss.comp != nil { + stream.SetSendCompress(rc) + } + } + + ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) + + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + var appErr error + var server interface{} + if info != nil { + server = info.serviceImpl + } + if s.opts.streamInt == nil { + appErr = sd.Handler(server, ss) + } else { + info := &StreamServerInfo{ + FullMethod: stream.Method(), + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + appErr = s.opts.streamInt(server, ss, info, sd.Handler) + } + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + ss.trInfo.tr.SetError() + ss.mu.Unlock() + } + t.WriteStatus(ss.s, appStatus) + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, + }) + } + // TODO: Should we log an error from WriteStatus here and below? 
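+		// For reference, handlers normally return errors created by the
+		// status package, e.g. status.Errorf(codes.NotFound, ...), which
+		// FromError above recognizes without the Unknown-code fallback.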
+		return appErr
+	}
+	if trInfo != nil {
+		ss.mu.Lock()
+		ss.trInfo.tr.LazyLog(stringer("OK"), false)
+		ss.mu.Unlock()
+	}
+	err = t.WriteStatus(ss.s, statusOK)
+	if ss.binlog != nil {
+		ss.binlog.Log(&binarylog.ServerTrailer{
+			Trailer: ss.s.Trailer(),
+			Err:     appErr,
+		})
+	}
+	return err
+}
+
+func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
+	sm := stream.Method()
+	if sm != "" && sm[0] == '/' {
+		sm = sm[1:]
+	}
+	pos := strings.LastIndex(sm, "/")
+	if pos == -1 {
+		if trInfo != nil {
+			trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true)
+			trInfo.tr.SetError()
+		}
+		errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
+		if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
+			if trInfo != nil {
+				trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+				trInfo.tr.SetError()
+			}
+			channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
+		}
+		if trInfo != nil {
+			trInfo.tr.Finish()
+		}
+		return
+	}
+	service := sm[:pos]
+	method := sm[pos+1:]
+
+	srv, knownService := s.services[service]
+	if knownService {
+		if md, ok := srv.methods[method]; ok {
+			s.processUnaryRPC(t, stream, srv, md, trInfo)
+			return
+		}
+		if sd, ok := srv.streams[method]; ok {
+			s.processStreamingRPC(t, stream, srv, sd, trInfo)
+			return
+		}
+	}
+	// Unknown service, or known service but unknown method.
+	if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
+		s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
+		return
+	}
+	var errDesc string
+	if !knownService {
+		errDesc = fmt.Sprintf("unknown service %v", service)
+	} else {
+		errDesc = fmt.Sprintf("unknown method %v for service %v", method, service)
+	}
+	if trInfo != nil {
+		trInfo.tr.LazyPrintf("%s", errDesc)
+		trInfo.tr.SetError()
+	}
+	if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
+		if trInfo != nil {
+			trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+			trInfo.tr.SetError()
+		}
+		channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
+	}
+	if trInfo != nil {
+		trInfo.tr.Finish()
+	}
+}
+
+// The key to save ServerTransportStream in the context.
+type streamKey struct{}
+
+// NewContextWithServerTransportStream creates a new context from ctx and
+// attaches stream to it.
+//
+// Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context {
+	return context.WithValue(ctx, streamKey{}, stream)
+}
+
+// ServerTransportStream is a minimal interface that a transport stream must
+// implement. This can be used to mock an actual transport stream for tests of
+// handler code that uses, for example, grpc.SetHeader (which requires some
+// stream to be in context).
+//
+// See also NewContextWithServerTransportStream.
+//
+// Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ServerTransportStream interface {
+	Method() string
+	SetHeader(md metadata.MD) error
+	SendHeader(md metadata.MD) error
+	SetTrailer(md metadata.MD) error
+}
+
+// ServerTransportStreamFromContext returns the ServerTransportStream saved in
+// ctx. Returns nil if the given context has no stream associated with it
+// (which implies it is not an RPC invocation context).
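+//
+// A hypothetical guard in code that may run outside an RPC (sketch only;
+// the header key and value are illustrative):
+//
+//	if sts := ServerTransportStreamFromContext(ctx); sts != nil {
+//		_ = sts.SetHeader(metadata.Pairs("x-request-id", "123"))
+//	}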
+// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { + s, _ := ctx.Value(streamKey{}).(ServerTransportStream) + return s +} + +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. +func (s *Server) Stop() { + s.quit.Fire() + + defer func() { + s.serveWG.Wait() + s.done.Fire() + }() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + + s.mu.Lock() + listeners := s.lis + s.lis = nil + conns := s.conns + s.conns = nil + // interrupt GracefulStop if Stop and GracefulStop are called concurrently. + s.cv.Broadcast() + s.mu.Unlock() + + for lis := range listeners { + lis.Close() + } + for _, cs := range conns { + for st := range cs { + st.Close() + } + } + if s.opts.numServerWorkers > 0 { + s.stopServerWorkers() + } + + s.mu.Lock() + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.quit.Fire() + defer s.done.Fire() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + return + } + + for lis := range s.lis { + lis.Close() + } + s.lis = nil + if !s.drain { + for _, conns := range s.conns { + for st := range conns { + st.Drain() + } + } + s.drain = true + } + + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. + s.mu.Unlock() + s.serveWG.Wait() + s.mu.Lock() + + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// contentSubtype must be lowercase +// cannot return nil +func (s *Server) getCodec(contentSubtype string) baseCodec { + if s.opts.codec != nil { + return s.opts.codec + } + if contentSubtype == "" { + return encoding.GetCodec(proto.Name) + } + codec := encoding.GetCodec(contentSubtype) + if codec == nil { + return encoding.GetCodec(proto.Name) + } + return codec +} + +// SetHeader sets the header metadata. +// When called multiple times, all the provided metadata will be merged. +// All the metadata will be sent out when one of the following happens: +// - grpc.SendHeader() is called; +// - The first response is sent out; +// - An RPC status is sent out (error or success). +func SetHeader(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetHeader(md) +} + +// SendHeader sends header metadata. It may be called at most once. +// The provided md and headers set by SetHeader() will be sent. 
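+//
+// An illustrative sequence from a unary handler, showing that metadata set
+// via SetHeader is merged into the SendHeader call (values are hypothetical):
+//
+//	_ = SetHeader(ctx, metadata.Pairs("cache", "hit"))
+//	_ = SendHeader(ctx, metadata.Pairs("served-by", "host-1"))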
+func SendHeader(ctx context.Context, md metadata.MD) error { + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + if err := stream.SendHeader(md); err != nil { + return toRPCErr(err) + } + return nil +} + +// SetTrailer sets the trailer metadata that will be sent when an RPC returns. +// When called more than once, all the provided metadata will be merged. +func SetTrailer(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetTrailer(md) +} + +// Method returns the method string for the server context. The returned +// string is in the format of "/service/method". +func Method(ctx context.Context) (string, bool) { + s := ServerTransportStreamFromContext(ctx) + if s == nil { + return "", false + } + return s.Method(), true +} + +type channelzServer struct { + s *Server +} + +func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { + return c.s.channelzMetric() +} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go new file mode 100644 index 000000000..6926a06dc --- /dev/null +++ b/vendor/google.golang.org/grpc/service_config.go @@ -0,0 +1,404 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +const maxInt = int(^uint(0) >> 1) + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type MethodConfig = internalserviceconfig.MethodConfig + +type lbConfig struct { + name string + cfg serviceconfig.LoadBalancingConfig +} + +// ServiceConfig is provided by the service provider and contains parameters for how +// clients that connect to the service should behave. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type ServiceConfig struct { + serviceconfig.Config + + // LB is the load balancer the service providers recommends. The balancer + // specified via grpc.WithBalancerName will override this. This is deprecated; + // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig + // will be used. 
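+	// For illustration, a service config JSON that selects round_robin
+	// through the newer loadBalancingConfig field (which takes precedence
+	// over this one) looks like:
+	//
+	//	{"loadBalancingConfig": [{"round_robin": {}}]}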
+	LB *string
+
+	// lbConfig is the service config's load balancing configuration. If
+	// lbConfig and LB are both present, lbConfig will be used.
+	lbConfig *lbConfig
+
+	// Methods contains a map for the methods in this service. If there is an
+	// exact match for a method (i.e. /service/method) in the map, use the
+	// corresponding MethodConfig. If there's no exact match, look for the
+	// default config for the service (/service/) and use the corresponding
+	// MethodConfig if it exists. Otherwise, the method has no MethodConfig to
+	// use.
+	Methods map[string]MethodConfig
+
+	// If a retryThrottlingPolicy is provided, gRPC will automatically throttle
+	// retry attempts and hedged RPCs when the client’s ratio of failures to
+	// successes exceeds a threshold.
+	//
+	// For each server name, the gRPC client will maintain a token_count which is
+	// initially set to maxTokens, and can take values between 0 and maxTokens.
+	//
+	// Every outgoing RPC (regardless of service or method invoked) will change
+	// token_count as follows:
+	//
+	//   - Every failed RPC will decrement the token_count by 1.
+	//   - Every successful RPC will increment the token_count by tokenRatio.
+	//
+	// If token_count is less than or equal to maxTokens / 2, then RPCs will not
+	// be retried and hedged RPCs will not be sent.
+	retryThrottling *retryThrottlingPolicy
+	// healthCheckConfig must be set as one of the requirements to enable LB channel
+	// health check.
+	healthCheckConfig *healthCheckConfig
+	// rawJSONString stores the service config JSON string that gets parsed into
+	// this service config struct.
+	rawJSONString string
+}
+
+// healthCheckConfig defines the go-native version of the LB channel health check config.
+type healthCheckConfig struct {
+	// serviceName is the service name to use in the health-checking request.
+	ServiceName string
+}
+
+type jsonRetryPolicy struct {
+	MaxAttempts          int
+	InitialBackoff       string
+	MaxBackoff           string
+	BackoffMultiplier    float64
+	RetryableStatusCodes []codes.Code
+}
+
+// retryThrottlingPolicy defines the go-native version of the retry throttling
+// policy defined by the service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type retryThrottlingPolicy struct {
+	// The number of tokens starts at maxTokens. The token_count will always be
+	// between 0 and maxTokens.
+	//
+	// This field is required and must be greater than zero.
+	MaxTokens float64
+	// The amount of tokens to add on each successful RPC. Typically this will
+	// be some number between 0 and 1, e.g., 0.1.
+	//
+	// This field is required and must be greater than zero. Up to 3 decimal
+	// places are supported.
+	TokenRatio float64
+}
+
+func parseDuration(s *string) (*time.Duration, error) {
+	if s == nil {
+		return nil, nil
+	}
+	if !strings.HasSuffix(*s, "s") {
+		return nil, fmt.Errorf("malformed duration %q", *s)
+	}
+	ss := strings.SplitN((*s)[:len(*s)-1], ".", 3)
+	if len(ss) > 2 {
+		return nil, fmt.Errorf("malformed duration %q", *s)
+	}
+	// hasDigits is set if either the whole or fractional part of the number is
+	// present, since both are optional but one is required.
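+	// For example, "1s", "1.5s", and ".5s" are accepted, while "s" alone
+	// or more than nine fractional digits are rejected below.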
+ hasDigits := false + var d time.Duration + if len(ss[0]) > 0 { + i, err := strconv.ParseInt(ss[0], 10, 32) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + d = time.Duration(i) * time.Second + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + f, err := strconv.ParseInt(ss[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + for i := 9; i > len(ss[1]); i-- { + f *= 10 + } + d += time.Duration(f) + hasDigits = true + } + if !hasDigits { + return nil, fmt.Errorf("malformed duration %q", *s) + } + + return &d, nil +} + +type jsonName struct { + Service string + Method string +} + +var ( + errDuplicatedName = errors.New("duplicated name") + errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'") +) + +func (j jsonName) generatePath() (string, error) { + if j.Service == "" { + if j.Method != "" { + return "", errEmptyServiceNonEmptyMethod + } + return "", nil + } + res := "/" + j.Service + "/" + if j.Method != "" { + res += j.Method + } + return res, nil +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonMC struct { + Name *[]jsonName + WaitForReady *bool + Timeout *string + MaxRequestMessageBytes *int64 + MaxResponseMessageBytes *int64 + RetryPolicy *jsonRetryPolicy +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonSC struct { + LoadBalancingPolicy *string + LoadBalancingConfig *internalserviceconfig.BalancerConfig + MethodConfig *[]jsonMC + RetryThrottling *retryThrottlingPolicy + HealthCheckConfig *healthCheckConfig +} + +func init() { + internal.ParseServiceConfig = parseServiceConfig +} +func parseServiceConfig(js string) *serviceconfig.ParseResult { + if len(js) == 0 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} + } + var rsc jsonSC + err := json.Unmarshal([]byte(js), &rsc) + if err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + sc := ServiceConfig{ + LB: rsc.LoadBalancingPolicy, + Methods: make(map[string]MethodConfig), + retryThrottling: rsc.RetryThrottling, + healthCheckConfig: rsc.HealthCheckConfig, + rawJSONString: js, + } + if c := rsc.LoadBalancingConfig; c != nil { + sc.lbConfig = &lbConfig{ + name: c.Name, + cfg: c.Config, + } + } + + if rsc.MethodConfig == nil { + return &serviceconfig.ParseResult{Config: &sc} + } + + paths := map[string]struct{}{} + for _, m := range *rsc.MethodConfig { + if m.Name == nil { + continue + } + d, err := parseDuration(m.Timeout) + if err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + + mc := MethodConfig{ + WaitForReady: m.WaitForReady, + Timeout: d, + } + if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + if m.MaxRequestMessageBytes != nil { + if *m.MaxRequestMessageBytes > int64(maxInt) { + mc.MaxReqSize = newInt(maxInt) + } else { + mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes)) + } + } + if m.MaxResponseMessageBytes != nil { + if *m.MaxResponseMessageBytes > int64(maxInt) { + mc.MaxRespSize = newInt(maxInt) + } 
else { + mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) + } + } + for i, n := range *m.Name { + path, err := n.generatePath() + if err != nil { + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} + } + + if _, ok := paths[path]; ok { + err = errDuplicatedName + logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} + } + paths[path] = struct{}{} + sc.Methods[path] = mc + } + } + + if sc.retryThrottling != nil { + if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)} + } + if tr := sc.retryThrottling.TokenRatio; tr <= 0 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)} + } + } + return &serviceconfig.ParseResult{Config: &sc} +} + +func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) { + if jrp == nil { + return nil, nil + } + ib, err := parseDuration(&jrp.InitialBackoff) + if err != nil { + return nil, err + } + mb, err := parseDuration(&jrp.MaxBackoff) + if err != nil { + return nil, err + } + + if jrp.MaxAttempts <= 1 || + *ib <= 0 || + *mb <= 0 || + jrp.BackoffMultiplier <= 0 || + len(jrp.RetryableStatusCodes) == 0 { + logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) + return nil, nil + } + + rp := &internalserviceconfig.RetryPolicy{ + MaxAttempts: jrp.MaxAttempts, + InitialBackoff: *ib, + MaxBackoff: *mb, + BackoffMultiplier: jrp.BackoffMultiplier, + RetryableStatusCodes: make(map[codes.Code]bool), + } + if rp.MaxAttempts > 5 { + // TODO(retry): Make the max maxAttempts configurable. + rp.MaxAttempts = 5 + } + for _, code := range jrp.RetryableStatusCodes { + rp.RetryableStatusCodes[code] = true + } + return rp, nil +} + +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + +func newInt(b int) *int { + return &b +} + +func init() { + internal.EqualServiceConfigForTesting = equalServiceConfig +} + +// equalServiceConfig compares two configs. The rawJSONString field is ignored, +// because they may diff in white spaces. +// +// If any of them is NOT *ServiceConfig, return false. +func equalServiceConfig(a, b serviceconfig.Config) bool { + aa, ok := a.(*ServiceConfig) + if !ok { + return false + } + bb, ok := b.(*ServiceConfig) + if !ok { + return false + } + aaRaw := aa.rawJSONString + aa.rawJSONString = "" + bbRaw := bb.rawJSONString + bb.rawJSONString = "" + defer func() { + aa.rawJSONString = aaRaw + bb.rawJSONString = bbRaw + }() + // Using reflect.DeepEqual instead of cmp.Equal because many balancer + // configs are unexported, and cmp.Equal cannot compare unexported fields + // from unexported structs. 
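+	// reflect.DeepEqual can observe unexported fields via reflection,
+	// whereas cmp.Equal panics on them unless explicitly configured.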
+ return reflect.DeepEqual(aa, bb) +} diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go new file mode 100644 index 000000000..73a2f9266 --- /dev/null +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -0,0 +1,44 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig defines types and methods for operating on gRPC +// service configs. +// +// Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package serviceconfig + +// Config represents an opaque data structure holding a service config. +type Config interface { + isServiceConfig() +} + +// LoadBalancingConfig represents an opaque data structure holding a load +// balancing config. +type LoadBalancingConfig interface { + isLoadBalancingConfig() +} + +// ParseResult contains a service config or an error. Exactly one must be +// non-nil. +type ParseResult struct { + Config Config + Err error +} diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go new file mode 100644 index 000000000..dc03731e4 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "context" + "net" +) + +// ConnTagInfo defines the relevant information needed by connection context tagger. +type ConnTagInfo struct { + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// RPCTagInfo defines the relevant information needed by RPC context tagger. +type RPCTagInfo struct { + // FullMethodName is the RPC method in the format of /package.service/method. + FullMethodName string + // FailFast indicates if this RPC is failfast. + // This field is only valid on client side, it's always false on server side. + FailFast bool +} + +// Handler defines the interface for the related stats handling (e.g., RPCs, connections). +type Handler interface { + // TagRPC can attach some information to the given context. + // The context used for the rest lifetime of the RPC will be derived from + // the returned context. 
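+	// For example, an implementation might attach a request-scoped ID
+	// here and read it back in HandleRPC for every stats event of the RPC.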
+ TagRPC(context.Context, *RPCTagInfo) context.Context + // HandleRPC processes the RPC stats. + HandleRPC(context.Context, RPCStats) + + // TagConn can attach some information to the given context. + // The returned context will be used for stats handling. + // For conn stats handling, the context used in HandleConn for this + // connection will be derived from the context returned. + // For RPC stats handling, + // - On server side, the context used in HandleRPC for all RPCs on this + // connection will be derived from the context returned. + // - On client side, the context is not derived from the context returned. + TagConn(context.Context, *ConnTagInfo) context.Context + // HandleConn processes the Conn stats. + HandleConn(context.Context, ConnStats) +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go new file mode 100644 index 000000000..0285dcc6a --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -0,0 +1,319 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stats is for collecting and reporting various network and RPC stats. +// This package is for monitoring purpose only. All fields are read-only. +// All APIs are experimental. +package stats // import "google.golang.org/grpc/stats" + +import ( + "context" + "net" + "time" + + "google.golang.org/grpc/metadata" +) + +// RPCStats contains stats information about RPCs. +type RPCStats interface { + isRPCStats() + // IsClient returns true if this RPCStats is from client side. + IsClient() bool +} + +// Begin contains stats when an RPC attempt begins. +// FailFast is only valid if this Begin is from client side. +type Begin struct { + // Client is true if this Begin is from client side. + Client bool + // BeginTime is the time when the RPC attempt begins. + BeginTime time.Time + // FailFast indicates if this RPC is failfast. + FailFast bool + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool + // IsTransparentRetryAttempt indicates whether this attempt was initiated + // due to transparently retrying a previous attempt. + IsTransparentRetryAttempt bool +} + +// IsClient indicates if the stats information is from client side. +func (s *Begin) IsClient() bool { return s.Client } + +func (s *Begin) isRPCStats() {} + +// InPayload contains the information for an incoming payload. +type InPayload struct { + // Client is true if this InPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // RecvTime is the time when the payload is received. 
+ RecvTime time.Time +} + +// IsClient indicates if the stats information is from client side. +func (s *InPayload) IsClient() bool { return s.Client } + +func (s *InPayload) isRPCStats() {} + +// InHeader contains stats when a header is received. +type InHeader struct { + // Client is true if this InHeader is from client side. + Client bool + // WireLength is the wire length of header. + WireLength int + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata received. + Header metadata.MD + + // The following fields are valid only if Client is false. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// IsClient indicates if the stats information is from client side. +func (s *InHeader) IsClient() bool { return s.Client } + +func (s *InHeader) isRPCStats() {} + +// InTrailer contains stats when a trailer is received. +type InTrailer struct { + // Client is true if this InTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this InTrailer is from the client side. + Trailer metadata.MD +} + +// IsClient indicates if the stats information is from client side. +func (s *InTrailer) IsClient() bool { return s.Client } + +func (s *InTrailer) isRPCStats() {} + +// OutPayload contains the information for an outgoing payload. +type OutPayload struct { + // Client is true if this OutPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // SentTime is the time when the payload is sent. + SentTime time.Time +} + +// IsClient indicates if this stats information is from client side. +func (s *OutPayload) IsClient() bool { return s.Client } + +func (s *OutPayload) isRPCStats() {} + +// OutHeader contains stats when a header is sent. +type OutHeader struct { + // Client is true if this OutHeader is from client side. + Client bool + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata sent. + Header metadata.MD + + // The following fields are valid only if Client is true. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// IsClient indicates if this stats information is from client side. +func (s *OutHeader) IsClient() bool { return s.Client } + +func (s *OutHeader) isRPCStats() {} + +// OutTrailer contains stats when a trailer is sent. +type OutTrailer struct { + // Client is true if this OutTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + // + // Deprecated: This field is never set. The length is not known when this message is + // emitted because the trailer fields are compressed with hpack after that. 
+ WireLength int + // Trailer contains the trailer metadata sent to the client. This + // field is only valid if this OutTrailer is from the server side. + Trailer metadata.MD +} + +// IsClient indicates if this stats information is from client side. +func (s *OutTrailer) IsClient() bool { return s.Client } + +func (s *OutTrailer) isRPCStats() {} + +// End contains stats when an RPC ends. +type End struct { + // Client is true if this End is from client side. + Client bool + // BeginTime is the time when the RPC began. + BeginTime time.Time + // EndTime is the time when the RPC ends. + EndTime time.Time + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this End is from the client side. + // Deprecated: use Trailer in InTrailer instead. + Trailer metadata.MD + // Error is the error the RPC ended with. It is an error generated from + // status.Status and can be converted back to status.Status using + // status.FromError if non-nil. + Error error +} + +// IsClient indicates if this is from client side. +func (s *End) IsClient() bool { return s.Client } + +func (s *End) isRPCStats() {} + +// ConnStats contains stats information about connections. +type ConnStats interface { + isConnStats() + // IsClient returns true if this ConnStats is from client side. + IsClient() bool +} + +// ConnBegin contains the stats of a connection when it is established. +type ConnBegin struct { + // Client is true if this ConnBegin is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnBegin) IsClient() bool { return s.Client } + +func (s *ConnBegin) isConnStats() {} + +// ConnEnd contains the stats of a connection when it ends. +type ConnEnd struct { + // Client is true if this ConnEnd is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnEnd) IsClient() bool { return s.Client } + +func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to +// SetTags will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Tags(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. 
+func OutgoingTags(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTagsKey{}).([]byte) + return b +} + +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + +// SetTrace attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to +// SetTrace will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTraceKey{}, b) +} + +// Trace returns the trace from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Trace(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTraceKey{}).([]byte) + return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). It is intended for +// gRPC-internal use. +func SetIncomingTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC. It is +// intended for gRPC-internal use. +func OutgoingTrace(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTraceKey{}).([]byte) + return b +} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go new file mode 100644 index 000000000..6d163b6e3 --- /dev/null +++ b/vendor/google.golang.org/grpc/status/status.go @@ -0,0 +1,135 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "context" + "errors" + "fmt" + + spb "google.golang.org/genproto/googleapis/rpc/status" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/status" +) + +// Status references google.golang.org/grpc/internal/status. It represents an +// RPC status code, message, and details. 
It is immutable and should be +// created with New, Newf, or FromProto. +// https://godoc.org/google.golang.org/grpc/internal/status +type Status = status.Status + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return status.New(c, msg) +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// Error returns an error representing c and msg. If c is OK, returns nil. +func Error(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Error(c, fmt.Sprintf(format, a...)) +} + +// ErrorProto returns an error representing s. If s.Code is OK, returns nil. +func ErrorProto(s *spb.Status) error { + return FromProto(s).Err() +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return status.FromProto(s) +} + +// FromError returns a Status representation of err. +// +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status`, the appropriate Status is returned. +// +// - If err is nil, a Status is returned with codes.OK and no message. +// +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. +func FromError(err error) (s *Status, ok bool) { + if err == nil { + return nil, true + } + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { + return se.GRPCStatus(), true + } + return New(codes.Unknown, err.Error()), false +} + +// Convert is a convenience function which removes the need to handle the +// boolean return value from FromError. +func Convert(err error) *Status { + s, _ := FromError(err) + return s +} + +// Code returns the Code of the error if it is a Status error, codes.OK if err +// is nil, or codes.Unknown otherwise. +func Code(err error) codes.Code { + // Don't use FromError to avoid allocation of OK status. + if err == nil { + return codes.OK + } + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { + return se.GRPCStatus().Code() + } + return codes.Unknown +} + +// FromContextError converts a context error or wrapped context error into a +// Status. It returns a Status with codes.OK if err is nil, or a Status with +// codes.Unknown if err is non-nil and not a context error. +func FromContextError(err error) *Status { + if err == nil { + return nil + } + if errors.Is(err, context.DeadlineExceeded) { + return New(codes.DeadlineExceeded, err.Error()) + } + if errors.Is(err, context.Canceled) { + return New(codes.Canceled, err.Error()) + } + return New(codes.Unknown, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go new file mode 100644 index 000000000..8cdd652e0 --- /dev/null +++ b/vendor/google.golang.org/grpc/stream.go @@ -0,0 +1,1620 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
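The status API above can be exercised end to end; a small sketch, with the lookup function and user id invented:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func lookupUser(id string) error {
	// A handler returns a status error; codes.OK would have to map to nil.
	return status.Errorf(codes.NotFound, "user %q not found", id)
}

func main() {
	err := lookupUser("42")
	if st, ok := status.FromError(err); ok {
		fmt.Println(st.Code(), st.Message()) // NotFound user "42" not found
	}
	// Context errors convert to their canonical codes:
	fmt.Println(status.FromContextError(context.DeadlineExceeded).Code()) // DeadlineExceeded
}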
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "io" + "math" + "strconv" + "sync" + "time" + + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal/balancerload" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcutil" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. +type StreamHandler func(srv interface{}, stream ServerStream) error + +// StreamDesc represents a streaming RPC service's method specification. Used +// on the server when registering services and on the client when initiating +// new streams. +type StreamDesc struct { + // StreamName and Handler are only used when registering handlers on a + // server. + StreamName string // the name of the method excluding the service + Handler StreamHandler // the handler called for the method + + // ServerStreams and ClientStreams are used for registering handlers on a + // server as well as defining RPC behavior when passed to NewClientStream + // and ClientConn.NewStream. At least one must be true. + ServerStreams bool // indicates the server can perform streaming sends + ClientStreams bool // indicates the client can perform streaming sends +} + +// Stream defines the common interface a client or server stream has to satisfy. +// +// Deprecated: See ClientStream and ServerStream documentation instead. +type Stream interface { + // Deprecated: See ClientStream and ServerStream documentation instead. + Context() context.Context + // Deprecated: See ClientStream and ServerStream documentation instead. + SendMsg(m interface{}) error + // Deprecated: See ClientStream and ServerStream documentation instead. + RecvMsg(m interface{}) error +} + +// ClientStream defines the client-side behavior of a streaming RPC. +// +// All errors returned from ClientStream methods are compatible with the +// status package. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. 
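To make the ClientStream contract concrete before the rest of its methods below, a hedged sketch of driving a raw bidirectional stream; conn, pb.Msg, and the method path are invented, and generated code normally wraps all of this:

func chat(ctx context.Context, conn *grpc.ClientConn) error {
	desc := &grpc.StreamDesc{StreamName: "Chat", ServerStreams: true, ClientStreams: true}
	cs, err := conn.NewStream(ctx, desc, "/example.Chat/Chat")
	if err != nil {
		return err
	}
	if err := cs.SendMsg(&pb.Msg{Text: "hi"}); err != nil {
		return err // io.EOF here means the status will come from RecvMsg
	}
	if err := cs.CloseSend(); err != nil {
		return err
	}
	for {
		m := new(pb.Msg)
		if err := cs.RecvMsg(m); err != nil {
			if err == io.EOF {
				return nil // clean completion; the stream is fully released
			}
			return err // aborted; err carries the RPC status
		}
	}
}

Draining with RecvMsg until a non-nil error also satisfies the leak-avoidance rules listed in NewStream's documentation below.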
It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// NewStream creates a new Stream for the client side. This is typically +// called by generated code. ctx is used for the lifetime of the stream. +// +// To ensure resources are not leaked due to the stream returned, one of the following +// actions must be performed: +// +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// +// If none of the above happen, a goroutine and a context will be leaked, and grpc +// will not call the optionally-configured stats handler with a stats.End message. +func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.streamInt != nil { + return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) + } + return newClientStream(ctx, desc, cc, method, opts...) +} + +// NewClientStream is a wrapper for ClientConn.NewStream. +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return cc.NewStream(ctx, desc, method, opts...) 
+} + +func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if channelz.IsOn() { + cc.incrCallsStarted() + defer func() { + if err != nil { + cc.incrCallsFailed() + } + }() + } + // Provide an opportunity for the first RPC to see the first service config + // provided by the resolver. + if err := cc.waitForResolvedAddrs(ctx); err != nil { + return nil, err + } + + var mc serviceconfig.MethodConfig + var onCommit func() + var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) + } + + rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} + rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) + if err != nil { + return nil, toRPCErr(err) + } + + if rpcConfig != nil { + if rpcConfig.Context != nil { + ctx = rpcConfig.Context + } + mc = rpcConfig.MethodConfig + onCommit = rpcConfig.OnCommitted + if rpcConfig.Interceptor != nil { + rpcInfo.Context = nil + ns := newStream + newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns) + if err != nil { + return nil, toRPCErr(err) + } + return cs, nil + } + } + } + + return newStream(ctx, func() {}) +} + +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { + c := defaultCallInfo() + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady + } + + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818. + var cancel context.CancelFunc + if mc.Timeout != nil && *mc.Timeout >= 0 { + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + ContentSubtype: c.contentSubtype, + DoneFunc: doneFunc, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. 
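The compression selection described in the comment above is driven from user code; a sketch, where client.GetFeature is an invented generated method (the "gzip" name and its registration are real):

import (
	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" encoding.Compressor
)

// Per call: c.compressorType below becomes "gzip" for this RPC only.
resp, err := client.GetFeature(ctx, req, grpc.UseCompressor("gzip"))

// Per connection, via the older DialOption consulted in the else-branch:
conn, err := grpc.Dial(target, grpc.WithCompressor(grpc.NewGZIPCompressor()))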
+ var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + cp = cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + + cs := &clientStream{ + callHdr: callHdr, + ctx: ctx, + methodConfig: &mc, + opts: opts, + callInfo: c, + cc: cc, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + cancel: cancel, + firstAttempt: true, + onCommit: onCommit, + } + if !cc.dopts.disableRetry { + cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) + } + cs.binlog = binarylog.GetMethodLogger(method) + + if err := cs.newAttemptLocked(false /* isTransparent */); err != nil { + cs.finish(err) + return nil, err + } + + op := func(a *csAttempt) error { return a.newStream() } + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + cs.finish(err) + return nil, err + } + + if cs.binlog != nil { + md, _ := metadata.FromOutgoingContext(ctx) + logEntry := &binarylog.ClientHeader{ + OnClientSide: true, + Header: md, + MethodName: method, + Authority: cs.cc.authority, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + cs.binlog.Log(logEntry) + } + + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. + go func() { + select { + case <-cc.ctx.Done(): + cs.finish(ErrClientConnClosing) + case <-ctx.Done(): + cs.finish(toRPCErr(ctx.Err())) + } + }() + } + return cs, nil +} + +// newAttemptLocked creates a new attempt with a transport. +// If it succeeds, then it replaces clientStream's attempt with this new attempt. +func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) + method := cs.callHdr.Method + sh := cs.cc.dopts.copts.StatsHandler + var beginTime time.Time + if sh != nil { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: cs.callInfo.failFast, + IsClientStream: cs.desc.ClientStreams, + IsServerStream: cs.desc.ServerStreams, + IsTransparentRetryAttempt: isTransparent, + } + sh.HandleRPC(ctx, begin) + } + + var trInfo *traceInfo + if EnableTracing { + trInfo = &traceInfo{ + tr: trace.New("grpc.Sent."+methodFamily(method), method), + firstLine: firstLine{ + client: true, + }, + } + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = time.Until(deadline) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) + } + + newAttempt := &csAttempt{ + ctx: ctx, + beginTime: beginTime, + cs: cs, + dc: cs.cc.dopts.dc, + statsHandler: sh, + trInfo: trInfo, + } + defer func() { + if retErr != nil { + // This attempt is not set in the clientStream, so its finish won't + // be called.
Call it here for stats and trace in case they are not + // nil. + newAttempt.finish(retErr) + } + }() + + if err := ctx.Err(); err != nil { + return toRPCErr(err) + } + + if cs.cc.parsedTarget.Scheme == "xds" { + // Add extra metadata (metadata that will be added by transport) to context + // so the balancer can see them. + ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( + "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), + )) + } + t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) + if err != nil { + return err + } + if trInfo != nil { + trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + } + newAttempt.t = t + newAttempt.done = done + cs.attempt = newAttempt + return nil +} + +func (a *csAttempt) newStream() error { + cs := a.cs + cs.callHdr.PreviousAttempts = cs.numRetries + s, err := a.t.NewStream(a.ctx, cs.callHdr) + if err != nil { + // Return without converting to an RPC error so retry code can + // inspect. + return err + } + cs.attempt.s = s + cs.attempt.p = &parser{r: s} + return nil +} + +// clientStream implements a client side Stream. +type clientStream struct { + callHdr *transport.CallHdr + opts []CallOption + callInfo *callInfo + cc *ClientConn + desc *StreamDesc + + codec baseCodec + cp Compressor + comp encoding.Compressor + + cancel context.CancelFunc // cancels all attempts + + sentLast bool // sent an end stream + + methodConfig *MethodConfig + + ctx context.Context // the application's context, wrapped by stats/tracing + + retryThrottler *retryThrottler // The throttler active when the RPC began. + + binlog *binarylog.MethodLogger // Binary logger, can be nil. + // serverHeaderBinlogged is a boolean for whether server header has been + // logged. Server header will be logged when the first time one of those + // happens: stream.Header(), stream.Recv(). + // + // It's only read and used by Recv() and Header(), so it doesn't need to be + // synchronized. + serverHeaderBinlogged bool + + mu sync.Mutex + firstAttempt bool // if true, transparent retry is valid + numRetries int // exclusive of transparent retry attempt(s) + numRetriesSincePushback int // retries since pushback; to reset backoff + finished bool // TODO: replace with atomic cmpxchg or sync.Once? + // attempt is the active client stream attempt. + // The only place where it is written is the newAttemptLocked method and this method never writes nil. + // So, attempt can be nil only inside newClientStream function when clientStream is first created. + // One of the first things done after clientStream's creation, is to call newAttemptLocked which either + // assigns a non nil value to the attempt or returns an error. If an error is returned from newAttemptLocked, + // then newClientStream calls finish on the clientStream and returns. So, finish method is the only + // place where we need to check if the attempt is nil. + attempt *csAttempt + // TODO(hedging): hedging will have multiple attempts simultaneously. + committed bool // active attempt committed for retry? + onCommit func() + buffer []func(a *csAttempt) error // operations to replay on retry + bufferSize int // current size of buffer +} + +// csAttempt implements a single transport stream attempt within a +// clientStream. 
+type csAttempt struct { + ctx context.Context + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + done func(balancer.DoneInfo) + + finished bool + dc Decompressor + decomp encoding.Compressor + decompSet bool + + mu sync.Mutex // guards trInfo.tr + // trInfo may be nil (if EnableTracing is false). + // trInfo.tr is set when created (if EnableTracing is true), + // and cleared when the finish method is called. + trInfo *traceInfo + + statsHandler stats.Handler + beginTime time.Time +} + +func (cs *clientStream) commitAttemptLocked() { + if !cs.committed && cs.onCommit != nil { + cs.onCommit() + } + cs.committed = true + cs.buffer = nil +} + +func (cs *clientStream) commitAttempt() { + cs.mu.Lock() + cs.commitAttemptLocked() + cs.mu.Unlock() +} + +// shouldRetry returns nil if the RPC should be retried; otherwise it returns +// the error that should be returned by the operation. If the RPC should be +// retried, the bool indicates whether it is being retried transparently. +func (cs *clientStream) shouldRetry(err error) (bool, error) { + if cs.attempt.s == nil { + // Error from NewClientStream. + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected, but assume no I/O was performed and the RPC is not + // fatal, so retry indefinitely. + return true, nil + } + + // Unwrap and convert error. + err = toRPCErr(nse.Err) + + // Never retry DoNotRetry errors, which indicate the RPC should not be + // retried due to max header list size violation, etc. + if nse.DoNotRetry { + return false, err + } + + // In the event of a non-IO operation error from NewStream, we never + // attempted to write anything to the wire, so we can retry + // indefinitely. + if !nse.DoNotTransparentRetry { + return true, nil + } + } + if cs.finished || cs.committed { + // RPC is finished or committed; cannot retry. + return false, err + } + // Wait for the trailers. + unprocessed := false + if cs.attempt.s != nil { + <-cs.attempt.s.Done() + unprocessed = cs.attempt.s.Unprocessed() + } + if cs.firstAttempt && unprocessed { + // First attempt, stream unprocessed: transparently retry. + return true, nil + } + if cs.cc.dopts.disableRetry { + return false, err + } + + pushback := 0 + hasPushback := false + if cs.attempt.s != nil { + if !cs.attempt.s.TrailersOnly() { + return false, err + } + + // TODO(retry): Move down if the spec changes to not check server pushback + // before considering this a failure for throttling. + sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + if len(sps) == 1 { + var e error + if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { + channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return false, err + } + hasPushback = true + } else if len(sps) > 1 { + channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return false, err + } + } + + var code codes.Code + if cs.attempt.s != nil { + code = cs.attempt.s.Status().Code() + } else { + code = status.Convert(err).Code() + } + + rp := cs.methodConfig.RetryPolicy + if rp == nil || !rp.RetryableStatusCodes[code] { + return false, err + } + + // Note: the ordering here is important; we count this as a failure + // only if the code matched a retryable code. 
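The RetryPolicy consulted here comes from the service config. A hedged example of a config that marks UNAVAILABLE retryable for an invented service (field names follow the gRPC service-config spec):

const serviceConfig = `{
  "methodConfig": [{
    "name": [{"service": "example.Chat"}],
    "retryPolicy": {
      "maxAttempts": 4,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

conn, err := grpc.Dial(target, grpc.WithDefaultServiceConfig(serviceConfig))

Note the jitter below: the code sleeps a random duration up to the computed backoff (grpcrand.Int63n), not the full value.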
+ if cs.retryThrottler.throttle() { + return false, err + } + if cs.numRetries+1 >= rp.MaxAttempts { + return false, err + } + + var dur time.Duration + if hasPushback { + dur = time.Millisecond * time.Duration(pushback) + cs.numRetriesSincePushback = 0 + } else { + fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) + cur := float64(rp.InitialBackoff) * fact + if max := float64(rp.MaxBackoff); cur > max { + cur = max + } + dur = time.Duration(grpcrand.Int63n(int64(cur))) + cs.numRetriesSincePushback++ + } + + // TODO(dfawley): we could eagerly fail here if dur puts us past the + // deadline, but unsure if it is worth doing. + t := time.NewTimer(dur) + select { + case <-t.C: + cs.numRetries++ + return false, nil + case <-cs.ctx.Done(): + t.Stop() + return false, status.FromContextError(cs.ctx.Err()).Err() + } +} + +// Returns nil if a retry was performed and succeeded; error otherwise. +func (cs *clientStream) retryLocked(lastErr error) error { + for { + cs.attempt.finish(toRPCErr(lastErr)) + isTransparent, err := cs.shouldRetry(lastErr) + if err != nil { + cs.commitAttemptLocked() + return err + } + cs.firstAttempt = false + if err := cs.newAttemptLocked(isTransparent); err != nil { + return err + } + if lastErr = cs.replayBufferLocked(); lastErr == nil { + return nil + } + } +} + +func (cs *clientStream) Context() context.Context { + cs.commitAttempt() + // No need to lock before using attempt, since we know it is committed and + // cannot change. + return cs.attempt.s.Context() +} + +func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { + cs.mu.Lock() + for { + if cs.committed { + cs.mu.Unlock() + // toRPCErr is used in case the error from the attempt comes from + // NewClientStream, which intentionally doesn't return a status + // error to allow for further inspection; all other errors should + // already be status errors. + return toRPCErr(op(cs.attempt)) + } + a := cs.attempt + cs.mu.Unlock() + err := op(a) + cs.mu.Lock() + if a != cs.attempt { + // We started another attempt already. + continue + } + if err == io.EOF { + <-a.s.Done() + } + if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) { + onSuccess() + cs.mu.Unlock() + return err + } + if err := cs.retryLocked(err); err != nil { + cs.mu.Unlock() + return err + } + } +} + +func (cs *clientStream) Header() (metadata.MD, error) { + var m metadata.MD + err := cs.withRetry(func(a *csAttempt) error { + var err error + m, err = a.s.Header() + return toRPCErr(err) + }, cs.commitAttemptLocked) + if err != nil { + cs.finish(err) + return nil, err + } + if cs.binlog != nil && !cs.serverHeaderBinlogged { + // Only log if binary log is on and header has not been logged. + logEntry := &binarylog.ServerHeader{ + OnClientSide: true, + Header: m, + PeerAddr: nil, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + cs.binlog.Log(logEntry) + cs.serverHeaderBinlogged = true + } + return m, err +} + +func (cs *clientStream) Trailer() metadata.MD { + // On RPC failure, we never need to retry, because usage requires that + // RecvMsg() returned a non-nil error before calling this function is valid. + // We would have retried earlier if necessary. + // + // Commit the attempt anyway, just in case users are not following those + // directions -- it will prevent races and should not meaningfully impact + // performance. 
+ cs.commitAttempt() + if cs.attempt.s == nil { + return nil + } + return cs.attempt.s.Trailer() +} + +func (cs *clientStream) replayBufferLocked() error { + a := cs.attempt + for _, f := range cs.buffer { + if err := f(a); err != nil { + return err + } + } + return nil +} + +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { + // Note: we still will buffer if retry is disabled (for transparent retries). + if cs.committed { + return + } + cs.bufferSize += sz + if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.commitAttemptLocked() + return + } + cs.buffer = append(cs.buffer, op) +} + +func (cs *clientStream) SendMsg(m interface{}) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) + cs.finish(err) + } + }() + if cs.sentLast { + return status.Errorf(codes.Internal, "SendMsg called after CloseSend") + } + if !cs.desc.ClientStreams { + cs.sentLast = true + } + + // load hdr, payload, data + hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) + } + msgBytes := data // Store the pointer before setting to nil. For binary logging. + op := func(a *csAttempt) error { + err := a.sendMsg(m, hdr, payload, data) + // nil out the message and uncomp when replaying; they are only needed for + // stats which is disabled for subsequent attempts. + m, data = nil, nil + return err + } + err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) + if cs.binlog != nil && err == nil { + cs.binlog.Log(&binarylog.ClientMessage{ + OnClientSide: true, + Message: msgBytes, + }) + } + return +} + +func (cs *clientStream) RecvMsg(m interface{}) error { + if cs.binlog != nil && !cs.serverHeaderBinlogged { + // Call Header() to binary log header if it's not already logged. + cs.Header() + } + var recvInfo *payloadInfo + if cs.binlog != nil { + recvInfo = &payloadInfo{} + } + err := cs.withRetry(func(a *csAttempt) error { + return a.recvMsg(m, recvInfo) + }, cs.commitAttemptLocked) + if cs.binlog != nil && err == nil { + cs.binlog.Log(&binarylog.ServerMessage{ + OnClientSide: true, + Message: recvInfo.uncompressedBytes, + }) + } + if err != nil || !cs.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + cs.finish(err) + + if cs.binlog != nil { + // finish will not log Trailer. Log Trailer here. + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if logEntry.Err == io.EOF { + logEntry.Err = nil + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + cs.binlog.Log(logEntry) + } + } + return err +} + +func (cs *clientStream) CloseSend() error { + if cs.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? 
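The ResourceExhausted check in SendMsg above is governed by per-call limits; a one-line sketch with an invented method (the client defaults are 4 MiB receive and a practically unlimited send):

resp, err := client.Upload(ctx, bigReq,
	grpc.MaxCallSendMsgSize(16*1024*1024),
	grpc.MaxCallRecvMsgSize(16*1024*1024),
)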
+ return nil + } + cs.sentLast = true + op := func(a *csAttempt) error { + a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil + } + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) + if cs.binlog != nil { + cs.binlog.Log(&binarylog.ClientHalfClose{ + OnClientSide: true, + }) + } + // We never returned an error here for reasons. + return nil +} + +func (cs *clientStream) finish(err error) { + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + cs.mu.Lock() + if cs.finished { + cs.mu.Unlock() + return + } + cs.finished = true + cs.commitAttemptLocked() + if cs.attempt != nil { + cs.attempt.finish(err) + // after functions all rely upon having a stream. + if cs.attempt.s != nil { + for _, o := range cs.opts { + o.after(cs.callInfo, cs.attempt) + } + } + } + cs.mu.Unlock() + // For binary logging. only log cancel in finish (could be caused by RPC ctx + // canceled or ClientConn closed). Trailer will be logged in RecvMsg. + // + // Only one of cancel or trailer needs to be logged. In the cases where + // users don't call RecvMsg, users must have already canceled the RPC. + if cs.binlog != nil && status.Code(err) == codes.Canceled { + cs.binlog.Log(&binarylog.Cancel{ + OnClientSide: true, + }) + } + if err == nil { + cs.retryThrottler.successfulRPC() + } + if channelz.IsOn() { + if err != nil { + cs.cc.incrCallsFailed() + } else { + cs.cc.incrCallsSucceeded() + } + } + cs.cancel() +} + +func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { + cs := a.cs + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } + a.mu.Unlock() + } + if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { + if !cs.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. + return nil + } + return io.EOF + } + if a.statsHandler != nil { + a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + } + if channelz.IsOn() { + a.t.IncrMsgSent() + } + return nil +} + +func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { + cs := a.cs + if a.statsHandler != nil && payInfo == nil { + payInfo = &payloadInfo{} + } + + if !a.decompSet { + // Block until we receive headers containing received message encoding. + if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { + if a.dc == nil || a.dc.Type() != ct { + // No configured decompressor, or it does not match the incoming + // message encoding; attempt to find a registered compressor that does. + a.dc = nil + a.decomp = encoding.GetCompressor(ct) + } + } else { + // No compression is used; disable our decompressor. + a.dc = nil + } + // Only initialize this state once per stream. + a.decompSet = true + } + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) + if err != nil { + if err == io.EOF { + if statusErr := a.s.Status().Err(); statusErr != nil { + return statusErr + } + return io.EOF // indicates successful end of stream. 
+ } + return toRPCErr(err) + } + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } + a.mu.Unlock() + } + if a.statsHandler != nil { + a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{ + Client: true, + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength + headerLen, + Length: len(payInfo.uncompressedBytes), + }) + } + if channelz.IsOn() { + a.t.IncrMsgRecv() + } + if cs.desc.ServerStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + // Special handling for non-server-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) + if err == nil { + return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) + } + if err == io.EOF { + return a.s.Status().Err() // non-server streaming Recv returns nil on success + } + return toRPCErr(err) +} + +func (a *csAttempt) finish(err error) { + a.mu.Lock() + if a.finished { + a.mu.Unlock() + return + } + a.finished = true + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + var tr metadata.MD + if a.s != nil { + a.t.CloseStream(a.s, err) + tr = a.s.Trailer() + } + + if a.done != nil { + br := false + if a.s != nil { + br = a.s.BytesReceived() + } + a.done(balancer.DoneInfo{ + Err: err, + Trailer: tr, + BytesSent: a.s != nil, + BytesReceived: br, + ServerLoad: balancerload.Parse(tr), + }) + } + if a.statsHandler != nil { + end := &stats.End{ + Client: true, + BeginTime: a.beginTime, + EndTime: time.Now(), + Trailer: tr, + Error: err, + } + a.statsHandler.HandleRPC(a.ctx, end) + } + if a.trInfo != nil && a.trInfo.tr != nil { + if err == nil { + a.trInfo.tr.LazyPrintf("RPC: [OK]") + } else { + a.trInfo.tr.LazyPrintf("RPC: [%v]", err) + a.trInfo.tr.SetError() + } + a.trInfo.tr.Finish() + a.trInfo.tr = nil + } + a.mu.Unlock() +} + +// newNonRetryClientStream creates a ClientStream with the specified transport, on the +// given addrConn. +// +// It's expected that the given transport is either the same one in addrConn, or +// is already closed. To avoid race, transport is specified separately, instead +// of using ac.transport. +// +// Main difference between this and ClientConn.NewStream: +// - no retry +// - no service config (or wait for service config) +// - no tracing or stats +func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) { + if t == nil { + // TODO: return RPC error here? + return nil, errors.New("transport provided is nil") + } + // defaultCallInfo contains unnecessary info (i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct. + c := &callInfo{} + + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818.
+ ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: ac.cc.authority, + Method: method, + ContentSubtype: c.contentSubtype, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. + var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if ac.cc.dopts.cp != nil { + callHdr.SendCompress = ac.cc.dopts.cp.Type() + cp = ac.cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + + // Use a special addrConnStream to avoid retry. + as := &addrConnStream{ + callHdr: callHdr, + ac: ac, + ctx: ctx, + cancel: cancel, + opts: opts, + callInfo: c, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + t: t, + } + + s, err := as.t.NewStream(as.ctx, as.callHdr) + if err != nil { + err = toRPCErr(err) + return nil, err + } + as.s = s + as.p = &parser{r: s} + ac.incrCallsStarted() + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. + go func() { + select { + case <-ac.ctx.Done(): + as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) + case <-ctx.Done(): + as.finish(toRPCErr(ctx.Err())) + } + }() + } + return as, nil +} + +type addrConnStream struct { + s *transport.Stream + ac *addrConn + callHdr *transport.CallHdr + cancel context.CancelFunc + opts []CallOption + callInfo *callInfo + t transport.ClientTransport + ctx context.Context + sentLast bool + desc *StreamDesc + codec baseCodec + cp Compressor + comp encoding.Compressor + decompSet bool + dc Decompressor + decomp encoding.Compressor + p *parser + mu sync.Mutex + finished bool +} + +func (as *addrConnStream) Header() (metadata.MD, error) { + m, err := as.s.Header() + if err != nil { + as.finish(toRPCErr(err)) + } + return m, err +} + +func (as *addrConnStream) Trailer() metadata.MD { + return as.s.Trailer() +} + +func (as *addrConnStream) CloseSend() error { + if as.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? + return nil + } + as.sentLast = true + + as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. 
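In generated-code terms, the CloseSend semantics implemented here surface as CloseAndRecv on client-streaming stubs; a sketch with invented types:

stream, err := client.RecordRoute(ctx) // invented client-streaming method
if err != nil {
	return err
}
for _, p := range points {
	if err := stream.Send(p); err != nil {
		break // io.EOF: the real status arrives from CloseAndRecv below
	}
}
summary, err := stream.CloseAndRecv() // CloseSend plus one final RecvMsg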
+ return nil +} + +func (as *addrConnStream) Context() context.Context { + return as.s.Context() +} + +func (as *addrConnStream) SendMsg(m interface{}) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) + as.finish(err) + } + }() + if as.sentLast { + return status.Errorf(codes.Internal, "SendMsg called after CloseSend") + } + if !as.desc.ClientStreams { + as.sentLast = true + } + + // load hdr, payload, data + hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payld) > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) + } + + if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if !as.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. + return nil + } + return io.EOF + } + + if channelz.IsOn() { + as.t.IncrMsgSent() + } + return nil +} + +func (as *addrConnStream) RecvMsg(m interface{}) (err error) { + defer func() { + if err != nil || !as.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + as.finish(err) + } + }() + + if !as.decompSet { + // Block until we receive headers containing received message encoding. + if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity { + if as.dc == nil || as.dc.Type() != ct { + // No configured decompressor, or it does not match the incoming + // message encoding; attempt to find a registered compressor that does. + as.dc = nil + as.decomp = encoding.GetCompressor(ct) + } + } else { + // No compression is used; disable our decompressor. + as.dc = nil + } + // Only initialize this state once per stream. + as.decompSet = true + } + err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) + if err != nil { + if err == io.EOF { + if statusErr := as.s.Status().Err(); statusErr != nil { + return statusErr + } + return io.EOF // indicates successful end of stream. + } + return toRPCErr(err) + } + + if channelz.IsOn() { + as.t.IncrMsgRecv() + } + if as.desc.ServerStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + + // Special handling for non-server-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. + err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) + if err == nil { + return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) + } + if err == io.EOF { + return as.s.Status().Err() // non-server streaming Recv returns nil on success + } + return toRPCErr(err) +} + +func (as *addrConnStream) finish(err error) { + as.mu.Lock() + if as.finished { + as.mu.Unlock() + return + } + as.finished = true + if err == io.EOF { + // Ending a stream with EOF indicates a success.
+ err = nil + } + if as.s != nil { + as.t.CloseStream(as.s, err) + } + + if err != nil { + as.ac.incrCallsFailed() + } else { + as.ac.incrCallsSucceeded() + } + as.cancel() + as.mu.Unlock() +} + +// ServerStream defines the server-side behavior of a streaming RPC. +// +// All errors returned from ServerStream methods are compatible with the +// status package. +type ServerStream interface { + // SetHeader sets the header metadata. It may be called multiple times. + // When called multiple times, all the provided metadata will be merged. + // All the metadata will be sent out when one of the following happens: + // - ServerStream.SendHeader() is called; + // - The first response is sent out; + // - An RPC status is sent out (error or success). + SetHeader(metadata.MD) error + // SendHeader sends the header metadata. + // The provided md and headers set by SetHeader() will be sent. + // It fails if called multiple times. + SendHeader(metadata.MD) error + // SetTrailer sets the trailer metadata which will be sent with the RPC status. + // When called more than once, all the provided metadata will be merged. + SetTrailer(metadata.MD) + // Context returns the context for this stream. + Context() context.Context + // SendMsg sends a message. On error, SendMsg aborts the stream and the + // error is returned directly. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the client. An + // untimely stream closure may result in lost messages. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the client has performed a CloseSend. On + // any non-EOF error, the stream is aborted and the error contains the + // RPC status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// serverStream implements a server side Stream. +type serverStream struct { + ctx context.Context + t transport.ServerTransport + s *transport.Stream + p *parser + codec baseCodec + + cp Compressor + dc Decompressor + comp encoding.Compressor + decomp encoding.Compressor + + maxReceiveMessageSize int + maxSendMessageSize int + trInfo *traceInfo + + statsHandler stats.Handler + + binlog *binarylog.MethodLogger + // serverHeaderBinlogged indicates whether server header has been logged. It + // will happen when one of the following two happens: stream.SendHeader(), + // stream.Send(). + // + // It's only checked in send and sendHeader, doesn't need to be + // synchronized. + serverHeaderBinlogged bool + + mu sync.Mutex // protects trInfo.tr after the service handler runs.
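A hedged sketch of a handler exercising the ServerStream contract above; the service, message types, and field values are invented:

func (s *routeServer) ListFeatures(req *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error {
	// Headers set here are buffered and flushed with the first Send.
	if err := stream.SetHeader(metadata.Pairs("region", "default")); err != nil {
		return err
	}
	n := 0
	for _, f := range s.features {
		if err := stream.Send(f); err != nil {
			return err // stream aborted; err already carries the RPC status
		}
		n++
	}
	stream.SetTrailer(metadata.Pairs("count", strconv.Itoa(n)))
	return nil // becomes an OK status, delivered together with the trailer
}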
+} + +func (ss *serverStream) Context() context.Context { + return ss.ctx +} + +func (ss *serverStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + return ss.s.SetHeader(md) +} + +func (ss *serverStream) SendHeader(md metadata.MD) error { + err := ss.t.WriteHeader(ss.s, md) + if ss.binlog != nil && !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + ss.binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + ss.serverHeaderBinlogged = true + } + return err +} + +func (ss *serverStream) SetTrailer(md metadata.MD) { + if md.Len() == 0 { + return + } + ss.s.SetTrailer(md) +} + +func (ss *serverStream) SendMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgSent() + } + }() + + // load hdr, payload, data + hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) + } + if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + return toRPCErr(err) + } + if ss.binlog != nil { + if !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + ss.binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + ss.serverHeaderBinlogged = true + } + ss.binlog.Log(&binarylog.ServerMessage{ + Message: data, + }) + } + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } + return nil +} + +func (ss *serverStream) RecvMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. 
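RecvMsg's io.EOF contract (the client performed CloseSend) yields the canonical bidirectional loop; a sketch with invented generated types:

func (s *chatServer) Chat(stream pb.Chat_ChatServer) error {
	for {
		in, err := stream.Recv() // a thin wrapper over RecvMsg
		if err == io.EOF {
			return nil // client half-closed; finish with an OK status
		}
		if err != nil {
			return err // already a status error, per the contract above
		}
		if err := stream.Send(in); err != nil {
			return err
		}
	}
}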
+ } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgRecv() + } + }() + var payInfo *payloadInfo + if ss.statsHandler != nil || ss.binlog != nil { + payInfo = &payloadInfo{} + } + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err == io.EOF { + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ClientHalfClose{}) + } + return err + } + if err == io.ErrUnexpectedEOF { + err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + return toRPCErr(err) + } + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength + headerLen, + Length: len(payInfo.uncompressedBytes), + }) + } + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ClientMessage{ + Message: payInfo.uncompressedBytes, + }) + } + return nil +} + +// MethodFromServerStream returns the method string for the input stream. +// The returned string is in the format of "/service/method". +func MethodFromServerStream(stream ServerStream) (string, bool) { + return Method(stream.Context()) +} + +// prepareMsg returns the hdr, payload and data +// using the compressors passed or using the +// passed preparedmsg +func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { + if preparedMsg, ok := m.(*PreparedMsg); ok { + return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + } + // The input interface is not a prepared msg. + // Marshal and Compress the data at this point + data, err = encode(codec, m) + if err != nil { + return nil, nil, nil, err + } + compData, err := compress(data, cp, comp) + if err != nil { + return nil, nil, nil, err + } + hdr, payload = msgHeader(data, compData) + return hdr, payload, data, nil +} diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go new file mode 100644 index 000000000..dbf34e6bb --- /dev/null +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -0,0 +1,56 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tap defines the function handles which are executed on the transport +// layer of gRPC-Go and related information. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +package tap + +import ( + "context" +) + +// Info defines the relevant information needed by the handles. +type Info struct { + // FullMethodName is the string of grpc method (in the format of + // /package.service/method). + FullMethodName string + // TODO: More to be added. +} + +// ServerInHandle defines the function which runs before a new stream is +// created on the server side. If it returns a non-nil error, the stream will +// not be created and an error will be returned to the client. 
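A hedged ServerInHandle sketch that respects the constraints spelled out in the remainder of this comment; the limiter settings are arbitrary and golang.org/x/time/rate is an assumed dependency:

var limiter = rate.NewLimiter(100, 200) // ~100 RPCs/s, burst 200

func rateLimitTap(ctx context.Context, info *tap.Info) (context.Context, error) {
	if !limiter.Allow() { // non-blocking, so safe on the connection's I/O goroutine
		return nil, status.Error(codes.ResourceExhausted, "rejected "+info.FullMethodName)
	}
	return ctx, nil
}

// Installed with: grpc.NewServer(grpc.InTapHandle(rateLimitTap))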
If the error +// returned is a status error, that status code and message will be used, +// otherwise PermissionDenied will be the code and err.Error() will be the +// message. +// +// It's intended to be used in situations where you don't want to waste the +// resources to accept the new stream (e.g. rate-limiting). For other general +// usages, please use interceptors. +// +// Note that it is executed in the per-connection I/O goroutine(s) instead of +// per-RPC goroutine. Therefore, users should NOT have any +// blocking/time-consuming work in this handle. Otherwise all the RPCs would +// slow down. Also, for the same reason, this handle won't be called +// concurrently by gRPC. +type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go new file mode 100644 index 000000000..07a2d26b3 --- /dev/null +++ b/vendor/google.golang.org/grpc/trace.go @@ -0,0 +1,123 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "sync" + "time" + + "golang.org/x/net/trace" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. +// This should only be set before any RPCs are sent or received by this program. +var EnableTracing bool + +// methodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". +func methodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + return m +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr trace.Trace + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. +// It may be mutated after construction; remoteAddr specifically may change +// during client-side use. +type firstLine struct { + mu sync.Mutex + client bool // whether this is a client (outgoing) RPC + remoteAddr net.Addr + deadline time.Duration // may be zero +} + +func (f *firstLine) SetRemoteAddr(addr net.Addr) { + f.mu.Lock() + f.remoteAddr = addr + f.mu.Unlock() +} + +func (f *firstLine) String() string { + f.mu.Lock() + defer f.mu.Unlock() + + var line bytes.Buffer + io.WriteString(&line, "RPC: ") + if f.client { + io.WriteString(&line, "to") + } else { + io.WriteString(&line, "from") + } + fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) + if f.deadline != 0 { + fmt.Fprint(&line, f.deadline) + } else { + io.WriteString(&line, "none") + } + return line.String() +} + +const truncateSize = 100 + +func truncate(x string, l int) string { + if l > len(x) { + return x + } + return x[:l] +} + +// payload represents an RPC request or response payload. +type payload struct { + sent bool // whether this is an outgoing payload + msg interface{} // e.g. 
diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go
new file mode 100644
index 000000000..07a2d26b3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/trace.go
@@ -0,0 +1,123 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/trace"
+)
+
+// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
+// This should only be set before any RPCs are sent or received by this program.
+var EnableTracing bool
+
+// methodFamily returns the trace family for the given method.
+// It turns "/pkg.Service/GetFoo" into "pkg.Service".
+func methodFamily(m string) string {
+	m = strings.TrimPrefix(m, "/") // remove leading slash
+	if i := strings.Index(m, "/"); i >= 0 {
+		m = m[:i] // remove everything from second slash
+	}
+	return m
+}
+
+// traceInfo contains tracing information for an RPC.
+type traceInfo struct {
+	tr        trace.Trace
+	firstLine firstLine
+}
+
+// firstLine is the first line of an RPC trace.
+// It may be mutated after construction; remoteAddr specifically may change
+// during client-side use.
+type firstLine struct {
+	mu         sync.Mutex
+	client     bool // whether this is a client (outgoing) RPC
+	remoteAddr net.Addr
+	deadline   time.Duration // may be zero
+}
+
+func (f *firstLine) SetRemoteAddr(addr net.Addr) {
+	f.mu.Lock()
+	f.remoteAddr = addr
+	f.mu.Unlock()
+}
+
+func (f *firstLine) String() string {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
+	var line bytes.Buffer
+	io.WriteString(&line, "RPC: ")
+	if f.client {
+		io.WriteString(&line, "to")
+	} else {
+		io.WriteString(&line, "from")
+	}
+	fmt.Fprintf(&line, " %v deadline:", f.remoteAddr)
+	if f.deadline != 0 {
+		fmt.Fprint(&line, f.deadline)
+	} else {
+		io.WriteString(&line, "none")
+	}
+	return line.String()
+}
+
+const truncateSize = 100
+
+func truncate(x string, l int) string {
+	if l > len(x) {
+		return x
+	}
+	return x[:l]
+}
+
+// payload represents an RPC request or response payload.
+type payload struct {
+	sent bool        // whether this is an outgoing payload
+	msg  interface{} // e.g. a proto.Message
+	// TODO(dsymonds): add stringifying info to codec, and limit how much we hold here?
+}
+
+func (p payload) String() string {
+	if p.sent {
+		return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize)
+	}
+	return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize)
+}
+
+type fmtStringer struct {
+	format string
+	a      []interface{}
+}
+
+func (f *fmtStringer) String() string {
+	return fmt.Sprintf(f.format, f.a...)
+}
+
+type stringer string
+
+func (s stringer) String() string { return string(s) }
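A minimal sketch of turning this tracing on in an application (not part of this diff; the address is an arbitrary example). Because grpc imports golang.org/x/net/trace, that package's init has already registered the /debug/requests and /debug/events handlers on http.DefaultServeMux, so serving the default mux exposes the collected RPC traces:

package main

import (
	"log"
	"net/http"

	"google.golang.org/grpc"
)

func main() {
	// Per the comment on EnableTracing, set this before any RPC activity.
	grpc.EnableTracing = true

	// Serve http.DefaultServeMux so /debug/requests and /debug/events
	// (registered by x/net/trace) are reachable; by default they only
	// authorize requests from localhost.
	go func() {
		log.Println(http.ListenAndServe("localhost:8080", nil))
	}()

	// ... construct and run gRPC clients/servers as usual ...
	select {}
}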
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
new file mode 100644
index 000000000..5bd4f534c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/version.go
@@ -0,0 +1,22 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+// Version is the current grpc version.
+const Version = "1.45.0"
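The exported constant above makes the vendored release visible at runtime; a trivial sketch (not part of this diff):

package main

import (
	"fmt"

	"google.golang.org/grpc"
)

func main() {
	// Handy when debugging vendored code: confirm which gRPC release
	// these sources correspond to.
	fmt.Println("gRPC version:", grpc.Version) // prints "1.45.0" for this vendor drop
}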
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
new file mode 100644
index 000000000..ceb436c6c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/vet.sh
@@ -0,0 +1,211 @@
+#!/bin/bash
+
+set -ex # Exit on error; debugging enabled.
+set -o pipefail # Fail a pipe if any sub-command fails.
+
+# not makes sure the command passed to it does not exit with a return code of 0.
+not() {
+  # This is required instead of the earlier (! $COMMAND) because subshells and
+  # pipefail don't work the same on Darwin as in Linux.
+  ! "$@"
+}
+
+die() {
+  echo "$@" >&2
+  exit 1
+}
+
+fail_on_output() {
+  tee /dev/stderr | not read
+}
+
+# Check to make sure it's safe to modify the user's git repo.
+git status --porcelain | fail_on_output
+
+# Undo any edits made by this script.
+cleanup() {
+  git reset --hard HEAD
+}
+trap cleanup EXIT
+
+PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}"
+go version
+
+if [[ "$1" = "-install" ]]; then
+  # Install the pinned versions as defined in module tools.
+  pushd ./test/tools
+  go install \
+    golang.org/x/lint/golint \
+    golang.org/x/tools/cmd/goimports \
+    honnef.co/go/tools/cmd/staticcheck \
+    github.com/client9/misspell/cmd/misspell
+  popd
+  if [[ -z "${VET_SKIP_PROTO}" ]]; then
+    if [[ "${TRAVIS}" = "true" ]]; then
+      PROTOBUF_VERSION=3.14.0
+      PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
+      pushd /home/travis
+      wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
+      unzip ${PROTOC_FILENAME}
+      bin/protoc --version
+      popd
+    elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then
+      PROTOBUF_VERSION=3.14.0
+      PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
+      pushd /home/runner/go
+      wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
+      unzip ${PROTOC_FILENAME}
+      bin/protoc --version
+      popd
+    elif not which protoc > /dev/null; then
+      die "Please install protoc into your path"
+    fi
+  fi
+  exit 0
+elif [[ "$#" -ne 0 ]]; then
+  die "Unknown argument(s): $*"
+fi
+
+# - Ensure all source files contain a copyright message.
+not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go'
+
+# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown.
+not grep 'func Test[^(]' *_test.go
+not grep 'func Test[^(]' test/*.go
+
+# - Do not import x/net/context.
+not git grep -l 'x/net/context' -- "*.go"
+
+# - Do not import math/rand for real library code. Use internal/grpcrand for
+#   thread safety.
+git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test'
+
+# - Do not call grpclog directly. Use grpclog.Component instead.
+git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
+
+# - Ensure all ptypes proto packages are renamed when importing.
+not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"
+
+# - Ensure all xds proto imports are renamed to *pb or *grpc.
+git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "'
+
+misspell -error .
+
+# - Check that generated proto files are up to date.
+if [[ -z "${VET_SKIP_PROTO}" ]]; then
+  PATH="/home/travis/bin:${PATH}" make proto && \
+    git status --porcelain 2>&1 | fail_on_output || \
+    (git status; git --no-pager diff; exit 1)
+fi
+
+# - gofmt, goimports, golint (with exceptions for generated code), go vet,
+#   go mod tidy.
+# Perform these checks on each module inside gRPC.
+for MOD_FILE in $(find . -name 'go.mod'); do
+  MOD_DIR=$(dirname ${MOD_FILE})
+  pushd ${MOD_DIR}
+  go vet -all ./... | fail_on_output
+  gofmt -s -d -l . 2>&1 | fail_on_output
+  goimports -l . 2>&1 | not grep -vE "\.pb\.go"
+  golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:"
+
+  go mod tidy
+  git status --porcelain 2>&1 | fail_on_output || \
+    (git status; git --no-pager diff; exit 1)
+  popd
+done
+
+# - Collection of static analysis checks
+#
+# TODO(dfawley): don't use deprecated functions in examples or first-party
+# plugins.
+SC_OUT="$(mktemp)"
+staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true
+# Error if anything other than deprecation warnings are printed.
+not grep -v "is deprecated:.*SA1019" "${SC_OUT}"
+# Only ignore the following deprecated types/fields/functions.
+not grep -Fv '.CredsBundle
+.HeaderMap
+.Metadata is deprecated: use Attributes
+.NewAddress
+.NewServiceConfig
+.Type is deprecated: use Attributes
+BuildVersion is deprecated
+balancer.ErrTransientFailure
+balancer.Picker
+extDesc.Filename is deprecated
+github.com/golang/protobuf/jsonpb is deprecated
+grpc.CallCustomCodec
+grpc.Code
+grpc.Compressor
+grpc.CustomCodec
+grpc.Decompressor
+grpc.MaxMsgSize
+grpc.MethodConfig
+grpc.NewGZIPCompressor
+grpc.NewGZIPDecompressor
+grpc.RPCCompressor
+grpc.RPCDecompressor
+grpc.ServiceConfig
+grpc.WithBalancerName
+grpc.WithCompressor
+grpc.WithDecompressor
+grpc.WithDialer
+grpc.WithMaxMsgSize
+grpc.WithServiceConfig
+grpc.WithTimeout
+http.CloseNotifier
+info.SecurityVersion
+proto is deprecated
+proto.InternalMessageInfo is deprecated
+proto.EnumName is deprecated
+proto.ErrInternalBadWireType is deprecated
+proto.FileDescriptor is deprecated
+proto.Marshaler is deprecated
+proto.MessageType is deprecated
+proto.RegisterEnum is deprecated
+proto.RegisterFile is deprecated
+proto.RegisterType is deprecated
+proto.RegisterExtension is deprecated
+proto.RegisteredExtension is deprecated
+proto.RegisteredExtensions is deprecated
+proto.RegisterMapType is deprecated
+proto.Unmarshaler is deprecated
+resolver.Backend
+resolver.GRPCLB
+Target is deprecated: Use the Target field in the BuildOptions instead.
+xxx_messageInfo_
+' "${SC_OUT}"
+
+# - special golint on package comments.
+lint_package_comment_per_package() {
+  # Number of files in this go package.
+  fileCount=$(go list -f '{{len .GoFiles}}' $1)
+  if [ ${fileCount} -eq 0 ]; then
+    return 0
+  fi
+  # Number of package errors generated by golint.
+  lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment")
+  # golint complains about every file that's missing the package comment. If the
+  # number of files for this package is greater than the number of errors, there's
+  # at least one file with package comment, good. Otherwise, fail.
+  if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then
+    echo "Package $1 (with ${fileCount} files) is missing package comment"
+    return 1
+  fi
+}
+lint_package_comment() {
+  set +ex
+
+  count=0
+  for i in $(go list ./...); do
+    lint_package_comment_per_package "$i"
+    ((count += $?))
+  done
+
+  set -ex
+  return $count
+}
+lint_package_comment

+echo SUCCESS
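The `=>` in the red-hat-storage entry of the modules.txt hunk below records a module replacement, which is presumably produced by a go.mod directive along these lines (the go.mod change itself is outside this diff):

replace github.com/red-hat-storage/ocs-operator/services/provider => github.com/jarrpa/ocs-operator/services/provider v1.4.12-0.rc2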
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 4f561d55f..7bc9bf301 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -134,6 +134,9 @@ github.com/onsi/gomega/matchers/support/goraph/edge
 github.com/onsi/gomega/matchers/support/goraph/node
 github.com/onsi/gomega/matchers/support/goraph/util
 github.com/onsi/gomega/types
+# github.com/openshift/api v0.0.0-20220421141645-441fe135b2fc
+## explicit; go 1.16
+github.com/openshift/api/config/v1
 # github.com/pkg/errors v0.9.1
 ## explicit
 github.com/pkg/errors
@@ -159,6 +162,10 @@ github.com/prometheus/common/model
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
+# github.com/red-hat-storage/ocs-operator/services/provider v1.4.12-0.rc2 => github.com/jarrpa/ocs-operator/services/provider v1.4.12-0.rc2
+## explicit; go 1.17
+github.com/red-hat-storage/ocs-operator/services/provider/client
+github.com/red-hat-storage/ocs-operator/services/provider/pb
 # github.com/spf13/pflag v1.0.5
 ## explicit; go 1.12
 github.com/spf13/pflag
@@ -194,6 +201,8 @@ golang.org/x/net/http/httpguts
 golang.org/x/net/http2
 golang.org/x/net/http2/hpack
 golang.org/x/net/idna
+golang.org/x/net/internal/timeseries
+golang.org/x/net/trace
 # golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
 ## explicit; go 1.11
 golang.org/x/oauth2
@@ -252,6 +261,56 @@ google.golang.org/appengine/internal/modules
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
+# google.golang.org/genproto v0.0.0-20220207185906-7721543eae58
+## explicit; go 1.11
+google.golang.org/genproto/googleapis/rpc/status
+# google.golang.org/grpc v1.45.0
+## explicit; go 1.14
+google.golang.org/grpc
+google.golang.org/grpc/attributes
+google.golang.org/grpc/backoff
+google.golang.org/grpc/balancer
+google.golang.org/grpc/balancer/base
+google.golang.org/grpc/balancer/grpclb/state
+google.golang.org/grpc/balancer/roundrobin
+google.golang.org/grpc/binarylog/grpc_binarylog_v1
+google.golang.org/grpc/codes
+google.golang.org/grpc/connectivity
+google.golang.org/grpc/credentials
+google.golang.org/grpc/credentials/insecure
+google.golang.org/grpc/encoding
+google.golang.org/grpc/encoding/proto
+google.golang.org/grpc/grpclog
+google.golang.org/grpc/internal
+google.golang.org/grpc/internal/backoff
+google.golang.org/grpc/internal/balancerload
+google.golang.org/grpc/internal/binarylog
+google.golang.org/grpc/internal/buffer
+google.golang.org/grpc/internal/channelz
+google.golang.org/grpc/internal/credentials
+google.golang.org/grpc/internal/envconfig
+google.golang.org/grpc/internal/grpclog
+google.golang.org/grpc/internal/grpcrand
+google.golang.org/grpc/internal/grpcsync
+google.golang.org/grpc/internal/grpcutil
+google.golang.org/grpc/internal/metadata
+google.golang.org/grpc/internal/resolver
+google.golang.org/grpc/internal/resolver/dns
+google.golang.org/grpc/internal/resolver/passthrough
+google.golang.org/grpc/internal/resolver/unix
+google.golang.org/grpc/internal/serviceconfig
+google.golang.org/grpc/internal/status
+google.golang.org/grpc/internal/syscall
+google.golang.org/grpc/internal/transport
+google.golang.org/grpc/internal/transport/networktype
+google.golang.org/grpc/keepalive
+google.golang.org/grpc/metadata
+google.golang.org/grpc/peer
+google.golang.org/grpc/resolver
+google.golang.org/grpc/serviceconfig
+google.golang.org/grpc/stats
+google.golang.org/grpc/status
+google.golang.org/grpc/tap
 # google.golang.org/protobuf v1.27.1
 ## explicit; go 1.9
 google.golang.org/protobuf/encoding/prototext