diff --git a/go.mod b/go.mod index 47890b417c..e6d8f83448 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module knative.dev/eventing-kafka-broker -go 1.22.0 +go 1.22.7 require ( github.com/IBM/sarama v1.43.3 @@ -28,17 +28,17 @@ require ( go.uber.org/atomic v1.10.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 - google.golang.org/protobuf v1.35.1 + google.golang.org/protobuf v1.35.2 k8s.io/api v0.30.3 k8s.io/apiextensions-apiserver v0.30.3 k8s.io/apimachinery v0.30.3 k8s.io/apiserver v0.30.3 k8s.io/client-go v0.30.3 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 - knative.dev/eventing v0.43.1-0.20241028083747-ef6b31a697e7 - knative.dev/hack v0.0.0-20241025103803-ef6e7e983a60 - knative.dev/pkg v0.0.0-20241026180704-25f6002b00f3 - knative.dev/reconciler-test v0.0.0-20241024141702-aae114c1c0e3 + knative.dev/eventing v0.43.1-0.20241223131119-c9047a198255 + knative.dev/hack v0.0.0-20241227080210-e92a16ae0893 + knative.dev/pkg v0.0.0-20241223131119-4c901591eb4a + knative.dev/reconciler-test v0.0.0-20241223131247-96258bea6ce4 sigs.k8s.io/controller-runtime v0.12.3 sigs.k8s.io/yaml v1.4.0 ) @@ -115,20 +115,20 @@ require ( github.com/xdg-go/stringprep v1.0.4 // indirect go.uber.org/automaxprocs v1.6.0 // indirect golang.org/x/crypto v0.31.0 // indirect - golang.org/x/mod v0.21.0 // indirect - golang.org/x/net v0.30.0 // indirect - golang.org/x/oauth2 v0.22.0 // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/net v0.31.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.10.0 // indirect golang.org/x/sys v0.28.0 // indirect golang.org/x/term v0.27.0 // indirect golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.6.0 // indirect - golang.org/x/tools v0.26.0 // indirect + golang.org/x/tools v0.27.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/api v0.183.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect - google.golang.org/grpc v1.67.1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/grpc v1.68.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 6063d10eba..5bb764dcf2 100644 --- a/go.sum +++ b/go.sum @@ -777,8 +777,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= -golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -831,8 +831,8 @@ 
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -841,8 +841,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= -golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1010,8 +1010,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= +golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1076,10 +1076,10 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
-google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= -google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1095,8 +1095,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= +google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1111,8 +1111,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1214,14 +1214,14 @@ k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod 
h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -knative.dev/eventing v0.43.1-0.20241028083747-ef6b31a697e7 h1:pYKhXbvHVOmQumyKS7vjQBaB11rXzeAjz84z2L9qrtM= -knative.dev/eventing v0.43.1-0.20241028083747-ef6b31a697e7/go.mod h1:2mdt9J66vQYzxizDz8I/F6IGzV1QgwCkacBR8X12Ssk= -knative.dev/hack v0.0.0-20241025103803-ef6e7e983a60 h1:LjBbosBvW/9/qjzIJtGpehPsbNWVvy1Fz8yZvMbFWe4= -knative.dev/hack v0.0.0-20241025103803-ef6e7e983a60/go.mod h1:R0ritgYtjLDO9527h5vb5X6gfvt5LCrJ55BNbVDsWiY= -knative.dev/pkg v0.0.0-20241026180704-25f6002b00f3 h1:uUSDGlOIkdPT4svjlhi+JEnP2Ufw7AM/F5QDYiEL02U= -knative.dev/pkg v0.0.0-20241026180704-25f6002b00f3/go.mod h1:FeMbTLlxQqSASwlRCrYEOsZ0OKUgSj52qxhECwYCJsw= -knative.dev/reconciler-test v0.0.0-20241024141702-aae114c1c0e3 h1:pa3b/0EKzONPLBHqM0RFzG5EaB+k8OTyHGaUXWPIgns= -knative.dev/reconciler-test v0.0.0-20241024141702-aae114c1c0e3/go.mod h1:W9Kmdoxelg2mswUpDKerL/4Ih1/ouVhlSMeZeJ5LX9c= +knative.dev/eventing v0.43.1-0.20241223131119-c9047a198255 h1:pgj6ZnPAklWTTeef9d35/avLBb2rREX7Y5+BwZp6W0I= +knative.dev/eventing v0.43.1-0.20241223131119-c9047a198255/go.mod h1:eBhIxUaRBUAJUQ98189X+KBps/UoWJY4B/MZk+FiVIM= +knative.dev/hack v0.0.0-20241227080210-e92a16ae0893 h1:zy7LwNJ2S7obPMHVAtxQgZPXxBTZzoxHbtb6uhxOl7Q= +knative.dev/hack v0.0.0-20241227080210-e92a16ae0893/go.mod h1:R0ritgYtjLDO9527h5vb5X6gfvt5LCrJ55BNbVDsWiY= +knative.dev/pkg v0.0.0-20241223131119-4c901591eb4a h1:31rLKAGHeQEkxMOc/h4XCmHOTiR/1R4NRPvJ3wg05WY= +knative.dev/pkg v0.0.0-20241223131119-4c901591eb4a/go.mod h1:C2dxK66GlycMOS0SKqv0SMAnWkxsYbG4hkH32Xg1qD0= +knative.dev/reconciler-test v0.0.0-20241223131247-96258bea6ce4 h1:6t8hbwR0tHc1AzUExhLrc7SWVmN4e9mdXkHAC38Y/h0= +knative.dev/reconciler-test v0.0.0-20241223131247-96258bea6ce4/go.mod h1:zDapuiJIFS67XEVBJWdEnVN5KHE8EvFGooLQGXeDH/c= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go index 780968d6c1..e81b73e6a7 100644 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -8,8 +8,8 @@ package http2 import ( "context" - "crypto/tls" "errors" + "net" "net/http" "sync" ) @@ -158,7 +158,7 @@ func (c *dialCall) dial(ctx context.Context, addr string) { // This code decides which ones live or die. // The return value used is whether c was used. // c is never closed. 
-func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { +func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c net.Conn) (used bool, err error) { p.mu.Lock() for _, cc := range p.conns[key] { if cc.CanTakeNewRequest() { @@ -194,8 +194,8 @@ type addConnCall struct { err error } -func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { - cc, err := t.NewClientConn(tc) +func (c *addConnCall) run(t *Transport, key string, nc net.Conn) { + cc, err := t.NewClientConn(nc) p := c.p p.mu.Lock() diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 617b4a4762..832414b450 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -306,7 +306,7 @@ func ConfigureServer(s *http.Server, conf *Server) error { if s.TLSNextProto == nil { s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} } - protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler := func(hs *http.Server, c net.Conn, h http.Handler, sawClientPreface bool) { if testHookOnConn != nil { testHookOnConn() } @@ -323,12 +323,31 @@ func ConfigureServer(s *http.Server, conf *Server) error { ctx = bc.BaseContext() } conf.ServeConn(c, &ServeConnOpts{ - Context: ctx, - Handler: h, - BaseConfig: hs, + Context: ctx, + Handler: h, + BaseConfig: hs, + SawClientPreface: sawClientPreface, }) } - s.TLSNextProto[NextProtoTLS] = protoHandler + s.TLSNextProto[NextProtoTLS] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler(hs, c, h, false) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + // + // A connection passed in this method has already had the HTTP/2 preface read from it. + s.TLSNextProto[nextProtoUnencryptedHTTP2] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + if lg := hs.ErrorLog; lg != nil { + lg.Print(err) + } else { + log.Print(err) + } + go c.Close() + return + } + protoHandler(hs, nc, h, true) + } return nil } @@ -2880,6 +2899,11 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { return nil } +func (w *responseWriter) EnableFullDuplex() error { + // We always support full duplex responses, so this is a no-op. + return nil +} + func (w *responseWriter) Flush() { w.FlushError() } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 0c5f64aa8b..f5968f4407 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -202,6 +202,20 @@ func (t *Transport) markNewGoroutine() { } } +func (t *Transport) now() time.Time { + if t != nil && t.transportTestHooks != nil { + return t.transportTestHooks.group.Now() + } + return time.Now() +} + +func (t *Transport) timeSince(when time.Time) time.Duration { + if t != nil && t.transportTestHooks != nil { + return t.now().Sub(when) + } + return time.Since(when) +} + // newTimer creates a new time.Timer, or a synthetic timer in tests. 
func (t *Transport) newTimer(d time.Duration) timer { if t.transportTestHooks != nil { @@ -281,8 +295,8 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") } - upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - addr := authorityAddr("https", authority) + upgradeFn := func(scheme, authority string, c net.Conn) http.RoundTripper { + addr := authorityAddr(scheme, authority) if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { go c.Close() return erringRoundTripper{err} @@ -293,18 +307,37 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { // was unknown) go c.Close() } + if scheme == "http" { + return (*unencryptedTransport)(t2) + } return t2 } - if m := t1.TLSNextProto; len(m) == 0 { - t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ - "h2": upgradeFn, + if t1.TLSNextProto == nil { + t1.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper) + } + t1.TLSNextProto[NextProtoTLS] = func(authority string, c *tls.Conn) http.RoundTripper { + return upgradeFn("https", authority, c) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + t1.TLSNextProto[nextProtoUnencryptedHTTP2] = func(authority string, c *tls.Conn) http.RoundTripper { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + go c.Close() + return erringRoundTripper{err} } - } else { - m["h2"] = upgradeFn + return upgradeFn("http", authority, nc) } return t2, nil } +// unencryptedTransport is a Transport with a RoundTrip method that +// always permits http:// URLs. +type unencryptedTransport Transport + +func (t *unencryptedTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return (*Transport)(t).RoundTripOpt(req, RoundTripOpt{allowHTTP: true}) +} + func (t *Transport) connPool() ClientConnPool { t.connPoolOnce.Do(t.initConnPool) return t.connPoolOrDef @@ -324,7 +357,7 @@ type ClientConn struct { t *Transport tconn net.Conn // usually *tls.Conn, except specialized impls tlsState *tls.ConnectionState // nil only for specialized impls - reused uint32 // whether conn is being reused; atomic + atomicReused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request getConnCalled bool // used by clientConnPool @@ -364,6 +397,14 @@ type ClientConn struct { readIdleTimeout time.Duration pingTimeout time.Duration + // pendingResets is the number of RST_STREAM frames we have sent to the peer, + // without confirming that the peer has received them. When we send a RST_STREAM, + // we bundle it with a PING frame, unless a PING is already in flight. We count + // the reset stream against the connection's concurrency limit until we get + // a PING response. This limits the number of requests we'll try to send to a + // completely unresponsive connection. + pendingResets int + // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. // Lock reqmu BEFORE mu or wmu. 
@@ -420,12 +461,12 @@ type clientStream struct { sentHeaders bool // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) - num1xx uint8 // number of 1xx responses seen - readClosed bool // peer sent an END_STREAM flag - readAborted bool // read loop reset the stream + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + readClosed bool // peer sent an END_STREAM flag + readAborted bool // read loop reset the stream + totalHeaderSize int64 // total size of 1xx headers seen trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer @@ -530,6 +571,8 @@ type RoundTripOpt struct { // no cached connection is available, RoundTripOpt // will return ErrNoCachedConn. OnlyCachedConn bool + + allowHTTP bool // allow http:// URLs } func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { @@ -562,7 +605,14 @@ func authorityAddr(scheme string, authority string) (addr string) { // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + switch req.URL.Scheme { + case "https": + // Always okay. + case "http": + if !t.AllowHTTP && !opt.allowHTTP { + return nil, errors.New("http2: unencrypted HTTP/2 not enabled") + } + default: return nil, errors.New("http2: unsupported scheme") } @@ -573,7 +623,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } - reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) + reused := !atomic.CompareAndSwapUint32(&cc.atomicReused, 0, 1) traceGotConn(req, cc, reused) res, err := cc.RoundTrip(req) if err != nil && retry <= 6 { @@ -598,6 +648,22 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res } } } + if err == errClientConnNotEstablished { + // This ClientConn was created recently, + // this is the first request to use it, + // and the connection is closed and not usable. + // + // In this state, cc.idleTimer will remove the conn from the pool + // when it fires. Stop the timer and remove it here so future requests + // won't try to use this connection. + // + // If the timer has already fired and we're racing it, the redundant + // call to MarkDead is harmless. 
+ if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + t.connPool().MarkDead(cc) + } if err != nil { t.vlogf("RoundTrip failure: %v", err) return nil, err @@ -616,9 +682,10 @@ func (t *Transport) CloseIdleConnections() { } var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") - errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnNotEstablished = errors.New("http2: client conn could not be established") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") ) // shouldRetryRequest is called by RoundTrip when a request fails to get @@ -757,6 +824,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro pingTimeout: conf.PingTimeout, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), + lastActive: t.now(), } var group synctestGroupInterface if t.transportTestHooks != nil { @@ -960,7 +1028,7 @@ func (cc *ClientConn) State() ClientConnState { return ClientConnState{ Closed: cc.closed, Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil, - StreamsActive: len(cc.streams), + StreamsActive: len(cc.streams) + cc.pendingResets, StreamsReserved: cc.streamsReserved, StreamsPending: cc.pendingRequests, LastIdle: cc.lastIdle, @@ -992,16 +1060,38 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // writing it. maxConcurrentOkay = true } else { - maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams) + // We can take a new request if the total of + // - active streams; + // - reservation slots for new streams; and + // - streams for which we have sent a RST_STREAM and a PING, + // but received no subsequent frame + // is less than the concurrency limit. + maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) } st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && !cc.doNotReuse && int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && !cc.tooIdleLocked() + + // If this connection has never been used for a request and is closed, + // then let it take a request (which will fail). + // + // This avoids a situation where an error early in a connection's lifetime + // goes unreported. + if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed { + st.canTakeNewRequest = true + } + return } +// currentRequestCountLocked reports the number of concurrency slots currently in use, +// including active streams, reserved slots, and reset streams waiting for acknowledgement. +func (cc *ClientConn) currentRequestCountLocked() int { + return len(cc.streams) + cc.streamsReserved + cc.pendingResets +} + func (cc *ClientConn) canTakeNewRequestLocked() bool { st := cc.idleStateLocked() return st.canTakeNewRequest @@ -1014,7 +1104,7 @@ func (cc *ClientConn) tooIdleLocked() bool { // times are compared based on their wall time. We don't want // to reuse a connection that's been sitting idle during // VM/laptop suspend if monotonic time was also frozen. 
- return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout + return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout } // onIdleTimeout is called from a time.AfterFunc goroutine. It will @@ -1578,6 +1668,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { cs.reqBodyClosed = make(chan struct{}) } bodyClosed := cs.reqBodyClosed + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil cc.mu.Unlock() if mustCloseBody { cs.reqBody.Close() @@ -1602,16 +1693,40 @@ func (cs *clientStream) cleanupWriteRequest(err error) { if cs.sentHeaders { if se, ok := err.(StreamError); ok { if se.Cause != errFromPeer { - cc.writeStreamReset(cs.ID, se.Code, err) + cc.writeStreamReset(cs.ID, se.Code, false, err) } } else { - cc.writeStreamReset(cs.ID, ErrCodeCancel, err) + // We're cancelling an in-flight request. + // + // This could be due to the server becoming unresponsive. + // To avoid sending too many requests on a dead connection, + // we let the request continue to consume a concurrency slot + // until we can confirm the server is still responding. + // We do this by sending a PING frame along with the RST_STREAM + // (unless a ping is already in flight). + // + // For simplicity, we don't bother tracking the PING payload: + // We reset cc.pendingResets any time we receive a PING ACK. + // + // We skip this if the conn is going to be closed on idle, + // because it's short lived and will probably be closed before + // we get the ping response. + ping := false + if !closeOnIdle { + cc.mu.Lock() + if cc.pendingResets == 0 { + ping = true + } + cc.pendingResets++ + cc.mu.Unlock() + } + cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err) } } cs.bufPipe.CloseWithError(err) // no-op if already closed } else { if cs.sentHeaders && !cs.sentEndStream { - cc.writeStreamReset(cs.ID, ErrCodeNo, nil) + cc.writeStreamReset(cs.ID, ErrCodeNo, false, nil) } cs.bufPipe.CloseWithError(errRequestCanceled) } @@ -1633,12 +1748,17 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // Must hold cc.mu. func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { for { - cc.lastActive = time.Now() + if cc.closed && cc.nextStreamID == 1 && cc.streamsReserved == 0 { + // This is the very first request sent to this connection. + // Return a fatal error which aborts the retry loop. + return errClientConnNotEstablished + } + cc.lastActive = cc.t.now() if cc.closed || !cc.canTakeNewRequestLocked() { return errClientConnUnusable } cc.lastIdle = time.Time{} - if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) { + if cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) { return nil } cc.pendingRequests++ @@ -2180,10 +2300,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) { if len(cc.streams) != slen-1 { panic("forgetting unknown stream id") } - cc.lastActive = time.Now() + cc.lastActive = cc.t.now() if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = time.Now() + cc.lastIdle = cc.t.now() } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. 
@@ -2243,7 +2363,6 @@ func isEOFOrNetReadError(err error) bool { func (rl *clientConnReadLoop) cleanup() { cc := rl.cc - cc.t.connPool().MarkDead(cc) defer cc.closeConn() defer close(cc.readerDone) @@ -2267,6 +2386,24 @@ func (rl *clientConnReadLoop) cleanup() { } cc.closed = true + // If the connection has never been used, and has been open for only a short time, + // leave it in the connection pool for a little while. + // + // This avoids a situation where new connections are constantly created, + // added to the pool, fail, and are removed from the pool, without any error + // being surfaced to the user. + const unusedWaitTime = 5 * time.Second + idleTime := cc.t.now().Sub(cc.lastActive) + if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime { + cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { + cc.t.connPool().MarkDead(cc) + }) + } else { + cc.mu.Unlock() // avoid any deadlocks in MarkDead + cc.t.connPool().MarkDead(cc) + cc.mu.Lock() + } + for _, cs := range cc.streams { select { case <-cs.peerClosed: @@ -2494,15 +2631,34 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra if f.StreamEnded() { return nil, errors.New("1xx informational response with END_STREAM flag") } - cs.num1xx++ - const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http - if cs.num1xx > max1xxResponses { - return nil, errors.New("http2: too many 1xx informational responses") - } if fn := cs.get1xxTraceFunc(); fn != nil { + // If the 1xx response is being delivered to the user, + // then they're responsible for limiting the number + // of responses. if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { return nil, err } + } else { + // If the user didn't examine the 1xx response, then we + // limit the size of all 1xx headers. + // + // This differs a bit from the HTTP/1 implementation, which + // limits the size of all 1xx headers plus the final response. + // Use the larger limit of MaxHeaderListSize and + // net/http.Transport.MaxResponseHeaderBytes. + limit := int64(cs.cc.t.maxHeaderListSize()) + if t1 := cs.cc.t.t1; t1 != nil && t1.MaxResponseHeaderBytes > limit { + limit = t1.MaxResponseHeaderBytes + } + for _, h := range f.Fields { + cs.totalHeaderSize += int64(h.Size()) + } + if cs.totalHeaderSize > limit { + if VerboseLogs { + log.Printf("http2: 1xx informational responses too large") + } + return nil, errors.New("header list too large") + } } if statusCode == 100 { traceGot100Continue(cs.trace) @@ -3046,6 +3202,11 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error { close(c) delete(cc.pings, f.Data) } + if cc.pendingResets > 0 { + // See clientStream.cleanupWriteRequest. + cc.pendingResets = 0 + cc.cond.Broadcast() + } return nil } cc := rl.cc @@ -3068,13 +3229,20 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { return ConnectionError(ErrCodeProtocol) } -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { +// writeStreamReset sends a RST_STREAM frame. +// When ping is true, it also sends a PING frame with a random payload. +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, err error) { // TODO: map err to more interesting error codes, once the // HTTP community comes up with some. But currently for // RST_STREAM there's no equivalent to GOAWAY frame's debug // data, and the error codes are all pretty vague ("cancel"). 
cc.wmu.Lock() cc.fr.WriteRSTStream(streamID, code) + if ping { + var payload [8]byte + rand.Read(payload[:]) + cc.fr.WritePing(false, payload) + } cc.bw.Flush() cc.wmu.Unlock() } @@ -3228,7 +3396,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Since(cc.lastActive) + ci.IdleTime = cc.t.timeSince(cc.lastActive) } cc.mu.Unlock() diff --git a/vendor/golang.org/x/net/http2/unencrypted.go b/vendor/golang.org/x/net/http2/unencrypted.go new file mode 100644 index 0000000000..b2de211613 --- /dev/null +++ b/vendor/golang.org/x/net/http2/unencrypted.go @@ -0,0 +1,32 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "crypto/tls" + "errors" + "net" +) + +const nextProtoUnencryptedHTTP2 = "unencrypted_http2" + +// unencryptedNetConnFromTLSConn retrieves a net.Conn wrapped in a *tls.Conn. +// +// TLSNextProto functions accept a *tls.Conn. +// +// When passing an unencrypted HTTP/2 connection to a TLSNextProto function, +// we pass a *tls.Conn with an underlying net.Conn containing the unencrypted connection. +// To be extra careful about mistakes (accidentally dropping TLS encryption in a place +// where we want it), the tls.Conn contains a net.Conn with an UnencryptedNetConn method +// that returns the actual connection we want to use. +func unencryptedNetConnFromTLSConn(tc *tls.Conn) (net.Conn, error) { + conner, ok := tc.NetConn().(interface { + UnencryptedNetConn() net.Conn + }) + if !ok { + return nil, errors.New("http2: TLS conn unexpectedly found in unencrypted handoff") + } + return conner.UnencryptedNetConn(), nil +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5bbb332174..109997d77c 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -49,6 +49,13 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // raw optionally contains extra metadata from the server // when updating a token. raw interface{} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go index 18d1adb05d..a6b5ed0a89 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -344,7 +344,12 @@ func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (r } // UsesImport reports whether a given import is used. +// The provided File must have been parsed with syntactic object resolution +// (not using go/parser.SkipObjectResolution). 
func UsesImport(f *ast.File, path string) (used bool) { + if f.Scope == nil { + panic("file f was not parsed with syntactic object resolution") + } spec := importSpec(f, path) if spec == nil { return diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 137cc8df1d..f3ab0a2e12 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -2,22 +2,64 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package gcexportdata provides functions for locating, reading, and -// writing export data files containing type information produced by the -// gc compiler. This package supports go1.7 export data format and all -// later versions. -// -// Although it might seem convenient for this package to live alongside -// go/types in the standard library, this would cause version skew -// problems for developer tools that use it, since they must be able to -// consume the outputs of the gc compiler both before and after a Go -// update such as from Go 1.7 to Go 1.8. Because this package lives in -// golang.org/x/tools, sites can update their version of this repo some -// time before the Go 1.8 release and rebuild and redeploy their -// developer tools, which will then be able to consume both Go 1.7 and -// Go 1.8 export data files, so they will work before and after the -// Go update. (See discussion at https://golang.org/issue/15651.) -package gcexportdata // import "golang.org/x/tools/go/gcexportdata" +// Package gcexportdata provides functions for reading and writing +// export data, which is a serialized description of the API of a Go +// package including the names, kinds, types, and locations of all +// exported declarations. +// +// The standard Go compiler (cmd/compile) writes an export data file +// for each package it compiles, which it later reads when compiling +// packages that import the earlier one. The compiler must thus +// contain logic to both write and read export data. +// (See the "Export" section in the cmd/compile/README file.) +// +// The [Read] function in this package can read files produced by the +// compiler, producing [go/types] data structures. As a matter of +// policy, Read supports export data files produced by only the last +// two Go releases plus tip; see https://go.dev/issue/68898. The +// export data files produced by the compiler contain additional +// details related to generics, inlining, and other optimizations that +// cannot be decoded by the [Read] function. +// +// In files written by the compiler, the export data is not at the +// start of the file. Before calling Read, use [NewReader] to locate +// the desired portion of the file. +// +// The [Write] function in this package encodes the exported API of a +// Go package ([types.Package]) as a file. Such files can be later +// decoded by Read, but cannot be consumed by the compiler. +// +// # Future changes +// +// Although Read supports the formats written by both Write and the +// compiler, the two are quite different, and there is an open +// proposal (https://go.dev/issue/69491) to separate these APIs. 
+// +// Under that proposal, this package would ultimately provide only the +// Read operation for compiler export data, which must be defined in +// this module (golang.org/x/tools), not in the standard library, to +// avoid version skew for developer tools that need to read compiler +// export data both before and after a Go release, such as from Go +// 1.23 to Go 1.24. Because this package lives in the tools module, +// clients can update their version of the module some time before the +// Go 1.24 release and rebuild and redeploy their tools, which will +// then be able to consume both Go 1.23 and Go 1.24 export data files, +// so they will work before and after the Go update. (See discussion +// at https://go.dev/issue/15651.) +// +// The operations to import and export [go/types] data structures +// would be defined in the go/types package as Import and Export. +// [Write] would (eventually) delegate to Export, +// and [Read], when it detects a file produced by Export, +// would delegate to Import. +// +// # Deprecations +// +// The [NewImporter] and [Find] functions are deprecated and should +// not be used in new code. The [WriteBundle] and [ReadBundle] +// functions are experimental, and there is an open proposal to +// deprecate them (https://go.dev/issue/69573). +package gcexportdata import ( "bufio" @@ -100,6 +142,11 @@ func readAll(r io.Reader) ([]byte, error) { // Read reads export data from in, decodes it, and returns type // information for the package. // +// Read is capable of reading export data produced by [Write] at the +// same source code version, or by the last two Go releases (plus tip) +// of the standard Go compiler. Reading files from older compilers may +// produce an error. +// // The package path (effectively its linker symbol prefix) is // specified by path, since unlike the package name, this information // may not be recorded in the export data. @@ -128,14 +175,26 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // (from "version"). Select appropriate importer. if len(data) > 0 { switch data[0] { - case 'v', 'c', 'd': // binary, till go1.10 + case 'v', 'c', 'd': + // binary, produced by cmd/compile till go1.10 return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'i': // indexed, till go1.19 + case 'i': + // indexed, produced by cmd/compile till go1.19, + // and also by [Write]. + // + // If proposal #69491 is accepted, go/types + // serialization will be implemented by + // types.Export, to which Write would eventually + // delegate (explicitly dropping any pretence at + // inter-version Write-Read compatibility). + // This [Read] function would delegate to types.Import + // when it detects that the file was produced by Export. _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) return pkg, err - case 'u': // unified, from go1.20 + case 'u': + // unified, produced by cmd/compile since go1.20 _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) return pkg, err diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 8f7afcb5df..96db9daf31 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -79,7 +79,7 @@ type DriverResponse struct { // driver is the type for functions that query the build system for the // packages named by the patterns. 
-type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) +type driver func(cfg *Config, patterns []string) (*DriverResponse, error) // findExternalDriver returns the file path of a tool that supplies // the build system package structure, or "" if not found. @@ -103,7 +103,7 @@ func findExternalDriver(cfg *Config) driver { return nil } } - return func(cfg *Config, words ...string) (*DriverResponse, error) { + return func(cfg *Config, patterns []string) (*DriverResponse, error) { req, err := json.Marshal(DriverRequest{ Mode: cfg.Mode, Env: cfg.Env, @@ -117,7 +117,7 @@ func findExternalDriver(cfg *Config) driver { buf := new(bytes.Buffer) stderr := new(bytes.Buffer) - cmd := exec.CommandContext(cfg.Context, tool, words...) + cmd := exec.CommandContext(cfg.Context, tool, patterns...) cmd.Dir = cfg.Dir // The cwd gets resolved to the real path. On Darwin, where // /tmp is a symlink, this breaks anything that expects the diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 1a3a5b44f5..76f910ecec 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -80,6 +80,12 @@ type golistState struct { cfg *Config ctx context.Context + runner *gocommand.Runner + + // overlay is the JSON file that encodes the Config.Overlay + // mapping, used by 'go list -overlay=...'. + overlay string + envOnce sync.Once goEnvError error goEnv map[string]string @@ -127,7 +133,10 @@ func (state *golistState) mustGetEnv() map[string]string { // goListDriver uses the go list command to interpret the patterns and produce // the build system package structure. // See driver for more details. -func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) { +// +// overlay is the JSON file that encodes the cfg.Overlay +// mapping, used by 'go list -overlay=...' +func goListDriver(cfg *Config, runner *gocommand.Runner, overlay string, patterns []string) (_ *DriverResponse, err error) { // Make sure that any asynchronous go commands are killed when we return. parentCtx := cfg.Context if parentCtx == nil { @@ -142,13 +151,15 @@ func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error cfg: cfg, ctx: ctx, vendorDirs: map[string]bool{}, + overlay: overlay, + runner: runner, } // Fill in response.Sizes asynchronously if necessary. - if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { errCh := make(chan error) go func() { - compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), cfg.gocmdRunner) + compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), runner) response.dr.Compiler = compiler response.dr.Arch = arch errCh <- err @@ -681,7 +692,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { // getGoVersion returns the effective minor version of the go command. 
func (state *golistState) getGoVersion() (int, error) { state.goVersionOnce.Do(func() { - state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner) + state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.runner) }) return state.goVersion, state.goVersionError } @@ -751,7 +762,7 @@ func jsonFlag(cfg *Config, goVersion int) string { } } addFields("Name", "ImportPath", "Error") // These fields are always needed - if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 { + if cfg.Mode&NeedFiles != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles", "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles", "SwigFiles", "SwigCXXFiles", "SysoFiles") @@ -759,7 +770,7 @@ func jsonFlag(cfg *Config, goVersion int) string { addFields("TestGoFiles", "XTestGoFiles") } } - if cfg.Mode&NeedTypes != 0 { + if cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax, // even when -compiled isn't passed in. // TODO(#52435): Should we make the test ask for -compiled, or automatically @@ -840,7 +851,7 @@ func (state *golistState) cfgInvocation() gocommand.Invocation { Env: cfg.Env, Logf: cfg.Logf, WorkingDir: cfg.Dir, - Overlay: cfg.goListOverlayFile, + Overlay: state.overlay, } } @@ -851,11 +862,8 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, inv := state.cfgInvocation() inv.Verb = verb inv.Args = args - gocmdRunner := cfg.gocmdRunner - if gocmdRunner == nil { - gocmdRunner = &gocommand.Runner{} - } - stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv) + + stdout, stderr, friendlyErr, err := state.runner.RunRaw(cfg.Context, inv) if err != nil { // Check for 'go' executable not being found. if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { @@ -879,6 +887,12 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, return nil, friendlyErr } + // Return an error if 'go list' failed due to missing tools in + // $GOROOT/pkg/tool/$GOOS_$GOARCH (#69606). + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), `go: no such tool`) { + return nil, friendlyErr + } + // Is there an error running the C compiler in cgo? This will be reported in the "Error" field // and should be suppressed by go list -e. // diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index f227f1bab1..2ecc64238e 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -16,13 +16,13 @@ import ( "go/scanner" "go/token" "go/types" - "io" "log" "os" "path/filepath" "runtime" "strings" "sync" + "sync/atomic" "time" "golang.org/x/sync/errgroup" @@ -31,7 +31,6 @@ import ( "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" "golang.org/x/tools/internal/typesinternal" - "golang.org/x/tools/internal/versions" ) // A LoadMode controls the amount of detail to return when loading. @@ -56,7 +55,7 @@ const ( // NeedName adds Name and PkgPath. NeedName LoadMode = 1 << iota - // NeedFiles adds GoFiles and OtherFiles. + // NeedFiles adds GoFiles, OtherFiles, and IgnoredFiles NeedFiles // NeedCompiledGoFiles adds CompiledGoFiles. @@ -78,7 +77,7 @@ const ( // NeedSyntax adds Syntax and Fset. NeedSyntax - // NeedTypesInfo adds TypesInfo. 
+ // NeedTypesInfo adds TypesInfo and Fset. NeedTypesInfo // NeedTypesSizes adds TypesSizes. @@ -145,13 +144,7 @@ const ( // A Config specifies details about how packages should be loaded. // The zero value is a valid configuration. // -// Calls to Load do not modify this struct. -// -// TODO(adonovan): #67702: this is currently false: in fact, -// calls to [Load] do not modify the public fields of this struct, but -// may modify hidden fields, so concurrent calls to [Load] must not -// use the same Config. But perhaps we should reestablish the -// documented invariant. +// Calls to [Load] do not modify this struct. type Config struct { // Mode controls the level of information returned for each package. Mode LoadMode @@ -182,19 +175,10 @@ type Config struct { // Env []string - // gocmdRunner guards go command calls from concurrency errors. - gocmdRunner *gocommand.Runner - // BuildFlags is a list of command-line flags to be passed through to // the build system's query tool. BuildFlags []string - // modFile will be used for -modfile in go command invocations. - modFile string - - // modFlag will be used for -modfile in go command invocations. - modFlag string - // Fset provides source position information for syntax trees and types. // If Fset is nil, Load will use a new fileset, but preserve Fset's value. Fset *token.FileSet @@ -241,9 +225,13 @@ type Config struct { // drivers may vary in their level of support for overlays. Overlay map[string][]byte - // goListOverlayFile is the JSON file that encodes the Overlay - // mapping, used by 'go list -overlay=...' - goListOverlayFile string + // -- Hidden configuration fields only for use in x/tools -- + + // modFile will be used for -modfile in go command invocations. + modFile string + + // modFlag will be used for -modfile in go command invocations. + modFlag string } // Load loads and returns the Go packages named by the given patterns. @@ -334,21 +322,24 @@ func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, erro } else if !response.NotHandled { return response, true, nil } - // (fall through) + // not handled: fall through } // go list fallback - // + // Write overlays once, as there are many calls // to 'go list' (one per chunk plus others too). - overlay, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay) + overlayFile, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay) if err != nil { return nil, false, err } defer cleanupOverlay() - cfg.goListOverlayFile = overlay - response, err := callDriverOnChunks(goListDriver, cfg, chunks) + var runner gocommand.Runner // (shared across many 'go list' calls) + driver := func(cfg *Config, patterns []string) (*DriverResponse, error) { + return goListDriver(cfg, &runner, overlayFile, patterns) + } + response, err := callDriverOnChunks(driver, cfg, chunks) if err != nil { return nil, false, err } @@ -386,16 +377,14 @@ func splitIntoChunks(patterns []string, argMax int) ([][]string, error) { func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) { if len(chunks) == 0 { - return driver(cfg) + return driver(cfg, nil) } responses := make([]*DriverResponse, len(chunks)) errNotHandled := errors.New("driver returned NotHandled") var g errgroup.Group for i, chunk := range chunks { - i := i - chunk := chunk g.Go(func() (err error) { - responses[i], err = driver(cfg, chunk...) 
+ responses[i], err = driver(cfg, chunk) + if responses[i] != nil && responses[i].NotHandled { err = errNotHandled } @@ -692,18 +681,19 @@ func (p *Package) String() string { return p.ID } // loaderPackage augments Package with state used during the loading phase type loaderPackage struct { *Package - importErrors map[string]error // maps each bad import to its error - loadOnce sync.Once - color uint8 // for cycle detection - needsrc bool // load from source (Mode >= LoadTypes) - needtypes bool // type information is either requested or depended on - initial bool // package was matched by a pattern - goVersion int // minor version number of go command on PATH + importErrors map[string]error // maps each bad import to its error + preds []*loaderPackage // packages that import this one + unfinishedSuccs atomic.Int32 // number of direct imports not yet loaded + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern + goVersion int // minor version number of go command on PATH } // loader holds the working state of a single call to load. type loader struct { - pkgs map[string]*loaderPackage + pkgs map[string]*loaderPackage // keyed by Package.ID Config sizes types.Sizes // non-nil if needed by mode parseCache map[string]*parseValue @@ -749,9 +739,6 @@ func newLoader(cfg *Config) *loader { if ld.Config.Env == nil { ld.Config.Env = os.Environ() } - if ld.Config.gocmdRunner == nil { - ld.Config.gocmdRunner = &gocommand.Runner{} - } if ld.Context == nil { ld.Context = context.Background() } @@ -765,7 +752,7 @@ func newLoader(cfg *Config) *loader { ld.requestedMode = ld.Mode ld.Mode = impliedLoadMode(ld.Mode) - if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { if ld.Fset == nil { ld.Fset = token.NewFileSet() } @@ -806,7 +793,7 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe" // This package needs type information if the caller requested types and the package is // either a root, or it's a non-root and the user requested dependencies ... - needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) + needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) // This package needs source if the call requested source (or types info, which implies source) // and the package is either a root, or it's a non-root and the user requested dependencies... needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || @@ -831,9 +818,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { } } - if ld.Mode&NeedImports != 0 { - // Materialize the import graph. - + // Materialize the import graph if it is needed (NeedImports), + // or if we'll be using loadPackages (Need{Syntax|Types|TypesInfo}). + var leaves []*loaderPackage // packages with no unfinished successors + if ld.Mode&(NeedImports|NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { const ( white = 0 // new grey = 1 // in progress @@ -852,63 +840,76 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // dependency on a package that does. These are the only packages // for which we load source code.
var stack []*loaderPackage - var visit func(lpkg *loaderPackage) bool - visit = func(lpkg *loaderPackage) bool { - switch lpkg.color { - case black: - return lpkg.needsrc - case grey: + var visit func(from, lpkg *loaderPackage) bool + visit = func(from, lpkg *loaderPackage) bool { + if lpkg.color == grey { panic("internal error: grey node") } - lpkg.color = grey - stack = append(stack, lpkg) // push - stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports - lpkg.Imports = make(map[string]*Package, len(stubs)) - for importPath, ipkg := range stubs { - var importErr error - imp := ld.pkgs[ipkg.ID] - if imp == nil { - // (includes package "C" when DisableCgo) - importErr = fmt.Errorf("missing package: %q", ipkg.ID) - } else if imp.color == grey { - importErr = fmt.Errorf("import cycle: %s", stack) + if lpkg.color == white { + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) + } + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) + } + lpkg.importErrors[importPath] = importErr + continue + } + + if visit(lpkg, imp) { + lpkg.needsrc = true + } + lpkg.Imports[importPath] = imp.Package } - if importErr != nil { - if lpkg.importErrors == nil { - lpkg.importErrors = make(map[string]error) + + // -- postorder -- + + // Complete type information is required for the + // immediate dependencies of each source package. + if lpkg.needsrc && ld.Mode&NeedTypes != 0 { + for _, ipkg := range lpkg.Imports { + ld.pkgs[ipkg.ID].needtypes = true } - lpkg.importErrors[importPath] = importErr - continue } - if visit(imp) { - lpkg.needsrc = true + // NeedTypeSizes causes TypeSizes to be set even + // on packages for which types aren't needed. + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes } - lpkg.Imports[importPath] = imp.Package - } - // Complete type information is required for the - // immediate dependencies of each source package. - if lpkg.needsrc && ld.Mode&NeedTypes != 0 { - for _, ipkg := range lpkg.Imports { - ld.pkgs[ipkg.ID].needtypes = true + // Add packages with no imports directly to the queue of leaves. + if len(lpkg.Imports) == 0 { + leaves = append(leaves, lpkg) } + + stack = stack[:len(stack)-1] // pop + lpkg.color = black } - // NeedTypeSizes causes TypeSizes to be set even - // on packages for which types aren't needed. - if ld.Mode&NeedTypesSizes != 0 { - lpkg.TypesSizes = ld.sizes + // Add edge from predecessor. + if from != nil { + from.unfinishedSuccs.Add(+1) // incref + lpkg.preds = append(lpkg.preds, from) } - stack = stack[:len(stack)-1] // pop - lpkg.color = black return lpkg.needsrc } // For each initial package, create its import DAG. for _, lpkg := range initial { - visit(lpkg) + visit(nil, lpkg) } } else { @@ -921,16 +922,45 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // Load type data and syntax if needed, starting at // the initial packages (roots of the import DAG). 
- if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { - var wg sync.WaitGroup - for _, lpkg := range initial { - wg.Add(1) - go func(lpkg *loaderPackage) { - ld.loadRecursive(lpkg) - wg.Done() - }(lpkg) + if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { + + // We avoid using g.SetLimit to limit concurrency as + // it makes g.Go stop accepting work, which prevents + // workers from enqeuing, and thus finishing, and thus + // allowing the group to make progress: deadlock. + // + // Instead we use the ioLimit and cpuLimit semaphores. + g, _ := errgroup.WithContext(ld.Context) + + // enqueues adds a package to the type-checking queue. + // It must have no unfinished successors. + var enqueue func(*loaderPackage) + enqueue = func(lpkg *loaderPackage) { + g.Go(func() error { + // Parse and type-check. + ld.loadPackage(lpkg) + + // Notify each waiting predecessor, + // and enqueue it when it becomes a leaf. + for _, pred := range lpkg.preds { + if pred.unfinishedSuccs.Add(-1) == 0 { // decref + enqueue(pred) + } + } + + return nil + }) + } + + // Load leaves first, adding new packages + // to the queue as they become leaves. + for _, leaf := range leaves { + enqueue(leaf) + } + + if err := g.Wait(); err != nil { + return nil, err // cancelled } - wg.Wait() } // If the context is done, return its error and @@ -977,7 +1007,7 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { if ld.requestedMode&NeedSyntax == 0 { ld.pkgs[i].Syntax = nil } - if ld.requestedMode&NeedTypes == 0 && ld.requestedMode&NeedSyntax == 0 { + if ld.requestedMode&(NeedSyntax|NeedTypes|NeedTypesInfo) == 0 { ld.pkgs[i].Fset = nil } if ld.requestedMode&NeedTypesInfo == 0 { @@ -994,31 +1024,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { return result, nil } -// loadRecursive loads the specified package and its dependencies, -// recursively, in parallel, in topological order. -// It is atomic and idempotent. -// Precondition: ld.Mode&NeedTypes. -func (ld *loader) loadRecursive(lpkg *loaderPackage) { - lpkg.loadOnce.Do(func() { - // Load the direct dependencies, in parallel. - var wg sync.WaitGroup - for _, ipkg := range lpkg.Imports { - imp := ld.pkgs[ipkg.ID] - wg.Add(1) - go func(imp *loaderPackage) { - ld.loadRecursive(imp) - wg.Done() - }(imp) - } - wg.Wait() - ld.loadPackage(lpkg) - }) -} - -// loadPackage loads the specified package. +// loadPackage loads/parses/typechecks the specified package. // It must be called only once per Package, // after immediate dependencies are loaded. -// Precondition: ld.Mode & NeedTypes. +// Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0. func (ld *loader) loadPackage(lpkg *loaderPackage) { if lpkg.PkgPath == "unsafe" { // Fill in the blanks to avoid surprises. @@ -1054,6 +1063,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { if !lpkg.needtypes && !lpkg.needsrc { return } + + // TODO(adonovan): this condition looks wrong: + // I think it should be lpkg.needtypes && !lpg.needsrc, + // so that NeedSyntax without NeedTypes can be satisfied by export data. 
if !lpkg.needsrc { if err := ld.loadFromExportData(lpkg); err != nil { lpkg.Errors = append(lpkg.Errors, Error{ @@ -1159,7 +1172,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } lpkg.Syntax = files - if ld.Config.Mode&NeedTypes == 0 { + if ld.Config.Mode&(NeedTypes|NeedTypesInfo) == 0 { return } @@ -1170,16 +1183,20 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { return } - lpkg.TypesInfo = &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Instances: make(map[*ast.Ident]types.Instance), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), + // Populate TypesInfo only if needed, as it + // causes the type checker to work much harder. + if ld.Config.Mode&NeedTypesInfo != 0 { + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + FileVersions: make(map[*ast.File]string), + } } - versions.InitFileVersions(lpkg.TypesInfo) lpkg.TypesSizes = ld.sizes importer := importerFunc(func(path string) (*types.Package, error) { @@ -1232,6 +1249,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } } + // Type-checking is CPU intensive. + cpuLimit <- unit{} // acquire a token + defer func() { <-cpuLimit }() // release a token + typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) lpkg.importErrors = nil // no longer needed @@ -1296,8 +1317,11 @@ type importerFunc func(path string) (*types.Package, error) func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } // We use a counting semaphore to limit -// the number of parallel I/O calls per process. -var ioLimit = make(chan bool, 20) +// the number of parallel I/O calls or CPU threads per process. +var ( + ioLimit = make(chan unit, 20) + cpuLimit = make(chan unit, runtime.GOMAXPROCS(0)) +) func (ld *loader) parseFile(filename string) (*ast.File, error) { ld.parseCacheMu.Lock() @@ -1314,20 +1338,28 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { var src []byte for f, contents := range ld.Config.Overlay { + // TODO(adonovan): Inefficient for large overlays. + // Do an exact name-based map lookup + // (for nonexistent files) followed by a + // FileID-based map lookup (for existing ones). if sameFile(f, filename) { src = contents + break } } var err error if src == nil { - ioLimit <- true // wait + ioLimit <- unit{} // acquire a token src, err = os.ReadFile(filename) - <-ioLimit // signal + <-ioLimit // release a token } if err != nil { v.err = err } else { + // Parsing is CPU intensive. + cpuLimit <- unit{} // acquire a token v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + <-cpuLimit // release a token } close(v.ready) @@ -1342,18 +1374,21 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { // Because files are scanned in parallel, the token.Pos // positions of the resulting ast.Files are not ordered. 
func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { - var wg sync.WaitGroup - n := len(filenames) - parsed := make([]*ast.File, n) - errors := make([]error, n) - for i, file := range filenames { - wg.Add(1) - go func(i int, filename string) { + var ( + n = len(filenames) + parsed = make([]*ast.File, n) + errors = make([]error, n) + ) + var g errgroup.Group + for i, filename := range filenames { + // This creates goroutines unnecessarily in the + // cache-hit case, but that case is uncommon. + g.Go(func() error { parsed[i], errors[i] = ld.parseFile(filename) - wg.Done() - }(i, file) + return nil + }) } - wg.Wait() + g.Wait() // Eliminate nils, preserving order. var o int @@ -1524,4 +1559,4 @@ func usesExportData(cfg *Config) bool { return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 } -var _ interface{} = io.Discard // assert build toolchain is go1.16 or later +type unit struct{} diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index a70b727f2c..16ed3c1780 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -281,25 +281,25 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { T := o.Type() if alias, ok := T.(*types.Alias); ok { - if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam, nil); r != nil { + if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam); r != nil { return Path(r), nil } - if r := find(obj, aliases.Rhs(alias), append(path, opRhs), nil); r != nil { + if r := find(obj, aliases.Rhs(alias), append(path, opRhs)); r != nil { return Path(r), nil } } else if tname.IsAlias() { // legacy alias - if r := find(obj, T, path, nil); r != nil { + if r := find(obj, T, path); r != nil { return Path(r), nil } } else if named, ok := T.(*types.Named); ok { // defined (named) type - if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam, nil); r != nil { + if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam); r != nil { return Path(r), nil } - if r := find(obj, named.Underlying(), append(path, opUnderlying), nil); r != nil { + if r := find(obj, named.Underlying(), append(path, opUnderlying)); r != nil { return Path(r), nil } } @@ -312,7 +312,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { if _, ok := o.(*types.TypeName); !ok { if o.Exported() { // exported non-type (const, var, func) - if r := find(obj, o.Type(), append(path, opType), nil); r != nil { + if r := find(obj, o.Type(), append(path, opType)); r != nil { return Path(r), nil } } @@ -332,7 +332,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { if m == obj { return Path(path2), nil // found declared method } - if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + if r := find(obj, m.Type(), append(path2, opType)); r != nil { return Path(r), nil } } @@ -447,46 +447,64 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // // The seen map is used to short circuit cycles through type parameters. If // nil, it will be allocated as necessary. -func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { +// +// The seenMethods map is used internally to short circuit cycles through +// interface methods, such as occur in the following example: +// +// type I interface { f() interface{I} } +// +// See golang/go#68046 for details. 
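The go/packages hunks above replace the old loadRecursive/sync.Once recursion with a leaves-first scheduler: every package counts its not-yet-loaded imports, leaves are enqueued on an errgroup, a finished package decrements its predecessors, and buffered channels cap I/O and CPU parallelism. A minimal sketch of that pattern, using hypothetical names rather than the loader's own types:

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"

	"golang.org/x/sync/errgroup"
)

// node mirrors the per-package bookkeeping: the nodes that import it
// (preds) and how many of its own imports are still unprocessed
// (unfinishedSuccs). All names here are illustrative only.
type node struct {
	name            string
	preds           []*node
	unfinishedSuccs atomic.Int32
}

// cpuLimit bounds parallel CPU-bound work, analogous to the semaphore
// acquired around parsing and type-checking.
var cpuLimit = make(chan struct{}, runtime.GOMAXPROCS(0))

// run processes the DAG bottom-up: leaves are enqueued first, and a node
// becomes runnable when its last remaining import finishes.
func run(leaves []*node, work func(*node)) error {
	var g errgroup.Group
	var enqueue func(*node)
	enqueue = func(n *node) {
		g.Go(func() error {
			cpuLimit <- struct{}{} // acquire a token
			work(n)
			<-cpuLimit // release the token
			for _, p := range n.preds {
				if p.unfinishedSuccs.Add(-1) == 0 { // last import done
					enqueue(p)
				}
			}
			return nil
		})
	}
	for _, leaf := range leaves {
		enqueue(leaf)
	}
	return g.Wait()
}

func main() {
	// a imports b, b imports c; only c starts as a leaf.
	a := &node{name: "a"}
	b := &node{name: "b", preds: []*node{a}}
	c := &node{name: "c", preds: []*node{b}}
	a.unfinishedSuccs.Store(1)
	b.unfinishedSuccs.Store(1)
	_ = run([]*node{c}, func(n *node) { fmt.Println("checked", n.name) })
}

The design point mirrored here is that g.SetLimit is avoided: if g.Go blocked, a running worker could not enqueue the predecessor it just unblocked and the group would deadlock, so a buffered channel bounds CPU work instead.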
+func find(obj types.Object, T types.Type, path []byte) []byte { + return (&finder{obj: obj}).find(T, path) +} + +// finder closes over search state for a call to find. +type finder struct { + obj types.Object // the sought object + seenTParamNames map[*types.TypeName]bool // for cycle breaking through type parameters + seenMethods map[*types.Func]bool // for cycle breaking through recursive interfaces +} + +func (f *finder) find(T types.Type, path []byte) []byte { switch T := T.(type) { case *types.Alias: - return find(obj, types.Unalias(T), path, seen) + return f.find(types.Unalias(T), path) case *types.Basic, *types.Named: // Named types belonging to pkg were handled already, // so T must belong to another package. No path. return nil case *types.Pointer: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Slice: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Array: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Chan: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Map: - if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { + if r := f.find(T.Key(), append(path, opKey)); r != nil { return r } - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Signature: - if r := findTypeParam(obj, T.RecvTypeParams(), path, opRecvTypeParam, nil); r != nil { + if r := f.findTypeParam(T.RecvTypeParams(), path, opRecvTypeParam); r != nil { return r } - if r := findTypeParam(obj, T.TypeParams(), path, opTypeParam, seen); r != nil { + if r := f.findTypeParam(T.TypeParams(), path, opTypeParam); r != nil { return r } - if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { + if r := f.find(T.Params(), append(path, opParams)); r != nil { return r } - return find(obj, T.Results(), append(path, opResults), seen) + return f.find(T.Results(), append(path, opResults)) case *types.Struct: for i := 0; i < T.NumFields(); i++ { fld := T.Field(i) path2 := appendOpArg(path, opField, i) - if fld == obj { + if fld == f.obj { return path2 // found field var } - if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { + if r := f.find(fld.Type(), append(path2, opType)); r != nil { return r } } @@ -495,10 +513,10 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] for i := 0; i < T.Len(); i++ { v := T.At(i) path2 := appendOpArg(path, opAt, i) - if v == obj { + if v == f.obj { return path2 // found param/result var } - if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { + if r := f.find(v.Type(), append(path2, opType)); r != nil { return r } } @@ -506,28 +524,35 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] case *types.Interface: for i := 0; i < T.NumMethods(); i++ { m := T.Method(i) + if f.seenMethods[m] { + return nil + } path2 := appendOpArg(path, opMethod, i) - if m == obj { + if m == f.obj { return path2 // found interface method } - if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { + if f.seenMethods == nil { + f.seenMethods = make(map[*types.Func]bool) + } + f.seenMethods[m] = true + if r := f.find(m.Type(), append(path2, opType)); r != nil { return r } } return nil case *types.TypeParam: name := T.Obj() - if name == obj { - return 
append(path, opObj) - } - if seen[name] { + if f.seenTParamNames[name] { return nil } - if seen == nil { - seen = make(map[*types.TypeName]bool) + if name == f.obj { + return append(path, opObj) } - seen[name] = true - if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { + if f.seenTParamNames == nil { + f.seenTParamNames = make(map[*types.TypeName]bool) + } + f.seenTParamNames[name] = true + if r := f.find(T.Constraint(), append(path, opConstraint)); r != nil { return r } return nil @@ -535,11 +560,15 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] panic(T) } -func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte, seen map[*types.TypeName]bool) []byte { +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte) []byte { + return (&finder{obj: obj}).findTypeParam(list, path, op) +} + +func (f *finder) findTypeParam(list *types.TypeParamList, path []byte, op byte) []byte { for i := 0; i < list.Len(); i++ { tparam := list.At(i) path2 := appendOpArg(path, op, i) - if r := find(obj, tparam, path2, seen); r != nil { + if r := f.find(tparam, path2); r != nil { return r } } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 1e19fbed8e..7dfc31a37d 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -246,6 +246,26 @@ import ( // IExportShallow encodes "shallow" export data for the specified package. // +// For types, we use "shallow" export data. Historically, the Go +// compiler always produced a summary of the types for a given package +// that included types from other packages that it indirectly +// referenced: "deep" export data. This had the advantage that the +// compiler (and analogous tools such as gopls) need only load one +// file per direct import. However, it meant that the files tended to +// get larger based on the level of the package in the import +// graph. For example, higher-level packages in the kubernetes module +// have over 1MB of "deep" export data, even when they have almost no +// content of their own, merely because they mention a major type that +// references many others. In pathological cases the export data was +// 300x larger than the source for a package due to this quadratic +// growth. +// +// "Shallow" export data means that the serialized types describe only +// a single package. If those types mention types from other packages, +// the type checker may need to request additional packages beyond +// just the direct imports. Type information for the entire transitive +// closure of imports is provided (lazily) by the DAG. +// // No promises are made about the encoding other than that it can be decoded by // the same version of IIExportShallow. If you plan to save export data in the // file system, be sure to include a cryptographic digest of the executable in @@ -268,8 +288,8 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) } // IImportShallow decodes "shallow" types.Package data encoded by -// IExportShallow in the same executable. This function cannot import data from -// cmd/compile or gcexportdata.Write. +// [IExportShallow] in the same executable. This function cannot import data +// from cmd/compile or gcexportdata.Write. 
// // The importer calls getPackages to obtain package symbols for all // packages mentioned in the export data, including the one being diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 21908a158b..e260c0e8db 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -558,6 +558,14 @@ type importReader struct { prevColumn int64 } +// markBlack is redefined in iimport_go123.go, to work around golang/go#69912. +// +// If TypeNames are not marked black (in the sense of go/types cycle +// detection), they may be mutated when dot-imported. Fix this by punching a +// hole through the type, when compiling with Go 1.23. (The bug has been fixed +// for 1.24, but the fix was not worth back-porting). +var markBlack = func(name *types.TypeName) {} + func (r *importReader) obj(name string) { tag := r.byte() pos := r.pos() @@ -570,6 +578,7 @@ func (r *importReader) obj(name string) { } typ := r.typ() obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams) + markBlack(obj) // workaround for golang/go#69912 r.declare(obj) case constTag: @@ -590,6 +599,9 @@ func (r *importReader) obj(name string) { // declaration before recursing. obj := types.NewTypeName(pos, r.currPkg, name, nil) named := types.NewNamed(obj, nil, nil) + + markBlack(obj) // workaround for golang/go#69912 + // Declare obj before calling r.tparamList, so the new type name is recognized // if used in the constraint of one of its own typeparams (see #48280). r.declare(obj) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go new file mode 100644 index 0000000000..7586bfaca6 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go @@ -0,0 +1,53 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 && !go1.24 + +package gcimporter + +import ( + "go/token" + "go/types" + "unsafe" +) + +// TODO(rfindley): delete this workaround once go1.24 is assured. + +func init() { + // Update markBlack so that it correctly sets the color + // of imported TypeNames. + // + // See the doc comment for markBlack for details. + + type color uint32 + const ( + white color = iota + black + grey + ) + type object struct { + _ *types.Scope + _ token.Pos + _ *types.Package + _ string + _ types.Type + _ uint32 + color_ color + _ token.Pos + } + type typeName struct { + object + } + + // If the size of types.TypeName changes, this will fail to compile. 
+ const delta = int64(unsafe.Sizeof(typeName{})) - int64(unsafe.Sizeof(types.TypeName{})) + var _ [-delta * delta]int + + markBlack = func(obj *types.TypeName) { + type uP = unsafe.Pointer + var ptr *typeName + *(*uP)(uP(&ptr)) = uP(obj) + ptr.color_ = black + } +} diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index c15108178a..5ae576977a 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -27,7 +27,6 @@ import ( "unicode" "unicode/utf8" - "golang.org/x/sync/errgroup" "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/gocommand" @@ -91,18 +90,6 @@ type ImportFix struct { Relevance float64 // see pkg } -// An ImportInfo represents a single import statement. -type ImportInfo struct { - ImportPath string // import path, e.g. "crypto/rand". - Name string // import name, e.g. "crand", or "" if none. -} - -// A packageInfo represents what's known about a package. -type packageInfo struct { - name string // real package name, if known. - exports map[string]bool // known exports. -} - // parseOtherFiles parses all the Go files in srcDir except filename, including // test files if filename looks like a test. // @@ -162,8 +149,8 @@ func addGlobals(f *ast.File, globals map[string]bool) { // collectReferences builds a map of selector expressions, from // left hand side (X) to a set of right hand sides (Sel). -func collectReferences(f *ast.File) references { - refs := references{} +func collectReferences(f *ast.File) References { + refs := References{} var visitor visitFn visitor = func(node ast.Node) ast.Visitor { @@ -233,7 +220,7 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo { allFound := true for right := range syms { - if !pkgInfo.exports[right] { + if !pkgInfo.Exports[right] { allFound = false break } @@ -246,11 +233,6 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo { return nil } -// references is set of references found in a Go file. The first map key is the -// left hand side of a selector expression, the second key is the right hand -// side, and the value should always be true. -type references map[string]map[string]bool - // A pass contains all the inputs and state necessary to fix a file's imports. // It can be modified in some ways during use; see comments below. type pass struct { @@ -258,27 +240,29 @@ type pass struct { fset *token.FileSet // fset used to parse f and its siblings. f *ast.File // the file being fixed. srcDir string // the directory containing f. - env *ProcessEnv // the environment to use for go commands, etc. - loadRealPackageNames bool // if true, load package names from disk rather than guessing them. - otherFiles []*ast.File // sibling files. + logf func(string, ...any) + source Source // the environment to use for go commands, etc. + loadRealPackageNames bool // if true, load package names from disk rather than guessing them. + otherFiles []*ast.File // sibling files. + goroot string // Intermediate state, generated by load. existingImports map[string][]*ImportInfo - allRefs references - missingRefs references + allRefs References + missingRefs References // Inputs to fix. These can be augmented between successive fix calls. lastTry bool // indicates that this is the last call and fix should clean up as best it can. candidates []*ImportInfo // candidate imports in priority order. 
- knownPackages map[string]*packageInfo // information about all known packages. + knownPackages map[string]*PackageInfo // information about all known packages. } // loadPackageNames saves the package names for everything referenced by imports. -func (p *pass) loadPackageNames(imports []*ImportInfo) error { - if p.env.Logf != nil { - p.env.Logf("loading package names for %v packages", len(imports)) +func (p *pass) loadPackageNames(ctx context.Context, imports []*ImportInfo) error { + if p.logf != nil { + p.logf("loading package names for %v packages", len(imports)) defer func() { - p.env.Logf("done loading package names for %v packages", len(imports)) + p.logf("done loading package names for %v packages", len(imports)) }() } var unknown []string @@ -289,20 +273,17 @@ func (p *pass) loadPackageNames(imports []*ImportInfo) error { unknown = append(unknown, imp.ImportPath) } - resolver, err := p.env.GetResolver() - if err != nil { - return err - } - - names, err := resolver.loadPackageNames(unknown, p.srcDir) + names, err := p.source.LoadPackageNames(ctx, p.srcDir, unknown) if err != nil { return err } + // TODO(rfindley): revisit this. Why do we need to store known packages with + // no exports? The inconsistent data is confusing. for path, name := range names { - p.knownPackages[path] = &packageInfo{ - name: name, - exports: map[string]bool{}, + p.knownPackages[path] = &PackageInfo{ + Name: name, + Exports: map[string]bool{}, } } return nil @@ -330,8 +311,8 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { return imp.Name } known := p.knownPackages[imp.ImportPath] - if known != nil && known.name != "" { - return withoutVersion(known.name) + if known != nil && known.Name != "" { + return withoutVersion(known.Name) } return ImportPathToAssumedName(imp.ImportPath) } @@ -339,9 +320,9 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { // load reads in everything necessary to run a pass, and reports whether the // file already has all the imports it needs. It fills in p.missingRefs with the // file's missing symbols, if any, or removes unused imports if not. -func (p *pass) load() ([]*ImportFix, bool) { - p.knownPackages = map[string]*packageInfo{} - p.missingRefs = references{} +func (p *pass) load(ctx context.Context) ([]*ImportFix, bool) { + p.knownPackages = map[string]*PackageInfo{} + p.missingRefs = References{} p.existingImports = map[string][]*ImportInfo{} // Load basic information about the file in question. @@ -364,9 +345,11 @@ func (p *pass) load() ([]*ImportFix, bool) { // f's imports by the identifier they introduce. imports := collectImports(p.f) if p.loadRealPackageNames { - err := p.loadPackageNames(append(imports, p.candidates...)) + err := p.loadPackageNames(ctx, append(imports, p.candidates...)) if err != nil { - p.env.logf("loading package names: %v", err) + if p.logf != nil { + p.logf("loading package names: %v", err) + } return nil, false } } @@ -535,9 +518,10 @@ func (p *pass) assumeSiblingImportsValid() { // We have the stdlib in memory; no need to guess. rights = symbolNameSet(m) } - p.addCandidate(imp, &packageInfo{ + // TODO(rfindley): we should set package name here, for consistency. + p.addCandidate(imp, &PackageInfo{ // no name; we already know it. - exports: rights, + Exports: rights, }) } } @@ -546,14 +530,14 @@ func (p *pass) assumeSiblingImportsValid() { // addCandidate adds a candidate import to p, and merges in the information // in pkg. 
-func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) { +func (p *pass) addCandidate(imp *ImportInfo, pkg *PackageInfo) { p.candidates = append(p.candidates, imp) if existing, ok := p.knownPackages[imp.ImportPath]; ok { - if existing.name == "" { - existing.name = pkg.name + if existing.Name == "" { + existing.Name = pkg.Name } - for export := range pkg.exports { - existing.exports[export] = true + for export := range pkg.Exports { + existing.Exports[export] = true } } else { p.knownPackages[imp.ImportPath] = pkg @@ -581,19 +565,42 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P // getFixes gets the import fixes that need to be made to f in order to fix the imports. // It does not modify the ast. func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) { + source, err := NewProcessEnvSource(env, filename, f.Name.Name) + if err != nil { + return nil, err + } + goEnv, err := env.goEnv() + if err != nil { + return nil, err + } + return getFixesWithSource(ctx, fset, f, filename, goEnv["GOROOT"], env.logf, source) +} + +func getFixesWithSource(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, goroot string, logf func(string, ...any), source Source) ([]*ImportFix, error) { + // This logic is defensively duplicated from getFixes. abs, err := filepath.Abs(filename) if err != nil { return nil, err } srcDir := filepath.Dir(abs) - env.logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir) + + if logf != nil { + logf("fixImports(filename=%q), srcDir=%q ...", filename, abs, srcDir) + } // First pass: looking only at f, and using the naive algorithm to // derive package names from import paths, see if the file is already // complete. We can't add any imports yet, because we don't know // if missing references are actually package vars. - p := &pass{fset: fset, f: f, srcDir: srcDir, env: env} - if fixes, done := p.load(); done { + p := &pass{ + fset: fset, + f: f, + srcDir: srcDir, + logf: logf, + goroot: goroot, + source: source, + } + if fixes, done := p.load(ctx); done { return fixes, nil } @@ -605,7 +612,7 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st // Second pass: add information from other files in the same package, // like their package vars and imports. p.otherFiles = otherFiles - if fixes, done := p.load(); done { + if fixes, done := p.load(ctx); done { return fixes, nil } @@ -618,10 +625,17 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st // Third pass: get real package names where we had previously used // the naive algorithm. 
- p = &pass{fset: fset, f: f, srcDir: srcDir, env: env} + p = &pass{ + fset: fset, + f: f, + srcDir: srcDir, + logf: logf, + goroot: goroot, + source: p.source, // safe to reuse, as it's just a wrapper around env + } p.loadRealPackageNames = true p.otherFiles = otherFiles - if fixes, done := p.load(); done { + if fixes, done := p.load(ctx); done { return fixes, nil } @@ -835,7 +849,7 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP return true }, dirFound: func(pkg *pkg) bool { - return pkgIsCandidate(filename, references{searchPkg: nil}, pkg) + return pkgIsCandidate(filename, References{searchPkg: nil}, pkg) }, packageNameLoaded: func(pkg *pkg) bool { return pkg.packageName == searchPkg @@ -1086,11 +1100,7 @@ func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) return e.GocmdRunner.Run(ctx, inv) } -func addStdlibCandidates(pass *pass, refs references) error { - goenv, err := pass.env.goEnv() - if err != nil { - return err - } +func addStdlibCandidates(pass *pass, refs References) error { localbase := func(nm string) string { ans := path.Base(nm) if ans[0] == 'v' { @@ -1105,13 +1115,13 @@ func addStdlibCandidates(pass *pass, refs references) error { } add := func(pkg string) { // Prevent self-imports. - if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir { + if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.goroot, "src", pkg) == pass.srcDir { return } exports := symbolNameSet(stdlib.PackageSymbols[pkg]) pass.addCandidate( &ImportInfo{ImportPath: pkg}, - &packageInfo{name: localbase(pkg), exports: exports}) + &PackageInfo{Name: localbase(pkg), Exports: exports}) } for left := range refs { if left == "rand" { @@ -1175,91 +1185,14 @@ type scanCallback struct { exportsLoaded func(pkg *pkg, exports []stdlib.Symbol) } -func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error { +func addExternalCandidates(ctx context.Context, pass *pass, refs References, filename string) error { ctx, done := event.Start(ctx, "imports.addExternalCandidates") defer done() - var mu sync.Mutex - found := make(map[string][]pkgDistance) - callback := &scanCallback{ - rootFound: func(gopathwalk.Root) bool { - return true // We want everything. - }, - dirFound: func(pkg *pkg) bool { - return pkgIsCandidate(filename, refs, pkg) - }, - packageNameLoaded: func(pkg *pkg) bool { - if _, want := refs[pkg.packageName]; !want { - return false - } - if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName { - // The candidate is in the same directory and has the - // same package name. Don't try to import ourselves. - return false - } - if !canUse(filename, pkg.dir) { - return false - } - mu.Lock() - defer mu.Unlock() - found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)}) - return false // We'll do our own loading after we sort. - }, - } - resolver, err := pass.env.GetResolver() + results, err := pass.source.ResolveReferences(ctx, filename, refs) if err != nil { return err } - if err = resolver.scan(ctx, callback); err != nil { - return err - } - - // Search for imports matching potential package references. 
- type result struct { - imp *ImportInfo - pkg *packageInfo - } - results := make([]*result, len(refs)) - - g, ctx := errgroup.WithContext(ctx) - - searcher := symbolSearcher{ - logf: pass.env.logf, - srcDir: pass.srcDir, - xtest: strings.HasSuffix(pass.f.Name.Name, "_test"), - loadExports: resolver.loadExports, - } - - i := 0 - for pkgName, symbols := range refs { - index := i // claim an index in results - i++ - pkgName := pkgName - symbols := symbols - - g.Go(func() error { - found, err := searcher.search(ctx, found[pkgName], pkgName, symbols) - if err != nil { - return err - } - if found == nil { - return nil // No matching package. - } - - imp := &ImportInfo{ - ImportPath: found.importPathShort, - } - pkg := &packageInfo{ - name: pkgName, - exports: symbols, - } - results[index] = &result{imp, pkg} - return nil - }) - } - if err := g.Wait(); err != nil { - return err - } for _, result := range results { if result == nil { @@ -1267,7 +1200,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil } // Don't offer completions that would shadow predeclared // names, such as github.com/coreos/etcd/error. - if types.Universe.Lookup(result.pkg.name) != nil { // predeclared + if types.Universe.Lookup(result.Package.Name) != nil { // predeclared // Ideally we would skip this candidate only // if the predeclared name is actually // referenced by the file, but that's a lot @@ -1276,7 +1209,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil // user before long. continue } - pass.addCandidate(result.imp, result.pkg) + pass.addCandidate(result.Import, result.Package) } return nil } @@ -1801,7 +1734,7 @@ func (s *symbolSearcher) searchOne(ctx context.Context, c pkgDistance, symbols m // filename is the file being formatted. // pkgIdent is the package being searched for, like "client" (if // searching for "client.New") -func pkgIsCandidate(filename string, refs references, pkg *pkg) bool { +func pkgIsCandidate(filename string, refs References, pkg *pkg) bool { // Check "internal" and "vendor" visibility: if !canUse(filename, pkg.dir) { return false diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index ff6b59a58a..2215a12880 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -47,7 +47,14 @@ type Options struct { // Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env. func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) { fileSet := token.NewFileSet() - file, adjust, err := parse(fileSet, filename, src, opt) + var parserMode parser.Mode + if opt.Comments { + parserMode |= parser.ParseComments + } + if opt.AllErrors { + parserMode |= parser.AllErrors + } + file, adjust, err := parse(fileSet, filename, src, parserMode, opt.Fragment) if err != nil { return nil, err } @@ -66,17 +73,19 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e // // Note that filename's directory influences which imports can be chosen, // so it is important that filename be accurate. 
-func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) { +func FixImports(ctx context.Context, filename string, src []byte, goroot string, logf func(string, ...any), source Source) (fixes []*ImportFix, err error) { ctx, done := event.Start(ctx, "imports.FixImports") defer done() fileSet := token.NewFileSet() - file, _, err := parse(fileSet, filename, src, opt) + // TODO(rfindley): these default values for ParseComments and AllErrors were + // extracted from gopls, but are they even needed? + file, _, err := parse(fileSet, filename, src, parser.ParseComments|parser.AllErrors, true) if err != nil { return nil, err } - return getFixes(ctx, fileSet, file, filename, opt.Env) + return getFixesWithSource(ctx, fileSet, file, filename, goroot, logf, source) } // ApplyFixes applies all of the fixes to the file and formats it. extraMode @@ -114,7 +123,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e // formatted file, and returns the postpocessed result. func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) { mergeImports(file) - sortImports(opt.LocalPrefix, fset.File(file.Pos()), file) + sortImports(opt.LocalPrefix, fset.File(file.FileStart), file) var spacesBefore []string // import paths we need spaces before for _, impSection := range astutil.Imports(fset, file) { // Within each block of contiguous imports, see if any @@ -164,13 +173,9 @@ func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(ori // parse parses src, which was read from filename, // as a Go source file or statement list. -func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) { - var parserMode parser.Mode // legacy ast.Object resolution is required here - if opt.Comments { - parserMode |= parser.ParseComments - } - if opt.AllErrors { - parserMode |= parser.AllErrors +func parse(fset *token.FileSet, filename string, src []byte, parserMode parser.Mode, fragment bool) (*ast.File, func(orig, src []byte) []byte, error) { + if parserMode&parser.SkipObjectResolution != 0 { + panic("legacy ast.Object resolution is required") } // Try as whole source file. @@ -181,7 +186,7 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast // If the error is that the source file didn't begin with a // package line and we accept fragmented input, fall through to // try as a source fragment. Stop and return on any other error. - if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") { + if !fragment || !strings.Contains(err.Error(), "expected 'package'") { return nil, nil, err } diff --git a/vendor/golang.org/x/tools/internal/imports/source.go b/vendor/golang.org/x/tools/internal/imports/source.go new file mode 100644 index 0000000000..5d2aeeebc9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/source.go @@ -0,0 +1,63 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import "context" + +// These types document the APIs below. +// +// TODO(rfindley): consider making these defined types rather than aliases. +type ( + ImportPath = string + PackageName = string + Symbol = string + + // References is set of References found in a Go file. 
The first map key is the + // left hand side of a selector expression, the second key is the right hand + // side, and the value should always be true. + References = map[PackageName]map[Symbol]bool +) + +// A Result satisfies a missing import. +// +// The Import field describes the missing import spec, and the Package field +// summarizes the package exports. +type Result struct { + Import *ImportInfo + Package *PackageInfo +} + +// An ImportInfo represents a single import statement. +type ImportInfo struct { + ImportPath string // import path, e.g. "crypto/rand". + Name string // import name, e.g. "crand", or "" if none. +} + +// A PackageInfo represents what's known about a package. +type PackageInfo struct { + Name string // package name in the package declaration, if known + Exports map[string]bool // set of names of known package level sortSymbols +} + +// A Source provides imports to satisfy unresolved references in the file being +// fixed. +type Source interface { + // LoadPackageNames queries PackageName information for the requested import + // paths, when operating from the provided srcDir. + // + // TODO(rfindley): try to refactor to remove this operation. + LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) + + // ResolveReferences asks the Source for the best package name to satisfy + // each of the missing references, in the context of fixing the given + // filename. + // + // Returns a map from package name to a [Result] for that package name that + // provides the required symbols. Keys may be omitted in the map if no + // candidates satisfy all missing references for that package name. It is up + // to each data source to select the best result for each entry in the + // missing map. + ResolveReferences(ctx context.Context, filename string, missing References) (map[PackageName]*Result, error) +} diff --git a/vendor/golang.org/x/tools/internal/imports/source_env.go b/vendor/golang.org/x/tools/internal/imports/source_env.go new file mode 100644 index 0000000000..ff9555d287 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/imports/source_env.go @@ -0,0 +1,125 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "context" + "path/filepath" + "strings" + "sync" + + "golang.org/x/sync/errgroup" + "golang.org/x/tools/internal/gopathwalk" +) + +// ProcessEnvSource implements the [Source] interface using the legacy +// [ProcessEnv] abstraction. +type ProcessEnvSource struct { + env *ProcessEnv + srcDir string + filename string + pkgName string +} + +// NewProcessEnvSource returns a [ProcessEnvSource] wrapping the given +// env, to be used for fixing imports in the file with name filename in package +// named pkgName. 
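The new Source interface above decouples import fixing from ProcessEnv. As an illustration of the contract only (staticSource and its fields are hypothetical and not part of the vendored code), a fixed-table implementation inside this package might look like:

package imports

import "context"

// staticSource is a hypothetical Source backed by fixed tables. It only
// illustrates the contract: LoadPackageNames answers name queries, and
// ResolveReferences returns at most one Result per missing package name,
// and only when every missing symbol is covered.
type staticSource struct {
	names   map[ImportPath]PackageName      // e.g. "crypto/rand" -> "rand"
	paths   map[PackageName]ImportPath      // e.g. "rand" -> "crypto/rand"
	exports map[PackageName]map[Symbol]bool // e.g. "rand" -> {"Int": true}
}

func (s *staticSource) LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) {
	out := make(map[ImportPath]PackageName, len(paths))
	for _, p := range paths {
		if name, ok := s.names[p]; ok {
			out[p] = name
		}
	}
	return out, nil
}

func (s *staticSource) ResolveReferences(ctx context.Context, filename string, missing References) (map[PackageName]*Result, error) {
	results := make(map[PackageName]*Result)
	for pkgName, symbols := range missing {
		exports, ok := s.exports[pkgName]
		if !ok {
			continue // no candidate package with this name
		}
		covered := true
		for sym := range symbols {
			if !exports[sym] {
				covered = false // a needed symbol is missing; omit this key
				break
			}
		}
		if covered {
			results[pkgName] = &Result{
				Import:  &ImportInfo{ImportPath: s.paths[pkgName]},
				Package: &PackageInfo{Name: pkgName, Exports: exports},
			}
		}
	}
	return results, nil
}

var _ Source = (*staticSource)(nil) // compile-time check that the contract is met

ResolveReferences omits a package name entirely when its exports cannot cover every missing symbol, matching the documented behaviour that keys may be absent from the result map.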
+func NewProcessEnvSource(env *ProcessEnv, filename, pkgName string) (*ProcessEnvSource, error) { + abs, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + srcDir := filepath.Dir(abs) + return &ProcessEnvSource{ + env: env, + srcDir: srcDir, + filename: filename, + pkgName: pkgName, + }, nil +} + +func (s *ProcessEnvSource) LoadPackageNames(ctx context.Context, srcDir string, unknown []string) (map[string]string, error) { + r, err := s.env.GetResolver() + if err != nil { + return nil, err + } + return r.loadPackageNames(unknown, srcDir) +} + +func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) (map[string]*Result, error) { + var mu sync.Mutex + found := make(map[string][]pkgDistance) + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true // We want everything. + }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, refs, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + if _, want := refs[pkg.packageName]; !want { + return false + } + if pkg.dir == s.srcDir && s.pkgName == pkg.packageName { + // The candidate is in the same directory and has the + // same package name. Don't try to import ourselves. + return false + } + if !canUse(filename, pkg.dir) { + return false + } + mu.Lock() + defer mu.Unlock() + found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(s.srcDir, pkg.dir)}) + return false // We'll do our own loading after we sort. + }, + } + resolver, err := s.env.GetResolver() + if err != nil { + return nil, err + } + if err := resolver.scan(ctx, callback); err != nil { + return nil, err + } + + g, ctx := errgroup.WithContext(ctx) + + searcher := symbolSearcher{ + logf: s.env.logf, + srcDir: s.srcDir, + xtest: strings.HasSuffix(s.pkgName, "_test"), + loadExports: resolver.loadExports, + } + + var resultMu sync.Mutex + results := make(map[string]*Result, len(refs)) + for pkgName, symbols := range refs { + g.Go(func() error { + found, err := searcher.search(ctx, found[pkgName], pkgName, symbols) + if err != nil { + return err + } + if found == nil { + return nil // No matching package. + } + + imp := &ImportInfo{ + ImportPath: found.importPathShort, + } + pkg := &PackageInfo{ + Name: pkgName, + Exports: symbols, + } + resultMu.Lock() + results[pkgName] = &Result{Import: imp, Package: pkg} + resultMu.Unlock() + return nil + }) + } + if err := g.Wait(); err != nil { + return nil, err + } + return results, nil +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/free.go b/vendor/golang.org/x/tools/internal/typeparams/free.go index 358108268b..0ade5c2949 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/free.go +++ b/vendor/golang.org/x/tools/internal/typeparams/free.go @@ -6,6 +6,8 @@ package typeparams import ( "go/types" + + "golang.org/x/tools/internal/aliases" ) // Free is a memoization of the set of free type parameters within a @@ -36,6 +38,18 @@ func (w *Free) Has(typ types.Type) (res bool) { break case *types.Alias: + if aliases.TypeParams(t).Len() > aliases.TypeArgs(t).Len() { + return true // This is an uninstantiated Alias. + } + // The expansion of an alias can have free type parameters, + // whether or not the alias itself has type parameters: + // + // func _[K comparable]() { + // type Set = map[K]bool // free(Set) = {K} + // type MapTo[V] = map[K]V // free(Map[foo]) = {V} + // } + // + // So, we must Unalias. 
return w.Has(types.Unalias(t)) case *types.Array: @@ -96,9 +110,8 @@ func (w *Free) Has(typ types.Type) (res bool) { case *types.Named: args := t.TypeArgs() - // TODO(taking): this does not match go/types/infer.go. Check with rfindley. if params := t.TypeParams(); params.Len() > args.Len() { - return true + return true // this is an uninstantiated named type. } for i, n := 0, args.Len(); i < n; i++ { if w.Has(args.At(i)) { diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index 8392328612..df3ea52125 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -11,6 +11,8 @@ import ( "go/types" "reflect" "unsafe" + + "golang.org/x/tools/internal/aliases" ) func SetUsesCgo(conf *types.Config) bool { @@ -63,3 +65,57 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier { return other.Name() } } + +// A NamedOrAlias is a [types.Type] that is named (as +// defined by the spec) and capable of bearing type parameters: it +// abstracts aliases ([types.Alias]) and defined types +// ([types.Named]). +// +// Every type declared by an explicit "type" declaration is a +// NamedOrAlias. (Built-in type symbols may additionally +// have type [types.Basic], which is not a NamedOrAlias, +// though the spec regards them as "named".) +// +// NamedOrAlias cannot expose the Origin method, because +// [types.Alias.Origin] and [types.Named.Origin] have different +// (covariant) result types; use [Origin] instead. +type NamedOrAlias interface { + types.Type + Obj() *types.TypeName +} + +// TypeParams is a light shim around t.TypeParams(). +// (go/types.Alias).TypeParams requires >= 1.23. +func TypeParams(t NamedOrAlias) *types.TypeParamList { + switch t := t.(type) { + case *types.Alias: + return aliases.TypeParams(t) + case *types.Named: + return t.TypeParams() + } + return nil +} + +// TypeArgs is a light shim around t.TypeArgs(). +// (go/types.Alias).TypeArgs requires >= 1.23. +func TypeArgs(t NamedOrAlias) *types.TypeList { + switch t := t.(type) { + case *types.Alias: + return aliases.TypeArgs(t) + case *types.Named: + return t.TypeArgs() + } + return nil +} + +// Origin returns the generic type of the Named or Alias type t if it +// is instantiated, otherwise it returns t. +func Origin(t NamedOrAlias) NamedOrAlias { + switch t := t.(type) { + case *types.Alias: + return aliases.Origin(t) + case *types.Named: + return t.Origin() + } + return t +} diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go index f0bb0d15f0..0fc10ce4eb 100644 --- a/vendor/golang.org/x/tools/internal/versions/types.go +++ b/vendor/golang.org/x/tools/internal/versions/types.go @@ -31,8 +31,3 @@ func FileVersion(info *types.Info, file *ast.File) string { // This would act as a max version on what a tool can support. return Future } - -// InitFileVersions initializes info to record Go versions for Go files. -func InitFileVersions(info *types.Info) { - info.FileVersions = make(map[*ast.File]string) -} diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 0854d298e4..d9bfa6e1e7 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -4,7 +4,7 @@ We definitely welcome your patches and contributions to gRPC! 
Please read the gR organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. -If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) +If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) ## Legal requirements @@ -25,8 +25,8 @@ How to get your contributions merged smoothly and quickly. is a great place to start. These issues are well-documented and usually can be resolved with a single pull request. -- If you are adding a new file, make sure it has the copyright message template - at the top as a comment. You can copy over the message from an existing file +- If you are adding a new file, make sure it has the copyright message template + at the top as a comment. You can copy over the message from an existing file and update the year. - The grpc package should only depend on standard Go packages and a small number @@ -39,12 +39,12 @@ How to get your contributions merged smoothly and quickly. proposal](https://github.com/grpc/proposal). - Provide a good **PR description** as a record of **what** change is being made - and **why** it was made. Link to a github issue if it exists. + and **why** it was made. Link to a GitHub issue if it exists. -- If you want to fix formatting or style, consider whether your changes are an - obvious improvement or might be considered a personal preference. If a style - change is based on preference, it likely will not be accepted. If it corrects - widely agreed-upon anti-patterns, then please do create a PR and explain the +- If you want to fix formatting or style, consider whether your changes are an + obvious improvement or might be considered a personal preference. If a style + change is based on preference, it likely will not be accepted. If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the benefits of the change. - Unless your PR is trivial, you should expect there will be reviewer comments diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index b181f386a1..3a2092f105 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -130,7 +130,7 @@ type SubConn interface { // UpdateAddresses updates the addresses used in this SubConn. // gRPC checks if currently-connected address is still in the new list. // If it's in the list, the connection will be kept. - // If it's not in the list, the connection will gracefully closed, and + // If it's not in the list, the connection will gracefully close, and // a new connection will be created. // // This will trigger a state transition for the SubConn. @@ -142,8 +142,11 @@ type SubConn interface { Connect() // GetOrBuildProducer returns a reference to the existing Producer for this // ProducerBuilder in this SubConn, or, if one does not currently exist, - // creates a new one and returns it. Returns a close function which must - // be called when the Producer is no longer needed. + // creates a new one and returns it. Returns a close function which may be + // called when the Producer is no longer needed. Otherwise the producer + // will automatically be closed upon connection loss or subchannel close. 
+ // Should only be called on a SubConn in state Ready. Otherwise the + // producer will be unable to create streams. GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) // Shutdown shuts down the SubConn gracefully. Any started RPCs will be // allowed to complete. No future calls should be made on the SubConn. @@ -452,8 +455,10 @@ type ProducerBuilder interface { // Build creates a Producer. The first parameter is always a // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the // associated SubConn), but is declared as `any` to avoid a dependency - // cycle. Should also return a close function that will be called when all - // references to the Producer have been given up. + // cycle. Build also returns a close function that will be called when all + // references to the Producer have been given up for a SubConn, or when a + // connectivity state change occurs on the SubConn. The close function + // should always block until all asynchronous cleanup work is completed. Build(grpcClientConnInterface any) (p Producer, close func()) } diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index 2b87bd79c7..d5ed172ae6 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -133,7 +133,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { } } // If resolver state contains no addresses, return an error so ClientConn - // will trigger re-resolve. Also records this as an resolver error, so when + // will trigger re-resolve. Also records this as a resolver error, so when // the overall state turns transient failure, the error message will have // the zero address information. if len(s.ResolverState.Addresses) == 0 { diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go new file mode 100644 index 0000000000..c519789458 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go @@ -0,0 +1,24 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains code internal to the pickfirst package. +package internal + +import "math/rand" + +// RandShuffle pseudo-randomizes the order of addresses. 
+var RandShuffle = rand.Shuffle diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 4d69b4052f..e069346a75 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -26,18 +26,23 @@ import ( "math/rand" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + + _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required. ) func init() { + if envconfig.NewPickFirstEnabled { + return + } balancer.Register(pickfirstBuilder{}) - internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } } var logger = grpclog.Component("pick-first-lb") @@ -103,10 +108,13 @@ func (b *pickfirstBalancer) ResolverError(err error) { }) } +// Shuffler is an interface for shuffling an address list. type Shuffler interface { ShuffleAddressListForTesting(n int, swap func(i, j int)) } +// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n +// is the number of elements. swap swaps the elements with indexes i and j. func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { @@ -140,7 +148,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // within each endpoint. - A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) - internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } // "Flatten the list by concatenating the ordered list of addresses for each diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go new file mode 100644 index 0000000000..985b6edc7f --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -0,0 +1,625 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pickfirstleaf contains the pick_first load balancing policy which +// will be the universal leaf policy after dualstack changes are implemented. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. 
+package pickfirstleaf + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst/internal" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +func init() { + if envconfig.NewPickFirstEnabled { + // Register as the default pick_first balancer. + Name = "pick_first" + } + balancer.Register(pickfirstBuilder{}) +} + +var ( + logger = grpclog.Component("pick-first-leaf-lb") + // Name is the name of the pick_first_leaf balancer. + // It is changed to "pick_first" in init() if this balancer is to be + // registered as the default pickfirst. + Name = "pick_first_leaf" +) + +// TODO: change to pick-first when this becomes the default pick_first policy. +const logPrefix = "[pick-first-leaf-lb %p] " + +type pickfirstBuilder struct{} + +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{ + cc: cc, + addressList: addressList{}, + subConns: resolver.NewAddressMap(), + state: connectivity.Connecting, + mu: sync.Mutex{}, + } + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b +} + +func (b pickfirstBuilder) Name() string { + return Name +} + +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of endpoints received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +// scData keeps track of the current state of the subConn. +// It is not safe for concurrent access. +type scData struct { + // The following fields are initialized at build time and read-only after + // that. + subConn balancer.SubConn + addr resolver.Address + + state connectivity.State + lastErr error +} + +func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { + sd := &scData{ + state: connectivity.Idle, + addr: addr, + } + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(sd, state) + }, + }) + if err != nil { + return nil, err + } + sd.subConn = sc + return sd, nil +} + +type pickfirstBalancer struct { + // The following fields are initialized at build time and read-only after + // that and therefore do not need to be guarded by a mutex. + logger *internalgrpclog.PrefixLogger + cc balancer.ClientConn + + // The mutex is used to ensure synchronization of updates triggered + // from the idle picker and the already serialized resolver, + // SubConn state updates. + mu sync.Mutex + state connectivity.State + // scData for active subonns mapped by address. 
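
ParseConfig above accepts the same pfConfig JSON as the existing policy, so the optional address shuffling can be switched on through a default service config. A hedged sketch, continuing the imports from the previous example (target and option values are illustrative only):

    // shuffleAddressList is the knob parsed by ParseConfig above.
    cc, err := grpc.NewClient("dns:///example.internal:50051",
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.WithDefaultServiceConfig(`{
          "loadBalancingConfig": [
            {"pick_first": {"shuffleAddressList": true}}
          ]
        }`))
    if err != nil {
        log.Fatalf("grpc.NewClient: %v", err)
    }
    defer cc.Close()
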
+ subConns *resolver.AddressMap + addressList addressList + firstPass bool + numTF int +} + +// ResolverError is called by the ClientConn when the name resolver produces +// an error or when pickfirst determined the resolver update to be invalid. +func (b *pickfirstBalancer) ResolverError(err error) { + b.mu.Lock() + defer b.mu.Unlock() + b.resolverErrorLocked(err) +} + +func (b *pickfirstBalancer) resolverErrorLocked(err error) { + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) + } + + // The picker will not change since the balancer does not currently + // report an error. If the balancer hasn't received a single good resolver + // update yet, transition to TRANSIENT_FAILURE. + if b.state != connectivity.TransientFailure && b.addressList.size() > 0 { + if b.logger.V(2) { + b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.") + } + return + } + + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) +} + +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + b.mu.Lock() + defer b.mu.Unlock() + if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { + // Cleanup state pertaining to the previous resolver state. + // Treat an empty address list like an error by calling b.ResolverError. + b.state = connectivity.TransientFailure + b.closeSubConnsLocked() + b.addressList.updateAddrs(nil) + b.resolverErrorLocked(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) + } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + + var newAddrs []resolver.Address + if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { + // Perform the optional shuffling described in gRFC A62. The shuffling + // will change the order of endpoints but not touch the order of the + // addresses within each endpoint. - A61 + if cfg.ShuffleAddressList { + endpoints = append([]resolver.Endpoint{}, endpoints...) + internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + } + + // "Flatten the list by concatenating the ordered list of addresses for + // each of the endpoints, in order." - A61 + for _, endpoint := range endpoints { + // "In the flattened list, interleave addresses from the two address + // families, as per RFC-8305 section 4." - A61 + // TODO: support the above language. + newAddrs = append(newAddrs, endpoint.Addresses...) + } + } else { + // Endpoints not set, process addresses until we migrate resolver + // emissions fully to Endpoints. The top channel does wrap emitted + // addresses with endpoints, however some balancers such as weighted + // target do not forward the corresponding correct endpoints down/split + // endpoints properly. Once all balancers correctly forward endpoints + // down, can delete this else conditional. + newAddrs = state.ResolverState.Addresses + if cfg.ShuffleAddressList { + newAddrs = append([]resolver.Address{}, newAddrs...) 
+ internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + } + } + + // If an address appears in multiple endpoints or in the same endpoint + // multiple times, we keep it only once. We will create only one SubConn + // for the address because an AddressMap is used to store SubConns. + // Not de-duplicating would result in attempting to connect to the same + // SubConn multiple times in the same pass. We don't want this. + newAddrs = deDupAddresses(newAddrs) + + // Since we have a new set of addresses, we are again at first pass. + b.firstPass = true + + // If the previous ready SubConn exists in new address list, + // keep this connection and don't create new SubConns. + prevAddr := b.addressList.currentAddress() + prevAddrsCount := b.addressList.size() + b.addressList.updateAddrs(newAddrs) + if b.state == connectivity.Ready && b.addressList.seekTo(prevAddr) { + return nil + } + + b.reconcileSubConnsLocked(newAddrs) + // If it's the first resolver update or the balancer was already READY + // (but the new address list does not contain the ready SubConn) or + // CONNECTING, enter CONNECTING. + // We may be in TRANSIENT_FAILURE due to a previous empty address list, + // we should still enter CONNECTING because the sticky TF behaviour + // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported + // due to connectivity failures. + if b.state == connectivity.Ready || b.state == connectivity.Connecting || prevAddrsCount == 0 { + // Start connection attempt at first address. + b.state = connectivity.Connecting + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + b.requestConnectionLocked() + } else if b.state == connectivity.TransientFailure { + // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until + // we're READY. See A62. + b.requestConnectionLocked() + } + return nil +} + +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) Close() { + b.mu.Lock() + defer b.mu.Unlock() + b.closeSubConnsLocked() + b.state = connectivity.Shutdown +} + +// ExitIdle moves the balancer out of idle state. It can be called concurrently +// by the idlePicker and clientConn so access to variables should be +// synchronized. +func (b *pickfirstBalancer) ExitIdle() { + b.mu.Lock() + defer b.mu.Unlock() + if b.state == connectivity.Idle && b.addressList.currentAddress() == b.addressList.first() { + b.firstPass = true + b.requestConnectionLocked() + } +} + +func (b *pickfirstBalancer) closeSubConnsLocked() { + for _, sd := range b.subConns.Values() { + sd.(*scData).subConn.Shutdown() + } + b.subConns = resolver.NewAddressMap() +} + +// deDupAddresses ensures that each address appears only once in the slice. +func deDupAddresses(addrs []resolver.Address) []resolver.Address { + seenAddrs := resolver.NewAddressMap() + retAddrs := []resolver.Address{} + + for _, addr := range addrs { + if _, ok := seenAddrs.Get(addr); ok { + continue + } + retAddrs = append(retAddrs, addr) + } + return retAddrs +} + +// reconcileSubConnsLocked updates the active subchannels based on a new address +// list from the resolver. 
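
The resolver handling above follows gRFC A61: the ordered endpoint list is optionally shuffled, flattened into a single address list, and de-duplicated so at most one SubConn is created per address. A standalone sketch of that flatten-and-de-duplicate step (an illustration of the intent, not the vendored code; it assumes the google.golang.org/grpc/resolver import):

    // flattenAndDedup concatenates the endpoints' addresses in order and keeps
    // each address only once, mirroring the A61 flattening described above.
    func flattenAndDedup(endpoints []resolver.Endpoint) []resolver.Address {
        seen := resolver.NewAddressMap()
        var out []resolver.Address
        for _, ep := range endpoints {
            for _, addr := range ep.Addresses {
                if _, dup := seen.Get(addr); dup {
                    continue
                }
                seen.Set(addr, true)
                out = append(out, addr)
            }
        }
        return out
    }
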
It does this by: +// - closing subchannels: any existing subchannels associated with addresses +// that are no longer in the updated list are shut down. +// - removing subchannels: entries for these closed subchannels are removed +// from the subchannel map. +// +// This ensures that the subchannel map accurately reflects the current set of +// addresses received from the name resolver. +func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { + newAddrsMap := resolver.NewAddressMap() + for _, addr := range newAddrs { + newAddrsMap.Set(addr, true) + } + + for _, oldAddr := range b.subConns.Keys() { + if _, ok := newAddrsMap.Get(oldAddr); ok { + continue + } + val, _ := b.subConns.Get(oldAddr) + val.(*scData).subConn.Shutdown() + b.subConns.Delete(oldAddr) + } +} + +// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn +// becomes ready, which means that all other subConn must be shutdown. +func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if sd.subConn != selected.subConn { + sd.subConn.Shutdown() + } + } + b.subConns = resolver.NewAddressMap() + b.subConns.Set(selected.addr, selected) +} + +// requestConnectionLocked starts connecting on the subchannel corresponding to +// the current address. If no subchannel exists, one is created. If the current +// subchannel is in TransientFailure, a connection to the next address is +// attempted until a subchannel is found. +func (b *pickfirstBalancer) requestConnectionLocked() { + if !b.addressList.isValid() { + return + } + var lastErr error + for valid := true; valid; valid = b.addressList.increment() { + curAddr := b.addressList.currentAddress() + sd, ok := b.subConns.Get(curAddr) + if !ok { + var err error + // We want to assign the new scData to sd from the outer scope, + // hence we can't use := below. + sd, err = b.newSCData(curAddr) + if err != nil { + // This should never happen, unless the clientConn is being shut + // down. + if b.logger.V(2) { + b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err) + } + // Do nothing, the LB policy will be closed soon. + return + } + b.subConns.Set(curAddr, sd) + } + + scd := sd.(*scData) + switch scd.state { + case connectivity.Idle: + scd.subConn.Connect() + case connectivity.TransientFailure: + // Try the next address. + lastErr = scd.lastErr + continue + case connectivity.Ready: + // Should never happen. + b.logger.Errorf("Requesting a connection even though we have a READY SubConn") + case connectivity.Shutdown: + // Should never happen. + b.logger.Errorf("SubConn with state SHUTDOWN present in SubConns map") + case connectivity.Connecting: + // Wait for the SubConn to report success or failure. + } + return + } + // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the + // first pass. + b.endFirstPassLocked(lastErr) +} + +func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + oldState := sd.state + sd.state = newState.ConnectivityState + // Previously relevant SubConns can still callback with state updates. + // To prevent pickers from returning these obsolete SubConns, this logic + // is included to check if the current list of active SubConns includes this + // SubConn. 
+ if activeSD, found := b.subConns.Get(sd.addr); !found || activeSD != sd { + return + } + if newState.ConnectivityState == connectivity.Shutdown { + return + } + + if newState.ConnectivityState == connectivity.Ready { + b.shutdownRemainingLocked(sd) + if !b.addressList.seekTo(sd.addr) { + // This should not fail as we should have only one SubConn after + // entering READY. The SubConn should be present in the addressList. + b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) + return + } + b.state = connectivity.Ready + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + }) + return + } + + // If the LB policy is READY, and it receives a subchannel state change, + // it means that the READY subchannel has failed. + // A SubConn can also transition from CONNECTING directly to IDLE when + // a transport is successfully created, but the connection fails + // before the SubConn can send the notification for READY. We treat + // this as a successful connection and transition to IDLE. + if (b.state == connectivity.Ready && newState.ConnectivityState != connectivity.Ready) || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { + // Once a transport fails, the balancer enters IDLE and starts from + // the first address when the picker is used. + b.shutdownRemainingLocked(sd) + b.state = connectivity.Idle + b.addressList.reset() + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, + }) + return + } + + if b.firstPass { + switch newState.ConnectivityState { + case connectivity.Connecting: + // The balancer can be in either IDLE, CONNECTING or + // TRANSIENT_FAILURE. If it's in TRANSIENT_FAILURE, stay in + // TRANSIENT_FAILURE until it's READY. See A62. + // If the balancer is already in CONNECTING, no update is needed. + if b.state == connectivity.Idle { + b.state = connectivity.Connecting + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + } + case connectivity.TransientFailure: + sd.lastErr = newState.ConnectionError + // Since we're re-using common SubConns while handling resolver + // updates, we could receive an out of turn TRANSIENT_FAILURE from + // a pass over the previous address list. We ignore such updates. + + if curAddr := b.addressList.currentAddress(); !equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + return + } + if b.addressList.increment() { + b.requestConnectionLocked() + return + } + // End of the first pass. + b.endFirstPassLocked(newState.ConnectionError) + } + return + } + + // We have finished the first pass, keep re-connecting failing SubConns. + switch newState.ConnectivityState { + case connectivity.TransientFailure: + b.numTF = (b.numTF + 1) % b.subConns.Len() + sd.lastErr = newState.ConnectionError + if b.numTF%b.subConns.Len() == 0 { + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: newState.ConnectionError}, + }) + } + // We don't need to request re-resolution since the SubConn already + // does that before reporting TRANSIENT_FAILURE. + // TODO: #7534 - Move re-resolution requests from SubConn into + // pick_first. 
+ case connectivity.Idle: + sd.subConn.Connect() + } +} + +func (b *pickfirstBalancer) endFirstPassLocked(lastErr error) { + b.firstPass = false + b.numTF = 0 + b.state = connectivity.TransientFailure + + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: lastErr}, + }) + // Start re-connecting all the SubConns that are already in IDLE. + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if sd.state == connectivity.Idle { + sd.subConn.Connect() + } + } +} + +type picker struct { + result balancer.PickResult + err error +} + +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err +} + +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. +type idlePicker struct { + exitIdle func() +} + +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.exitIdle() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + +// addressList manages sequentially iterating over addresses present in a list +// of endpoints. It provides a 1 dimensional view of the addresses present in +// the endpoints. +// This type is not safe for concurrent access. +type addressList struct { + addresses []resolver.Address + idx int +} + +func (al *addressList) isValid() bool { + return al.idx < len(al.addresses) +} + +func (al *addressList) size() int { + return len(al.addresses) +} + +// increment moves to the next index in the address list. +// This method returns false if it went off the list, true otherwise. +func (al *addressList) increment() bool { + if !al.isValid() { + return false + } + al.idx++ + return al.idx < len(al.addresses) +} + +// currentAddress returns the current address pointed to in the addressList. +// If the list is in an invalid state, it returns an empty address instead. +func (al *addressList) currentAddress() resolver.Address { + if !al.isValid() { + return resolver.Address{} + } + return al.addresses[al.idx] +} + +// first returns the first address in the list. If the list is empty, it returns +// an empty address instead. +func (al *addressList) first() resolver.Address { + if len(al.addresses) == 0 { + return resolver.Address{} + } + return al.addresses[0] +} + +func (al *addressList) reset() { + al.idx = 0 +} + +func (al *addressList) updateAddrs(addrs []resolver.Address) { + al.addresses = addrs + al.reset() +} + +// seekTo returns false if the needle was not found and the current index was +// left unchanged. +func (al *addressList) seekTo(needle resolver.Address) bool { + for ai, addr := range al.addresses { + if !equalAddressIgnoringBalAttributes(&addr, &needle) { + continue + } + al.idx = ai + return true + } + return false +} + +// equalAddressIgnoringBalAttributes returns true is a and b are considered +// equal. This is different from the Equal method on the resolver.Address type +// which considers all fields to determine equality. Here, we only consider +// fields that are meaningful to the SubConn. 
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { + return a.Addr == b.Addr && a.ServerName == b.ServerName && + a.Attributes.Equal(b.Attributes) && + a.Metadata == b.Metadata +} diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index 8ad6ce2f09..2a4f2878ae 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -24,12 +24,14 @@ import ( "sync" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" ) var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) @@ -256,8 +258,8 @@ type acBalancerWrapper struct { ccb *ccBalancerWrapper // read-only stateListener func(balancer.SubConnState) - mu sync.Mutex - producers map[balancer.ProducerBuilder]*refCountedProducer + producersMu sync.Mutex + producers map[balancer.ProducerBuilder]*refCountedProducer } // updateState is invoked by grpc to push a subConn state update to the @@ -267,6 +269,9 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve if ctx.Err() != nil || acbw.ccb.balancer == nil { return } + // Invalidate all producers on any state change. + acbw.closeProducers() + // Even though it is optional for balancers, gracefulswitch ensures // opts.StateListener is set, so this cannot ever be nil. // TODO: delete this comment when UpdateSubConnState is removed. @@ -275,16 +280,6 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve setConnectedAddress(&scs, curAddr) } acbw.stateListener(scs) - acbw.ac.mu.Lock() - defer acbw.ac.mu.Unlock() - if s == connectivity.Ready { - // When changing states to READY, reset stateReadyChan. Wait until - // after we notify the LB policy's listener(s) in order to prevent - // ac.getTransport() from unblocking before the LB policy starts - // tracking the subchannel as READY. - close(acbw.ac.stateReadyChan) - acbw.ac.stateReadyChan = make(chan struct{}) - } }) } @@ -301,6 +296,7 @@ func (acbw *acBalancerWrapper) Connect() { } func (acbw *acBalancerWrapper) Shutdown() { + acbw.closeProducers() acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } @@ -308,9 +304,10 @@ func (acbw *acBalancerWrapper) Shutdown() { // ready, blocks until it is or ctx expires. Returns an error when the context // expires or the addrConn is shut down. func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport, err := acbw.ac.getTransport(ctx) - if err != nil { - return nil, err + transport := acbw.ac.getReadyTransport() + if transport == nil { + return nil, status.Errorf(codes.Unavailable, "SubConn state is not Ready") + } return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) } @@ -335,8 +332,8 @@ type refCountedProducer struct { } func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { - acbw.mu.Lock() - defer acbw.mu.Unlock() + acbw.producersMu.Lock() + defer acbw.producersMu.Unlock() // Look up existing producer from this builder. 
pData := acbw.producers[pb] @@ -353,13 +350,26 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) ( // and delete the refCountedProducer from the map if the total reference // count goes to zero. unref := func() { - acbw.mu.Lock() + acbw.producersMu.Lock() + // If closeProducers has already closed this producer instance, refs is + // set to 0, so the check after decrementing will never pass, and the + // producer will not be double-closed. pData.refs-- if pData.refs == 0 { defer pData.close() // Run outside the acbw mutex delete(acbw.producers, pb) } - acbw.mu.Unlock() + acbw.producersMu.Unlock() } return pData.producer, grpcsync.OnceFunc(unref) } + +func (acbw *acBalancerWrapper) closeProducers() { + acbw.producersMu.Lock() + defer acbw.producersMu.Unlock() + for pb, pData := range acbw.producers { + pData.refs = 0 + pData.close() + delete(acbw.producers, pb) + } +} diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 9c8850e3fd..19763f8edd 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -825,14 +825,13 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. } ac := &addrConn{ - state: connectivity.Idle, - cc: cc, - addrs: copyAddresses(addrs), - scopts: opts, - dopts: cc.dopts, - channelz: channelz.RegisterSubChannel(cc.channelz, ""), - resetBackoff: make(chan struct{}), - stateReadyChan: make(chan struct{}), + state: connectivity.Idle, + cc: cc, + addrs: copyAddresses(addrs), + scopts: opts, + dopts: cc.dopts, + channelz: channelz.RegisterSubChannel(cc.channelz, ""), + resetBackoff: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Start with our address set to the first address; this may be updated if @@ -1141,10 +1140,15 @@ func (cc *ClientConn) Close() error { <-cc.resolverWrapper.serializer.Done() <-cc.balancerWrapper.serializer.Done() - + var wg sync.WaitGroup for ac := range conns { - ac.tearDown(ErrClientConnClosing) + wg.Add(1) + go func(ac *addrConn) { + defer wg.Done() + ac.tearDown(ErrClientConnClosing) + }(ac) } + wg.Wait() cc.addTraceEvent("deleted") // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being @@ -1179,8 +1183,7 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - stateReadyChan chan struct{} // closed and recreated on every READY state change. + state connectivity.State backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1251,6 +1254,8 @@ func (ac *addrConn) resetTransportAndUnlock() { ac.mu.Unlock() if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { + // TODO: #7534 - Move re-resolution requests into the pick_first LB policy + // to ensure one resolution request per pass instead of per subconn failure. ac.cc.resolveNow(resolver.ResolveNowOptions{}) ac.mu.Lock() if acCtx.Err() != nil { @@ -1292,7 +1297,7 @@ func (ac *addrConn) resetTransportAndUnlock() { ac.mu.Unlock() } -// tryAllAddrs tries to creates a connection to the addresses, and stop when at +// tryAllAddrs tries to create a connection to the addresses, and stop when at // the first successful one. 
It returns an error if no address was successfully // connected, or updates ac appropriately with the new transport. func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { @@ -1504,29 +1509,6 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { return nil } -// getTransport waits until the addrconn is ready and returns the transport. -// If the context expires first, returns an appropriate status. If the -// addrConn is stopped first, returns an Unavailable status error. -func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { - for ctx.Err() == nil { - ac.mu.Lock() - t, state, sc := ac.transport, ac.state, ac.stateReadyChan - ac.mu.Unlock() - if state == connectivity.Ready { - return t, nil - } - if state == connectivity.Shutdown { - return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") - } - - select { - case <-ctx.Done(): - case <-sc: - } - } - return nil, status.FromContextError(ctx.Err()).Err() -} - // tearDown starts to tear down the addrConn. // // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 4114358545..e163a473df 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -200,25 +200,40 @@ var tls12ForbiddenCipherSuites = map[uint16]struct{}{ // NewTLS uses c to construct a TransportCredentials based on TLS. func NewTLS(c *tls.Config) TransportCredentials { - tc := &tlsCreds{credinternal.CloneTLSConfig(c)} - tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + config := applyDefaults(c) + if config.GetConfigForClient != nil { + oldFn := config.GetConfigForClient + config.GetConfigForClient = func(hello *tls.ClientHelloInfo) (*tls.Config, error) { + cfgForClient, err := oldFn(hello) + if err != nil || cfgForClient == nil { + return cfgForClient, err + } + return applyDefaults(cfgForClient), nil + } + } + return &tlsCreds{config: config} +} + +func applyDefaults(c *tls.Config) *tls.Config { + config := credinternal.CloneTLSConfig(c) + config.NextProtos = credinternal.AppendH2ToNextProtos(config.NextProtos) // If the user did not configure a MinVersion and did not configure a // MaxVersion < 1.2, use MinVersion=1.2, which is required by // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 - if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { - tc.config.MinVersion = tls.VersionTLS12 + if config.MinVersion == 0 && (config.MaxVersion == 0 || config.MaxVersion >= tls.VersionTLS12) { + config.MinVersion = tls.VersionTLS12 } // If the user did not configure CipherSuites, use all "secure" cipher // suites reported by the TLS package, but remove some explicitly forbidden // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A - if tc.config.CipherSuites == nil { + if config.CipherSuites == nil { for _, cs := range tls.CipherSuites() { if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { - tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + config.CipherSuites = append(config.CipherSuites, cs.ID) } } } - return tc + return config } // NewClientTLSFromCert constructs TLS credentials from the provided root diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 2b285beee3..518692c3af 100644 --- 
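
The credentials change above moves the HTTP/2 defaults (appending "h2" to NextProtos, a TLS 1.2 minimum, and the filtered cipher-suite list) into applyDefaults and, notably, also applies them to any config returned from a user-supplied GetConfigForClient callback. A sketch of a server whose per-handshake configs now pick up those defaults; the certificate paths and listen address are placeholders.

    package main

    import (
        "crypto/tls"
        "log"
        "net"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials"
    )

    func main() {
        creds := credentials.NewTLS(&tls.Config{
            // Chooses a certificate per handshake; the returned config now also
            // gets the NextProtos/MinVersion/CipherSuites defaults applied.
            GetConfigForClient: func(chi *tls.ClientHelloInfo) (*tls.Config, error) {
                cert, err := tls.LoadX509KeyPair("server.crt", "server.key")
                if err != nil {
                    return nil, err
                }
                return &tls.Config{Certificates: []tls.Certificate{cert}}, nil
            },
        })

        lis, err := net.Listen("tcp", ":50051")
        if err != nil {
            log.Fatalf("listen: %v", err)
        }
        srv := grpc.NewServer(grpc.Creds(creds))
        log.Fatal(srv.Serve(lis))
    }
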
a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -436,7 +436,7 @@ func WithTimeout(d time.Duration) DialOption { // option to true from the Control field. For a concrete example of how to do // this, see internal.NetDialerWithTCPKeepalive(). // -// For more information, please see [issue 23459] in the Go github repo. +// For more information, please see [issue 23459] in the Go GitHub repo. // // [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go index 13821a9266..85540f86a7 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go @@ -33,6 +33,8 @@ type lbConfig struct { childConfig serviceconfig.LoadBalancingConfig } +// ChildName returns the name of the child balancer of the gracefulswitch +// Balancer. func ChildName(l serviceconfig.LoadBalancingConfig) string { return l.(*lbConfig).childBuilder.Name() } diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go index d7e9e1d54e..3ec662799a 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/channel.go +++ b/vendor/google.golang.org/grpc/internal/channelz/channel.go @@ -43,6 +43,8 @@ type Channel struct { // Non-zero traceRefCount means the trace of this channel cannot be deleted. traceRefCount int32 + // ChannelMetrics holds connectivity state, target and call metrics for the + // channel within channelz. ChannelMetrics ChannelMetrics } @@ -50,6 +52,8 @@ type Channel struct { // nesting. func (c *Channel) channelzIdentifier() {} +// String returns a string representation of the Channel, including its parent +// entity and ID. func (c *Channel) String() string { if c.Parent == nil { return fmt.Sprintf("Channel #%d", c.ID) @@ -61,24 +65,31 @@ func (c *Channel) id() int64 { return c.ID } +// SubChans returns a copy of the map of sub-channels associated with the +// Channel. func (c *Channel) SubChans() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(c.subChans) } +// NestedChans returns a copy of the map of nested channels associated with the +// Channel. func (c *Channel) NestedChans() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(c.nestedChans) } +// Trace returns a copy of the Channel's trace data. func (c *Channel) Trace() *ChannelTrace { db.mu.RLock() defer db.mu.RUnlock() return c.trace.copy() } +// ChannelMetrics holds connectivity state, target and call metrics for the +// channel within channelz. type ChannelMetrics struct { // The current connectivity state of the channel. State atomic.Pointer[connectivity.State] @@ -136,12 +147,16 @@ func strFromPointer(s *string) string { return *s } +// String returns a string representation of the ChannelMetrics, including its +// state, target, and call metrics. 
func (c *ChannelMetrics) String() string { return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v", c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(), ) } +// NewChannelMetricForTesting creates a new instance of ChannelMetrics with +// specified initial values for testing purposes. func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics { c := &ChannelMetrics{} c.State.Store(&state) diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go index cdfc49d6ea..b5a8249929 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/server.go +++ b/vendor/google.golang.org/grpc/internal/channelz/server.go @@ -59,6 +59,8 @@ func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *Se return sm } +// CopyFrom copies the metrics data from the provided ServerMetrics +// instance into the current instance. func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) { sm.CallsStarted.Store(o.CallsStarted.Load()) sm.CallsSucceeded.Store(o.CallsSucceeded.Load()) diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go index fa64834b25..90103847c5 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/socket.go +++ b/vendor/google.golang.org/grpc/internal/channelz/socket.go @@ -70,13 +70,18 @@ type EphemeralSocketMetrics struct { RemoteFlowControlWindow int64 } +// SocketType represents the type of socket. type SocketType string +// SocketType can be one of these. const ( SocketTypeNormal = "NormalSocket" SocketTypeListen = "ListenSocket" ) +// Socket represents a socket within channelz which includes socket +// metrics and data related to socket activity and provides methods +// for managing and interacting with sockets. type Socket struct { Entity SocketType SocketType @@ -100,6 +105,8 @@ type Socket struct { Security credentials.ChannelzSecurityValue } +// String returns a string representation of the Socket, including its parent +// entity, socket type, and ID. func (ls *Socket) String() string { return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go index 3b88e4cba8..b20802e6e9 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go +++ b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go @@ -47,12 +47,14 @@ func (sc *SubChannel) id() int64 { return sc.ID } +// Sockets returns a copy of the sockets map associated with the SubChannel. func (sc *SubChannel) Sockets() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(sc.sockets) } +// Trace returns a copy of the ChannelTrace associated with the SubChannel. 
func (sc *SubChannel) Trace() *ChannelTrace { db.mu.RLock() defer db.mu.RUnlock() diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go index 36b8674032..2bffe47776 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/trace.go +++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go @@ -79,13 +79,21 @@ type TraceEvent struct { Parent *TraceEvent } +// ChannelTrace provides tracing information for a channel. +// It tracks various events and metadata related to the channel's lifecycle +// and operations. type ChannelTrace struct { - cm *channelMap - clearCalled bool + cm *channelMap + clearCalled bool + // The time when the trace was created. CreationTime time.Time - EventNum int64 - mu sync.Mutex - Events []*traceEvent + // A counter for the number of events recorded in the + // trace. + EventNum int64 + mu sync.Mutex + // A slice of traceEvent pointers representing the events recorded for + // this channel. + Events []*traceEvent } func (c *ChannelTrace) copy() *ChannelTrace { @@ -175,6 +183,7 @@ var refChannelTypeToString = map[RefChannelType]string{ RefNormalSocket: "NormalSocket", } +// String returns a string representation of the RefChannelType func (r RefChannelType) String() string { return refChannelTypeToString[r] } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 452985f8d8..6e7dd6b772 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -50,6 +50,11 @@ var ( // xDS fallback is turned on. If this is unset or is false, only the first // xDS server in the list of server configs will be used. XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) + // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used + // instead of the exiting pickfirst implementation. This can be enabled by + // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" + // to "true". + NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 19b9d63927..8e8e861280 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -53,7 +53,7 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { return cs } -// TrySchedule tries to schedules the provided callback function f to be +// TrySchedule tries to schedule the provided callback function f to be // executed in the order it was added. This is a best-effort operation. If the // context passed to NewCallbackSerializer was canceled before this method is // called, the callback will not be scheduled. diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go index ec62b4775e..683d1955c6 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -39,7 +39,7 @@ func ParseMethod(methodName string) (service, method string, _ error) { } // baseContentType is the base content-type for gRPC. 
This is a valid -// content-type on it's own, but can also include a content-subtype such as +// content-type on its own, but can also include a content-subtype such as // "proto" as a suffix after "+" or ";". See // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests // for more details. diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go index fe49cb74c5..2c13ee9dac 100644 --- a/vendor/google.golang.org/grpc/internal/idle/idle.go +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -182,6 +182,7 @@ func (m *Manager) tryEnterIdleMode() bool { return true } +// EnterIdleModeForTesting instructs the channel to enter idle mode. func (m *Manager) EnterIdleModeForTesting() { m.tryEnterIdleMode() } @@ -225,7 +226,7 @@ func (m *Manager) ExitIdleMode() error { // came in and OnCallBegin() noticed that the calls count is negative. // - Channel is in idle mode, and multiple new RPCs come in at the same // time, all of them notice a negative calls count in OnCallBegin and get - // here. The first one to get the lock would got the channel to exit idle. + // here. The first one to get the lock would get the channel to exit idle. // - Channel is not in idle mode, and the user calls Connect which calls // m.ExitIdleMode. // @@ -266,6 +267,7 @@ func (m *Manager) isClosed() bool { return atomic.LoadInt32(&m.closed) == 1 } +// Close stops the timer associated with the Manager, if it exists. func (m *Manager) Close() { atomic.StoreInt32(&m.closed, 1) diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 7aae9240ff..20b4dc3d35 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -191,6 +191,8 @@ var ( // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. ExitIdleModeForTesting any // func(*grpc.ClientConn) error + // ChannelzTurnOffForTesting disables the Channelz service for testing + // purposes. ChannelzTurnOffForTesting func() // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to @@ -205,10 +207,6 @@ var ( // default resolver scheme. UserSetDefaultScheme = false - // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n - // is the number of elements. swap swaps the elements with indexes i and j. - ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) - // ConnectedAddress returns the connected address for a SubConnState. The // address is only valid if the state is READY. ConnectedAddress any // func (scs SubConnState) resolver.Address @@ -235,7 +233,7 @@ var ( // // The implementation is expected to create a health checking RPC stream by // calling newStream(), watch for the health status of serviceName, and report -// it's health back by calling setConnectivityState(). +// its health back by calling setConnectivityState(). // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 4552db16b0..8691698ef2 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -177,7 +177,7 @@ type dnsResolver struct { // finished. Otherwise, data race will be possible. 
[Race Example] in // dns_resolver_test we replace the real lookup functions with mocked ones to // facilitate testing. If Close() doesn't wait for watcher() goroutine - // finishes, race detector sometimes will warns lookup (READ the lookup + // finishes, race detector sometimes will warn lookup (READ the lookup // function pointers) inside watcher() goroutine has data race with // replaceNetFunc (WRITE the lookup function pointers). wg sync.WaitGroup diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go index be110d41f9..79044657be 100644 --- a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go +++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go @@ -54,6 +54,8 @@ func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) { } } +// RecordInt64Count records the measurement alongside labels on the int +// count associated with the provided handle. func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -62,6 +64,8 @@ func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, } } +// RecordFloat64Count records the measurement alongside labels on the float +// count associated with the provided handle. func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -70,6 +74,8 @@ func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHand } } +// RecordInt64Histo records the measurement alongside labels on the int +// histo associated with the provided handle. func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -78,6 +84,8 @@ func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, } } +// RecordFloat64Histo records the measurement alongside labels on the float +// histo associated with the provided handle. func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -86,6 +94,8 @@ func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHand } } +// RecordInt64Gauge records the measurement alongside labels on the int +// gauge associated with the provided handle. func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index 757925381f..1186f1e9a9 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -149,6 +149,8 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. +// If the detail can be decoded, the proto message returned is of the same +// type that was given to WithDetails(). 
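
The documentation added here, together with the protoadapt.MessageV1Of call in the body below, guarantees that a detail attached with WithDetails comes back from Details() as the same generated type. A short sketch of that round-trip, assuming the usual imports (google.golang.org/grpc/codes, google.golang.org/grpc/status, google.golang.org/genproto/googleapis/rpc/errdetails, and log):

    st := status.New(codes.InvalidArgument, "bad request")
    st, err := st.WithDetails(&errdetails.BadRequest{
        FieldViolations: []*errdetails.BadRequest_FieldViolation{
            {Field: "name", Description: "must not be empty"},
        },
    })
    if err != nil {
        log.Fatalf("WithDetails: %v", err)
    }
    for _, d := range st.Details() {
        // The assertion succeeds because Details() hands back the same
        // concrete type that was given to WithDetails().
        if br, ok := d.(*errdetails.BadRequest); ok {
            log.Println(br.GetFieldViolations()[0].GetField())
        }
    }
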
func (s *Status) Details() []any { if s == nil || s.s == nil { return nil @@ -160,7 +162,38 @@ func (s *Status) Details() []any { details = append(details, err) continue } - details = append(details, detail) + // The call to MessageV1Of is required to unwrap the proto message if + // it implemented only the MessageV1 API. The proto message would have + // been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are + // added to a global registry used by any.UnmarshalNew(). + // MessageV1Of has the following behaviour: + // 1. If the given message is a wrapped MessageV1, it returns the + // unwrapped value. + // 2. If the given message already implements MessageV1, it returns it + // as is. + // 3. Else, it wraps the MessageV2 in a MessageV1 wrapper. + // + // Since the Status.WithDetails() API only accepts MessageV1, calling + // MessageV1Of ensures we return the same type that was given to + // WithDetails: + // * If the give type implemented only MessageV1, the unwrapping from + // point 1 above will restore the type. + // * If the given type implemented both MessageV1 and MessageV2, point 2 + // above will ensure no wrapping is performed. + // * If the given type implemented only MessageV2 and was wrapped using + // MessageV1Of before passing to WithDetails(), it would be unwrapped + // in WithDetails by calling MessageV2Of(). Point 3 above will ensure + // that the type is wrapped in a MessageV1 wrapper again before + // returning. Note that protoc-gen-go doesn't generate code which + // implements ONLY MessageV2 at the time of writing. + // + // NOTE: Status details can also be added using the FromProto method. + // This could theoretically allow passing a Detail message that only + // implements the V2 API. In such a case the message will be wrapped in + // a MessageV1 wrapper when fetched using Details(). + // Since protoc-gen-go generates only code that implements both V1 and + // V2 APIs for backward compatibility, this is not a concern. + details = append(details, protoadapt.MessageV1Of(detail)) } return details } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index c769deab53..62b81885d8 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -86,9 +86,9 @@ type http2Client struct { writerDone chan struct{} // sync point to enable testing. // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. - goAway chan struct{} - - framer *framer + goAway chan struct{} + keepaliveDone chan struct{} // Closed when the keepalive goroutine exits. + framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. // Do not access controlBuf with mu held. @@ -335,6 +335,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), + keepaliveDone: make(chan struct{}), framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, @@ -527,8 +528,9 @@ func (t *http2Client) getPeer() *peer.Peer { // to be the last frame loopy writes to the transport. 
func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) { t.mu.Lock() - defer t.mu.Unlock() - if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil { + maxStreamID := t.nextID - 2 + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(maxStreamID, http2.ErrCodeNo, g.debugData); err != nil { return false, err } return false, g.closeConn @@ -1008,6 +1010,9 @@ func (t *http2Client) Close(err error) { // should unblock it so that the goroutine eventually exits. t.kpDormancyCond.Signal() } + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + goAwayDebugMessage := t.goAwayDebugMessage t.mu.Unlock() // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the @@ -1025,11 +1030,13 @@ func (t *http2Client) Close(err error) { } t.cancel() t.conn.Close() + // Waits for the reader and keepalive goroutines to exit before returning to + // ensure all resources are cleaned up before Close can return. + <-t.readerDone + if t.keepaliveEnabled { + <-t.keepaliveDone + } channelz.RemoveEntry(t.channelz.ID) - // Append info about previous goaways if there were any, since this may be important - // for understanding the root cause for this connection to be closed. - _, goAwayDebugMessage := t.GetGoAwayReason() - var st *status.Status if len(goAwayDebugMessage) > 0 { st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) @@ -1316,11 +1323,11 @@ func (t *http2Client) handlePing(f *http2.PingFrame) { t.controlBuf.put(pingAck) } -func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return + return nil } if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug @@ -1332,8 +1339,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) - return + return connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id) } // A client can receive multiple GoAways from the server (see // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first @@ -1350,8 +1356,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // If there are multiple GoAways the first one should always have an ID greater than the following ones. 
if id > t.prevGoAwayID { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) - return + return connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID) } default: t.setGoAwayReason(f) @@ -1375,8 +1380,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.prevGoAwayID = id if len(t.activeStreams) == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) - return + return connectionErrorf(true, nil, "received goaway and there are no active streams") } streamsToClose := make([]*Stream, 0) @@ -1393,6 +1397,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { for _, stream := range streamsToClose { t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } + return nil } // setGoAwayReason sets the value of t.goAwayReason based @@ -1628,7 +1633,13 @@ func (t *http2Client) readServerPreface() error { // network connection. If the server preface is not read successfully, an // error is pushed to errCh; otherwise errCh is closed with no error. func (t *http2Client) reader(errCh chan<- error) { - defer close(t.readerDone) + var errClose error + defer func() { + close(t.readerDone) + if errClose != nil { + t.Close(errClose) + } + }() if err := t.readServerPreface(); err != nil { errCh <- err @@ -1669,7 +1680,7 @@ func (t *http2Client) reader(errCh chan<- error) { continue } // Transport error. - t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) + errClose = connectionErrorf(true, err, "error reading from server: %v", err) return } switch frame := frame.(type) { @@ -1684,7 +1695,7 @@ func (t *http2Client) reader(errCh chan<- error) { case *http2.PingFrame: t.handlePing(frame) case *http2.GoAwayFrame: - t.handleGoAway(frame) + errClose = t.handleGoAway(frame) case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: @@ -1697,6 +1708,13 @@ func (t *http2Client) reader(errCh chan<- error) { // keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { + var err error + defer func() { + close(t.keepaliveDone) + if err != nil { + t.Close(err) + } + }() p := &ping{data: [8]byte{}} // True iff a ping has been sent, and no data has been received since then. outstandingPing := false @@ -1720,7 +1738,7 @@ func (t *http2Client) keepalive() { continue } if outstandingPing && timeoutLeft <= 0 { - t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) + err = connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout") return } t.mu.Lock() diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 924ba4f365..e12cb0bc91 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -547,6 +547,15 @@ func (s *Stream) write(m recvMsg) { s.buf.put(m) } +// ReadHeader reads data into the provided header slice from the stream. It +// first checks if there was an error during a previous read operation and +// returns it if present. It then requests a read operation for the length of +// the header. It continues to read from the stream until the entire header +// slice is filled or an error occurs. 
If an `io.EOF` error is encountered +// with partially read data, it is converted to `io.ErrUnexpectedEOF` to +// indicate an unexpected end of the stream. The method returns any error +// encountered during the read process or nil if the header was successfully +// read. func (s *Stream) ReadHeader(header []byte) (err error) { // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go index 4d66b2ccc2..ecbf0b9a73 100644 --- a/vendor/google.golang.org/grpc/mem/buffers.go +++ b/vendor/google.golang.org/grpc/mem/buffers.go @@ -65,6 +65,9 @@ var ( refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }} ) +// IsBelowBufferPoolingThreshold returns true if the given size is less than or +// equal to the threshold for buffer pooling. This is used to determine whether +// to pool buffers or allocate them directly. func IsBelowBufferPoolingThreshold(size int) bool { return size <= bufferPoolingThreshold } @@ -89,7 +92,11 @@ func newBuffer() *buffer { // // Note that the backing array of the given data is not copied. func NewBuffer(data *[]byte, pool BufferPool) Buffer { - if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) { + // Use the buffer's capacity instead of the length, otherwise buffers may + // not be reused under certain conditions. For example, if a large buffer + // is acquired from the pool, but fewer bytes than the buffering threshold + // are written to it, the buffer will not be returned to the pool. + if pool == nil || IsBelowBufferPoolingThreshold(cap(*data)) { return (SliceBuffer)(*data) } b := newBuffer() @@ -194,19 +201,19 @@ func (b *buffer) read(buf []byte) (int, Buffer) { return n, b } -// String returns a string representation of the buffer. May be used for -// debugging purposes. func (b *buffer) String() string { return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData())) } +// ReadUnsafe reads bytes from the given Buffer into the provided slice. +// It does not perform safety checks. func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) { return buf.read(dst) } // SplitUnsafe modifies the receiver to point to the first n bytes while it -// returns a new reference to the remaining bytes. The returned Buffer functions -// just like a normal reference acquired using Ref(). +// returns a new reference to the remaining bytes. The returned Buffer +// functions just like a normal reference acquired using Ref(). func SplitUnsafe(buf Buffer, n int) (left, right Buffer) { return buf.split(n) } @@ -232,12 +239,21 @@ func (e emptyBuffer) read([]byte) (int, Buffer) { return 0, e } +// SliceBuffer is a Buffer implementation that wraps a byte slice. It provides +// methods for reading, splitting, and managing the byte slice. type SliceBuffer []byte +// ReadOnlyData returns the byte slice. func (s SliceBuffer) ReadOnlyData() []byte { return s } -func (s SliceBuffer) Ref() {} -func (s SliceBuffer) Free() {} -func (s SliceBuffer) Len() int { return len(s) } + +// Ref is a noop implementation of Ref. +func (s SliceBuffer) Ref() {} + +// Free is a noop implementation of Free. +func (s SliceBuffer) Free() {} + +// Len is a noop implementation of Len. 
+func (s SliceBuffer) Len() int { return len(s) } func (s SliceBuffer) split(n int) (left, right Buffer) { return s[:n], s[n:] diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 2d96f1405e..aba1ae3e67 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -791,9 +791,8 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool if !haveCompressor { if isServer { return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) - } else { - return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) } + return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) } default: return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index a96b6a6bff..d50e843598 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.67.1" +const Version = "1.68.0" diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index 4b177c8206..e9fe103943 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -348,7 +348,11 @@ func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Messa switch tok.Kind() { case json.ObjectClose: if !found { - return d.newError(tok.Pos(), `missing "value" field`) + // We tolerate an omitted `value` field with the google.protobuf.Empty Well-Known-Type, + // for compatibility with other proto runtimes that have interpreted the spec differently. + if m.Descriptor().FullName() != genid.Empty_message_fullname { + return d.newError(tok.Pos(), `missing "value" field`) + } } return nil diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index ff6a38360a..2c0693d7ab 100644 Binary files a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go index 08dad7692c..bf1aba0e85 100644 --- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -10,4 +10,9 @@ import "google.golang.org/protobuf/types/descriptorpb" const ( Minimum = descriptorpb.Edition_EDITION_PROTO2 Maximum = descriptorpb.Edition_EDITION_2023 + + // MaximumKnown is the maximum edition that is known to Go Protobuf, but not + // declared as supported. In other words: end users cannot use it, but + // testprotos inside Go Protobuf can. 
+ MaximumKnown = descriptorpb.Edition_EDITION_2024 ) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index fa790e0ff1..f325298564 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -32,6 +32,7 @@ const ( EditionProto2 Edition = 998 EditionProto3 Edition = 999 Edition2023 Edition = 1000 + Edition2024 Edition = 1001 EditionUnsupported Edition = 100000 ) @@ -77,28 +78,42 @@ type ( Locations SourceLocations } + // EditionFeatures is a frequently-instantiated struct, so please take care + // to minimize padding when adding new fields to this struct (add them in + // the right place/order). EditionFeatures struct { + // StripEnumPrefix determines if the plugin generates enum value + // constants as-is, with their prefix stripped, or both variants. + StripEnumPrefix int + // IsFieldPresence is true if field_presence is EXPLICIT // https://protobuf.dev/editions/features/#field_presence IsFieldPresence bool + // IsFieldPresence is true if field_presence is LEGACY_REQUIRED // https://protobuf.dev/editions/features/#field_presence IsLegacyRequired bool + // IsOpenEnum is true if enum_type is OPEN // https://protobuf.dev/editions/features/#enum_type IsOpenEnum bool + // IsPacked is true if repeated_field_encoding is PACKED // https://protobuf.dev/editions/features/#repeated_field_encoding IsPacked bool + // IsUTF8Validated is true if utf_validation is VERIFY // https://protobuf.dev/editions/features/#utf8_validation IsUTF8Validated bool + // IsDelimitedEncoded is true if message_encoding is DELIMITED // https://protobuf.dev/editions/features/#message_encoding IsDelimitedEncoded bool + // IsJSONCompliant is true if json_format is ALLOW // https://protobuf.dev/editions/features/#json_format IsJSONCompliant bool + // GenerateLegacyUnmarshalJSON determines if the plugin generates the // UnmarshalJSON([]byte) error method for enums. GenerateLegacyUnmarshalJSON bool diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index fd4d0c83d2..7611796e86 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -32,6 +32,10 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeVarint(b) b = b[m:] parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v) + case genid.GoFeatures_StripEnumPrefix_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.StripEnumPrefix = int(v) default: panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num)) } diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index 7f67cbb6e9..09792d96f6 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -21,13 +21,30 @@ const ( // Field names for pb.GoFeatures. 
const ( GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" + GoFeatures_StripEnumPrefix_field_name protoreflect.Name = "strip_enum_prefix" GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_StripEnumPrefix_field_fullname protoreflect.FullName = "pb.GoFeatures.strip_enum_prefix" ) // Field numbers for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 + GoFeatures_StripEnumPrefix_field_number protoreflect.FieldNumber = 3 +) + +// Full and short names for pb.GoFeatures.StripEnumPrefix. +const ( + GoFeatures_StripEnumPrefix_enum_fullname = "pb.GoFeatures.StripEnumPrefix" + GoFeatures_StripEnumPrefix_enum_name = "StripEnumPrefix" +) + +// Enum values for pb.GoFeatures.StripEnumPrefix. +const ( + GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED_enum_value = 0 + GoFeatures_STRIP_ENUM_PREFIX_KEEP_enum_value = 1 + GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH_enum_value = 2 + GoFeatures_STRIP_ENUM_PREFIX_STRIP_enum_value = 3 ) // Extension numbers diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index fb8e15e8da..62a52a40a3 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 35 - Patch = 1 + Patch = 2 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index 8fbecb4f58..69a0505091 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -13,6 +13,8 @@ package protodesc import ( + "strings" + "google.golang.org/protobuf/internal/editionssupport" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/filedesc" @@ -102,13 +104,17 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } - if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { - return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) - } f.L1.Path = fd.GetName() if f.L1.Path == "" { return nil, errors.New("file path must be populated") } + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { + // Allow cmd/protoc-gen-go/testdata to use any edition for easier + // testing of upcoming edition features. 
+ if !strings.HasPrefix(fd.GetName(), "cmd/protoc-gen-go/testdata/") { + return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) + } + } f.L1.Package = protoreflect.FullName(fd.GetPackage()) if !f.L1.Package.IsValid() && f.L1.Package != "" { return nil, errors.New("invalid package: %q", f.L1.Package) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 002e0047ae..d0aeab9585 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -43,6 +43,8 @@ func toEditionProto(ed filedesc.Edition) descriptorpb.Edition { return descriptorpb.Edition_EDITION_PROTO3 case filedesc.Edition2023: return descriptorpb.Edition_EDITION_2023 + case filedesc.Edition2024: + return descriptorpb.Edition_EDITION_2024 default: panic(fmt.Sprintf("unknown value for edition: %v", ed)) } @@ -127,6 +129,9 @@ func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorp if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil { parentFS.GenerateLegacyUnmarshalJSON = *luje } + if sep := goFeatures.StripEnumPrefix; sep != nil { + parentFS.StripEnumPrefix = int(*sep) + } } return parentFS diff --git a/vendor/google.golang.org/protobuf/testing/protocmp/util.go b/vendor/google.golang.org/protobuf/testing/protocmp/util.go index 838e70fbcc..2282811b6e 100644 --- a/vendor/google.golang.org/protobuf/testing/protocmp/util.go +++ b/vendor/google.golang.org/protobuf/testing/protocmp/util.go @@ -524,6 +524,9 @@ func IgnoreUnknown() cmp.Option { // handled by this option. To sort Go slices that are not repeated fields, // consider using [github.com/google/go-cmp/cmp/cmpopts.SortSlices] instead. // +// The sorting of messages does not take into account ignored fields or oneofs +// as a result of [IgnoreFields] or [IgnoreOneofs]. +// // This must be used in conjunction with [Transform]. func SortRepeated(lessFunc any) cmp.Option { t, ok := checkTTBFunc(lessFunc) @@ -624,6 +627,9 @@ func checkTTBFunc(lessFunc any) (reflect.Type, bool) { // ... // user-provided definition for less // })) // +// The sorting of messages does not take into account ignored fields or oneofs +// as a result of [IgnoreFields] or [IgnoreOneofs]. +// // This must be used in conjunction with [Transform]. func SortRepeatedFields(message proto.Message, names ...protoreflect.Name) cmp.Option { var opts cmp.Options diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index c7e860fcd6..5067b89e90 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -18,13 +18,76 @@ import ( sync "sync" ) +type GoFeatures_StripEnumPrefix int32 + +const ( + GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED GoFeatures_StripEnumPrefix = 0 + GoFeatures_STRIP_ENUM_PREFIX_KEEP GoFeatures_StripEnumPrefix = 1 + GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH GoFeatures_StripEnumPrefix = 2 + GoFeatures_STRIP_ENUM_PREFIX_STRIP GoFeatures_StripEnumPrefix = 3 +) + +// Enum value maps for GoFeatures_StripEnumPrefix. 
+var ( + GoFeatures_StripEnumPrefix_name = map[int32]string{ + 0: "STRIP_ENUM_PREFIX_UNSPECIFIED", + 1: "STRIP_ENUM_PREFIX_KEEP", + 2: "STRIP_ENUM_PREFIX_GENERATE_BOTH", + 3: "STRIP_ENUM_PREFIX_STRIP", + } + GoFeatures_StripEnumPrefix_value = map[string]int32{ + "STRIP_ENUM_PREFIX_UNSPECIFIED": 0, + "STRIP_ENUM_PREFIX_KEEP": 1, + "STRIP_ENUM_PREFIX_GENERATE_BOTH": 2, + "STRIP_ENUM_PREFIX_STRIP": 3, + } +) + +func (x GoFeatures_StripEnumPrefix) Enum() *GoFeatures_StripEnumPrefix { + p := new(GoFeatures_StripEnumPrefix) + *p = x + return p +} + +func (x GoFeatures_StripEnumPrefix) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GoFeatures_StripEnumPrefix) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_go_features_proto_enumTypes[0].Descriptor() +} + +func (GoFeatures_StripEnumPrefix) Type() protoreflect.EnumType { + return &file_google_protobuf_go_features_proto_enumTypes[0] +} + +func (x GoFeatures_StripEnumPrefix) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *GoFeatures_StripEnumPrefix) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = GoFeatures_StripEnumPrefix(num) + return nil +} + +// Deprecated: Use GoFeatures_StripEnumPrefix.Descriptor instead. +func (GoFeatures_StripEnumPrefix) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 0} +} + type GoFeatures struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Whether or not to generate the deprecated UnmarshalJSON method for enums. - LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` + LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` + StripEnumPrefix *GoFeatures_StripEnumPrefix `protobuf:"varint,3,opt,name=strip_enum_prefix,json=stripEnumPrefix,enum=pb.GoFeatures_StripEnumPrefix" json:"strip_enum_prefix,omitempty"` } func (x *GoFeatures) Reset() { @@ -64,6 +127,13 @@ func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { return false } +func (x *GoFeatures) GetStripEnumPrefix() GoFeatures_StripEnumPrefix { + if x != nil && x.StripEnumPrefix != nil { + return *x.StripEnumPrefix + } + return GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED +} + var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.FeatureSet)(nil), @@ -88,7 +158,7 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{ 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe0, 0x03, 0x0a, 0x0a, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x08, 0x42, 0x80, 0x01, @@ -101,14 +171,31 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{ 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, - 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, - 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, + 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72, + 0x69, 0x70, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x07, 0x98, + 0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, + 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, 0x45, 0x45, 0x50, 0x18, 0x84, 0x07, + 0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, + 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, + 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53, + 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, + 0x0a, 0x16, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, + 0x46, 0x49, 0x58, 0x5f, 0x4b, 0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54, + 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, + 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12, + 0x1b, 0x0a, 0x17, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, + 0x45, 0x46, 0x49, 0x58, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02, + 0x67, 0x6f, 0x12, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, + 0xea, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, + 0x6f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 
0x62, } var ( @@ -123,19 +210,22 @@ func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { return file_google_protobuf_go_features_proto_rawDescData } +var file_google_protobuf_go_features_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_google_protobuf_go_features_proto_goTypes = []any{ - (*GoFeatures)(nil), // 0: pb.GoFeatures - (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet + (GoFeatures_StripEnumPrefix)(0), // 0: pb.GoFeatures.StripEnumPrefix + (*GoFeatures)(nil), // 1: pb.GoFeatures + (*descriptorpb.FeatureSet)(nil), // 2: google.protobuf.FeatureSet } var file_google_protobuf_go_features_proto_depIdxs = []int32{ - 1, // 0: pb.go:extendee -> google.protobuf.FeatureSet - 0, // 1: pb.go:type_name -> pb.GoFeatures - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 1, // [1:2] is the sub-list for extension type_name - 0, // [0:1] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 0, // 0: pb.GoFeatures.strip_enum_prefix:type_name -> pb.GoFeatures.StripEnumPrefix + 2, // 1: pb.go:extendee -> google.protobuf.FeatureSet + 1, // 2: pb.go:type_name -> pb.GoFeatures + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 2, // [2:3] is the sub-list for extension type_name + 1, // [1:2] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } func init() { file_google_protobuf_go_features_proto_init() } @@ -148,13 +238,14 @@ func file_google_protobuf_go_features_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_go_features_proto_rawDesc, - NumEnums: 0, + NumEnums: 1, NumMessages: 1, NumExtensions: 1, NumServices: 0, }, GoTypes: file_google_protobuf_go_features_proto_goTypes, DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs, + EnumInfos: file_google_protobuf_go_features_proto_enumTypes, MessageInfos: file_google_protobuf_go_features_proto_msgTypes, ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, }.Build() diff --git a/vendor/knative.dev/eventing/hack/e2e-debug.sh b/vendor/knative.dev/eventing/hack/e2e-debug.sh index b9650e467b..b2bc01bccd 100644 --- a/vendor/knative.dev/eventing/hack/e2e-debug.sh +++ b/vendor/knative.dev/eventing/hack/e2e-debug.sh @@ -35,4 +35,4 @@ wait_until_pods_running knative-eventing || fail_test "Pods in knative-eventing header "Running tests" -go test -tags=e2e -v -timeout=30m -run="${test_name}" "${test_dir}" || fail_test "Test(s) failed" +go test -tags=e2e -v -timeout=30m -parallel=12 -run="${test_name}" "${test_dir}" || fail_test "Test(s) failed" diff --git a/vendor/knative.dev/eventing/hack/update-codegen.sh b/vendor/knative.dev/eventing/hack/update-codegen.sh index a0fe67dcbf..dc5206ae11 100644 --- a/vendor/knative.dev/eventing/hack/update-codegen.sh +++ b/vendor/knative.dev/eventing/hack/update-codegen.sh @@ -49,7 +49,7 @@ group "Knative Codegen" # Knative Injection ${KNATIVE_CODEGEN_PKG}/hack/generate-knative.sh "injection" \ knative.dev/eventing/pkg/client knative.dev/eventing/pkg/apis \ - "sinks:v1alpha1 eventing:v1alpha1 eventing:v1beta1 eventing:v1beta2 eventing:v1beta3 eventing:v1 messaging:v1 flows:v1 sources:v1beta2 sources:v1 duck:v1beta1 duck:v1" \ + "sinks:v1alpha1 eventing:v1alpha1 eventing:v1beta1 eventing:v1beta2 eventing:v1beta3 eventing:v1 
messaging:v1 flows:v1 sources:v1alpha1 sources:v1beta2 sources:v1 duck:v1beta1 duck:v1" \ --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt group "Generating API reference docs" diff --git a/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/auth.go b/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/auth.go new file mode 100644 index 0000000000..8f815119bc --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/auth.go @@ -0,0 +1,43 @@ +/* +Copyright 2024 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +type Auth struct { + // Auth Secret + Secret *Secret `json:"secret,omitempty"` + + // AccessKey is the AWS access key ID. + AccessKey string `json:"accessKey,omitempty"` + + // SecretKey is the AWS secret access key. + SecretKey string `json:"secretKey,omitempty"` +} + +func (a *Auth) HasAuth() bool { + return a != nil && a.Secret != nil && + a.Secret.Ref != nil && a.Secret.Ref.Name != "" +} + +type Secret struct { + // Secret reference for SASL and SSL configurations. + Ref *SecretReference `json:"ref,omitempty"` +} + +type SecretReference struct { + // Secret name. + Name string `json:"name"` +} diff --git a/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/aws.go b/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/aws.go new file mode 100644 index 0000000000..7a77d57b52 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/aws.go @@ -0,0 +1,76 @@ +/* +Copyright 2024 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +const ( + + // AwsAccessKey is the name of the expected key on the secret for accessing the actual AWS access key value. + AwsAccessKey = "aws.accessKey" + // AwsSecretKey is the name of the expected key on the secret for accessing the actual AWS secret key value. + AwsSecretKey = "aws.secretKey" +) + +type AWSCommon struct { + // Auth is the S3 authentication (accessKey/secretKey) configuration. 
+ Region string `json:"region,omitempty"` // AWS region + URIEndpointOverride string `json:"uriEndpointOverride,omitempty"` // Override endpoint URI + OverrideEndpoint bool `json:"overrideEndpoint" default:"false"` // Override endpoint flag +} + +type AWSS3 struct { + AWSCommon `json:",inline"` // Embeds AWSCommon to inherit its fields in JSON + Arn string `json:"arn,omitempty" camel:"CAMEL_KAMELET_AWS_S3_SOURCE_BUCKETNAMEORARN"` // S3 ARN + DeleteAfterRead bool `json:"deleteAfterRead" default:"true"` // Auto-delete objects after reading + MoveAfterRead bool `json:"moveAfterRead" default:"false"` // Move objects after reading + DestinationBucket string `json:"destinationBucket,omitempty"` // Destination bucket for moved objects + DestinationBucketPrefix string `json:"destinationBucketPrefix,omitempty"` // Prefix for moved objects + DestinationBucketSuffix string `json:"destinationBucketSuffix,omitempty"` // Suffix for moved objects + AutoCreateBucket bool `json:"autoCreateBucket" default:"false"` // Auto-create S3 bucket + Prefix string `json:"prefix,omitempty"` // S3 bucket prefix for search + IgnoreBody bool `json:"ignoreBody" default:"false"` // Ignore object body + ForcePathStyle bool `json:"forcePathStyle" default:"false"` // Force path style for bucket access + Delay int `json:"delay" default:"500"` // Delay between polls in milliseconds + MaxMessagesPerPoll int `json:"maxMessagesPerPoll" default:"10"` // Max messages to poll per request +} + +type AWSSQS struct { + AWSCommon `json:",inline"` // Embeds AWSCommon to inherit its fields in JSON + Arn string `json:"arn,omitempty" camel:"CAMEL_KAMELET_AWS_SQS_SOURCE_QUEUENAMEORARN"` // SQS ARN + DeleteAfterRead bool `json:"deleteAfterRead" default:"true"` // Auto-delete messages after reading + AutoCreateQueue bool `json:"autoCreateQueue" default:"false"` // Auto-create SQS queue + Host string `json:"host" camel:"CAMEL_KAMELET_AWS_SQS_SOURCE_AMAZONAWSHOST" default:"amazonaws.com"` // AWS host + Protocol string `json:"protocol" default:"https"` // Communication protocol (http/https) + QueueURL string `json:"queueURL,omitempty"` // Full SQS queue URL + Greedy bool `json:"greedy" default:"false"` // Greedy scheduler + Delay int `json:"delay" default:"500"` // Delay between polls in milliseconds + MaxMessagesPerPoll int `json:"maxMessagesPerPoll" default:"1"` // Max messages to return (1-10) + WaitTimeSeconds int `json:"waitTimeSeconds,omitempty"` // Wait time for messages + VisibilityTimeout int `json:"visibilityTimeout,omitempty"` // Visibility timeout in seconds +} + +type AWSDDBStreams struct { + AWSCommon `json:",inline"` // Embeds AWSCommon to inherit its fields in JSON + Table string `json:"table,omitempty"` // The name of the DynamoDB table + StreamIteratorType string `json:"streamIteratorType,omitempty" default:"FROM_LATEST"` // Defines where in the DynamoDB stream to start getting records + Delay int `json:"delay,omitempty" default:"500"` // Delay in milliseconds before the next poll from the database +} + +type AWSSNS struct { + AWSCommon `json:",inline"` // Embeds AWSCommon to inherit its fields in JSON + Arn string `json:"arn,omitempty" camel:"CAMEL_KAMELET_AWS_SNS_SINK_TOPICNAMEORARN"` // SNS ARN + AutoCreateTopic bool `json:"autoCreateTopic" default:"false"` // Auto-create SNS topic +} diff --git a/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/doc.go b/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/doc.go new file mode 100644 index 0000000000..3366df6745 --- /dev/null +++ 
b/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2024 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +package v1alpha1 diff --git a/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/zz_generated.deepcopy.go b/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..9cf353963a --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/common/integration/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,164 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSCommon) DeepCopyInto(out *AWSCommon) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSCommon. +func (in *AWSCommon) DeepCopy() *AWSCommon { + if in == nil { + return nil + } + out := new(AWSCommon) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSDDBStreams) DeepCopyInto(out *AWSDDBStreams) { + *out = *in + out.AWSCommon = in.AWSCommon + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSDDBStreams. +func (in *AWSDDBStreams) DeepCopy() *AWSDDBStreams { + if in == nil { + return nil + } + out := new(AWSDDBStreams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSS3) DeepCopyInto(out *AWSS3) { + *out = *in + out.AWSCommon = in.AWSCommon + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSS3. +func (in *AWSS3) DeepCopy() *AWSS3 { + if in == nil { + return nil + } + out := new(AWSS3) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSSNS) DeepCopyInto(out *AWSSNS) { + *out = *in + out.AWSCommon = in.AWSCommon + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSNS. 
+func (in *AWSSNS) DeepCopy() *AWSSNS { + if in == nil { + return nil + } + out := new(AWSSNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSSQS) DeepCopyInto(out *AWSSQS) { + *out = *in + out.AWSCommon = in.AWSCommon + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSQS. +func (in *AWSSQS) DeepCopy() *AWSSQS { + if in == nil { + return nil + } + out := new(AWSSQS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Auth) DeepCopyInto(out *Auth) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(Secret) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Auth. +func (in *Auth) DeepCopy() *Auth { + if in == nil { + return nil + } + out := new(Auth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Secret) DeepCopyInto(out *Secret) { + *out = *in + if in.Ref != nil { + in, out := &in.Ref, &out.Ref + *out = new(SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret. +func (in *Secret) DeepCopy() *Secret { + if in == nil { + return nil + } + out := new(Secret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretReference) DeepCopyInto(out *SecretReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference. +func (in *SecretReference) DeepCopy() *SecretReference { + if in == nil { + return nil + } + out := new(SecretReference) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/register.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/register.go index c6f3e98cd0..c891f7c7b1 100644 --- a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/register.go +++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/register.go @@ -47,6 +47,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &EventPolicy{}, &EventPolicyList{}, + &RequestReply{}, + &RequestReplyList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_conversion.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_conversion.go new file mode 100644 index 0000000000..739e922ca4 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_conversion.go @@ -0,0 +1,34 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "fmt" + + "knative.dev/pkg/apis" +) + +// ConvertTo implements apis.Convertible +func (ep *RequestReply) ConvertTo(ctx context.Context, obj apis.Convertible) error { + return fmt.Errorf("v1alpha1 is the highest known version, got: %T", obj) +} + +// ConvertFrom implements apis.Convertible +func (ep *RequestReply) ConvertFrom(ctx context.Context, obj apis.Convertible) error { + return fmt.Errorf("v1alpha1 is the highest known version, got: %T", obj) +} diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_defaults.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_defaults.go new file mode 100644 index 0000000000..c05f915d10 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_defaults.go @@ -0,0 +1,44 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + + "k8s.io/utils/ptr" + "knative.dev/eventing/pkg/apis/feature" + "knative.dev/pkg/apis" +) + +func (rr *RequestReply) SetDefaults(ctx context.Context) { + ctx = apis.WithinParent(ctx, rr.ObjectMeta) + rr.Spec.SetDefaults(ctx) +} + +func (rrs *RequestReplySpec) SetDefaults(ctx context.Context) { + if rrs.Timeout == nil || *rrs.Timeout == "" { + rrs.Timeout = ptr.To(feature.FromContextOrDefaults(ctx).RequestReplyDefaultTimeout()) + } + + if rrs.CorrelationAttribute == "" { + rrs.CorrelationAttribute = "correlationid" + } + + if rrs.ReplyAttribute == "" { + rrs.ReplyAttribute = "replyid" + } +} diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_lifecycle.go new file mode 100644 index 0000000000..6701422908 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_lifecycle.go @@ -0,0 +1,106 @@ +/* +Copyright 2024 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "knative.dev/pkg/apis" + v1 "knative.dev/pkg/apis/duck/v1" +) + +var requestReplyCondSet = apis.NewLivingConditionSet(RequestReplyConditionIngress, RequestReplyConditionTriggers, RequestReplyConditionAddressable, RequestReplyConditionEventPoliciesReady) + +const ( + RequestReplyConditionReady = apis.ConditionReady + RequestReplyConditionIngress apis.ConditionType = "IngressReady" + RequestReplyConditionTriggers apis.ConditionType = "TriggersReady" + RequestReplyConditionAddressable apis.ConditionType = "Addressable" + RequestReplyConditionEventPoliciesReady apis.ConditionType = "EventPoliciesReady" +) + +// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface. +func (*RequestReply) GetConditionSet() apis.ConditionSet { + return requestReplyCondSet +} + +func (*RequestReplyStatus) GetConditionSet() apis.ConditionSet { + return requestReplyCondSet +} + +// GetCondition returns the condition currently associated with the given type, or nil. +func (rr *RequestReplyStatus) GetCondition(t apis.ConditionType) *apis.Condition { + return requestReplyCondSet.Manage(rr).GetCondition(t) +} + +// IsReady returns true if the resource is ready overall. +func (rr *RequestReplyStatus) IsReady() bool { + return rr.GetTopLevelCondition().IsTrue() +} + +// GetTopLevelCondition returns the top level Condition. +func (rr *RequestReplyStatus) GetTopLevelCondition() *apis.Condition { + return requestReplyCondSet.Manage(rr).GetTopLevelCondition() +} + +// InitializeConditions sets relevant unset conditions to Unknown state. +func (rr *RequestReplyStatus) InitializeConditions() { + requestReplyCondSet.Manage(rr).InitializeConditions() +} + +func (rr *RequestReplyStatus) SetAddress(address *v1.Addressable) { + rr.AddressStatus = v1.AddressStatus{ + Address: address, + } + + if address != nil && address.URL != nil { + rr.GetConditionSet().Manage(rr).MarkTrue(RequestReplyConditionAddressable) + rr.AddressStatus.Address.Name = &address.URL.Scheme + } else { + rr.GetConditionSet().Manage(rr).MarkFalse(RequestReplyConditionAddressable, "nil URL", "URL is nil") + } +} + +func (rr *RequestReplyStatus) MarkTriggersReady() { + rr.GetConditionSet().Manage(rr).MarkTrue(RequestReplyConditionTriggers) +} + +func (rr *RequestReplyStatus) MarkTriggersNotReadyWithReason(reason, messageFormat string, messageA ...interface{}) { + rr.GetConditionSet().Manage(rr).MarkUnknown(RequestReplyConditionTriggers, reason, messageFormat, messageA...) +} + +func (rr *RequestReplyStatus) MarkIngressReady() { + rr.GetConditionSet().Manage(rr).MarkTrue(RequestReplyConditionIngress) +} + +func (rr *RequestReplyStatus) MarkIngressNotReadyWithReason(reason, messageFormat string, messageA ...interface{}) { + rr.GetConditionSet().Manage(rr).MarkUnknown(RequestReplyConditionIngress, reason, messageFormat, messageA...) +} + +func (rr *RequestReplyStatus) MarkEventPoliciesTrue() { + rr.GetConditionSet().Manage(rr).MarkTrue(RequestReplyConditionEventPoliciesReady) +} + +func (rr *RequestReplyStatus) MarkEventPoliciesTrueWithReason(reason, messageFormat string, messageA ...interface{}) { + rr.GetConditionSet().Manage(rr).MarkTrueWithReason(RequestReplyConditionEventPoliciesReady, reason, messageFormat, messageA...) +} + +func (rr *RequestReplyStatus) MarkEventPoliciesFailed(reason, messageFormat string, messageA ...interface{}) { + rr.GetConditionSet().Manage(rr).MarkFalse(RequestReplyConditionEventPoliciesReady, reason, messageFormat, messageA...) 
+} + +func (rr *RequestReplyStatus) MarkEventPoliciesUnknown(reason, messageFormat string, messageA ...interface{}) { + rr.GetConditionSet().Manage(rr).MarkUnknown(RequestReplyConditionEventPoliciesReady, reason, messageFormat, messageA...) +} diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_types.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_types.go new file mode 100644 index 0000000000..3cb37e11b8 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_types.go @@ -0,0 +1,122 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/kmeta" + + eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1" +) + +// +genclient +// +genreconciler +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RequestRepluy represents synchronous interface to sending and receiving events from a Broker. +type RequestReply struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired state of the EventPolicy. + Spec RequestReplySpec `json:"spec,omitempty"` + + // Status represents the current state of the EventPolicy. + // This data may be out of date. + // +optional + Status RequestReplyStatus `json:"status,omitempty"` +} + +var ( + // Check that EventPolicy can be validated, can be defaulted, and has immutable fields. + _ apis.Validatable = (*RequestReply)(nil) + _ apis.Defaultable = (*RequestReply)(nil) + + // Check that EventPolicy can return its spec untyped. + _ apis.HasSpec = (*RequestReply)(nil) + + _ runtime.Object = (*RequestReply)(nil) + + // Check that we can create OwnerReferences to an EventPolicy. + _ kmeta.OwnerRefable = (*RequestReply)(nil) + + // Check that the type conforms to the duck Knative Resource shape. + _ duckv1.KRShaped = (*RequestReply)(nil) +) + +type RequestReplySpec struct { + // BrokerRef contains the reference to the broker the RequestReply sends events to. + BrokerRef duckv1.KReference `json:"brokerRef"` + + CorrelationAttribute string `json:"correlationAttribute"` + + ReplyAttribute string `json:"replyAttribute"` + + Timeout *string `json:"timeout,omitempty"` + + Delivery *eventingduckv1.DeliverySpec `json:"delivery,omitempty"` + + Secrets []string `json:"secrets"` +} + +// RequestReplyStatus represents the current state of a RequestReply. +type RequestReplyStatus struct { + // inherits duck/v1 Status, which currently provides: + // * ObservedGeneration - the 'Generation' of the Service that was last processed by the controller. + // * Conditions - the latest available observations of a resource's current state. + duckv1.Status `json:",inline"` + + // AddressStatus is the part where the RequestReply fulfills the Addressable contract. 
+ // It exposes the endpoint as an URI to get events delivered. + // +optional + duckv1.AddressStatus `json:",inline"` + + // AppliedEventPoliciesStatus contains the list of EventPolicies which apply to this Broker. + // +optional + eventingduckv1.AppliedEventPoliciesStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RequestReplyList is a collection of RequestReplies. +type RequestReplyList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + Items []RequestReply `json:"items"` +} + +// GetGroupVersionKind returns GroupVersionKind for EventPolicy +func (rr *RequestReply) GetGroupVersionKind() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind("RequestReply") +} + +// GetUntypedSpec returns the spec of the EventPolicy. +func (rr *RequestReply) GetUntypedSpec() interface{} { + return rr.Spec +} + +// GetStatus retrieves the status of the EventPolicy. Implements the KRShaped interface. +func (rr *RequestReply) GetStatus() *duckv1.Status { + return &rr.Status.Status +} diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_validation.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_validation.go new file mode 100644 index 0000000000..693c5a789d --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/requestreply_validation.go @@ -0,0 +1,83 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "strings" + + "github.com/rickb777/date/period" + + "knative.dev/pkg/apis" +) + +func (rr *RequestReply) Validate(ctx context.Context) *apis.FieldError { + ctx = apis.WithinParent(ctx, rr.ObjectMeta) + return rr.Spec.Validate(ctx).ViaField("spec") +} + +func (rrs *RequestReplySpec) Validate(ctx context.Context) *apis.FieldError { + var errs *apis.FieldError + + if ke := rrs.BrokerRef.Validate(ctx); ke != nil { + errs = errs.Also(ke.ViaField("brokerRef")) + } + + if !strings.EqualFold(rrs.BrokerRef.Kind, "broker") { + errs = errs.Also(apis.ErrInvalidValue(rrs.BrokerRef.Kind, ".kind", "brokerRef kind must be Broker").ViaField("brokerRef")) + } + + if rrs.BrokerRef.Namespace != "" { + errs = errs.Also(apis.ErrDisallowedFields("namespace").ViaField("brokerRef")) + } + + if rrs.Delivery != nil { + if de := rrs.Delivery.Validate(ctx); de != nil { + errs = errs.Also(de.ViaField("delivery")) + } + } + + if rrs.Timeout != nil { + timeout, err := period.Parse(*rrs.Timeout) + if err != nil || timeout.IsZero() || timeout.IsNegative() { + errs = errs.Also(apis.ErrInvalidValue(*rrs.Timeout, "timeout")) + } + + } + + if len(rrs.Secrets) == 0 { + errs = errs.Also(apis.ErrInvalidValue(rrs.Secrets, "secrets", "one or more secrets must be provided")) + } + + if rrs.CorrelationAttribute == "" || + rrs.CorrelationAttribute == "id" || + rrs.CorrelationAttribute == "course" || + rrs.CorrelationAttribute == "specversion" || + rrs.CorrelationAttribute == "type" { + errs = errs.Also(apis.ErrInvalidValue(rrs.CorrelationAttribute, "correlationattribute", "correlationattribute must be non-empty and cannot be a core cloudevent attribute (id, type, specversion, source)")) + } + + if rrs.ReplyAttribute == "" || + rrs.ReplyAttribute == "id" || + rrs.ReplyAttribute == "course" || + rrs.ReplyAttribute == "specversion" || + rrs.ReplyAttribute == "type" { + errs = errs.Also(apis.ErrInvalidValue(rrs.ReplyAttribute, "replyattribute", "replyattribute must be non-empty and cannot be a core cloudevent attribute (id, type, specversion, source)")) + } + + return errs +} diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go index 068369c53b..4a30bba484 100644 --- a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go @@ -24,6 +24,7 @@ package v1alpha1 import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" + duckv1 "knative.dev/eventing/pkg/apis/duck/v1" eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1" ) @@ -256,3 +257,115 @@ func (in *EventPolicyToReference) DeepCopy() *EventPolicyToReference { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestReply) DeepCopyInto(out *RequestReply) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestReply. 
+func (in *RequestReply) DeepCopy() *RequestReply { + if in == nil { + return nil + } + out := new(RequestReply) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RequestReply) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestReplyList) DeepCopyInto(out *RequestReplyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RequestReply, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestReplyList. +func (in *RequestReplyList) DeepCopy() *RequestReplyList { + if in == nil { + return nil + } + out := new(RequestReplyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RequestReplyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestReplySpec) DeepCopyInto(out *RequestReplySpec) { + *out = *in + in.BrokerRef.DeepCopyInto(&out.BrokerRef) + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(string) + **out = **in + } + if in.Delivery != nil { + in, out := &in.Delivery, &out.Delivery + *out = new(duckv1.DeliverySpec) + (*in).DeepCopyInto(*out) + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestReplySpec. +func (in *RequestReplySpec) DeepCopy() *RequestReplySpec { + if in == nil { + return nil + } + out := new(RequestReplySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestReplyStatus) DeepCopyInto(out *RequestReplyStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + in.AddressStatus.DeepCopyInto(&out.AddressStatus) + in.AppliedEventPoliciesStatus.DeepCopyInto(&out.AppliedEventPoliciesStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestReplyStatus. +func (in *RequestReplyStatus) DeepCopy() *RequestReplyStatus { + if in == nil { + return nil + } + out := new(RequestReplyStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/knative.dev/eventing/pkg/apis/feature/features.go b/vendor/knative.dev/eventing/pkg/apis/feature/features.go index 98eeb2907c..cfe379141d 100644 --- a/vendor/knative.dev/eventing/pkg/apis/feature/features.go +++ b/vendor/knative.dev/eventing/pkg/apis/feature/features.go @@ -67,6 +67,10 @@ const ( // DefaultOIDCDiscoveryURL is the default OIDC Discovery URL used in most Kubernetes clusters. DefaultOIDCDiscoveryBaseURL Flag = "https://kubernetes.default.svc" + + // DefaultRequestReplyTimeout is a value for RequestReplyDefaultTimeout that indicates to timeout + // a RequestReply resource after 30 seconds by default. 
+ DefaultRequestReplyTimeout Flag = "30s" ) // Flags is a map containing all the enabled/disabled flags for the experimental features. @@ -75,16 +79,17 @@ type Flags map[string]Flag func newDefaults() Flags { return map[string]Flag{ - KReferenceGroup: Disabled, - DeliveryRetryAfter: Disabled, - DeliveryTimeout: Enabled, - KReferenceMapping: Disabled, - TransportEncryption: Disabled, - OIDCAuthentication: Disabled, - EvenTypeAutoCreate: Disabled, - NewAPIServerFilters: Disabled, - AuthorizationDefaultMode: AuthorizationAllowSameNamespace, - OIDCDiscoveryBaseURL: DefaultOIDCDiscoveryBaseURL, + KReferenceGroup: Disabled, + DeliveryRetryAfter: Disabled, + DeliveryTimeout: Enabled, + KReferenceMapping: Disabled, + TransportEncryption: Disabled, + OIDCAuthentication: Disabled, + EvenTypeAutoCreate: Disabled, + NewAPIServerFilters: Disabled, + AuthorizationDefaultMode: AuthorizationAllowSameNamespace, + OIDCDiscoveryBaseURL: DefaultOIDCDiscoveryBaseURL, + RequestReplyDefaultTimeout: DefaultRequestReplyTimeout, } } @@ -151,6 +156,19 @@ func (e Flags) OIDCDiscoveryBaseURL() string { return string(discoveryUrl) } +func (e Flags) RequestReplyDefaultTimeout() string { + if e == nil { + return string(DefaultRequestReplyTimeout) + } + + timeout, ok := e[RequestReplyDefaultTimeout] + if !ok { + return string(DefaultRequestReplyTimeout) + } + + return string(timeout) +} + func (e Flags) String() string { return fmt.Sprintf("%+v", map[string]Flag(e)) } diff --git a/vendor/knative.dev/eventing/pkg/apis/feature/flag_names.go b/vendor/knative.dev/eventing/pkg/apis/feature/flag_names.go index e21056eb44..ba163868c7 100644 --- a/vendor/knative.dev/eventing/pkg/apis/feature/flag_names.go +++ b/vendor/knative.dev/eventing/pkg/apis/feature/flag_names.go @@ -17,16 +17,17 @@ limitations under the License. 
package feature const ( - KReferenceGroup = "kreference-group" - DeliveryRetryAfter = "delivery-retryafter" - DeliveryTimeout = "delivery-timeout" - KReferenceMapping = "kreference-mapping" - TransportEncryption = "transport-encryption" - EvenTypeAutoCreate = "eventtype-auto-create" - OIDCAuthentication = "authentication-oidc" - NodeSelectorLabel = "apiserversources-nodeselector-" - CrossNamespaceEventLinks = "cross-namespace-event-links" - NewAPIServerFilters = "new-apiserversource-filters" - AuthorizationDefaultMode = "default-authorization-mode" - OIDCDiscoveryBaseURL = "oidc-discovery-base-url" + KReferenceGroup = "kreference-group" + DeliveryRetryAfter = "delivery-retryafter" + DeliveryTimeout = "delivery-timeout" + KReferenceMapping = "kreference-mapping" + TransportEncryption = "transport-encryption" + EvenTypeAutoCreate = "eventtype-auto-create" + OIDCAuthentication = "authentication-oidc" + NodeSelectorLabel = "apiserversources-nodeselector-" + CrossNamespaceEventLinks = "cross-namespace-event-links" + NewAPIServerFilters = "new-apiserversource-filters" + AuthorizationDefaultMode = "default-authorization-mode" + OIDCDiscoveryBaseURL = "oidc-discovery-base-url" + RequestReplyDefaultTimeout = "requestreply-default-timeout" ) diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_types.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_types.go index d45d1a971b..35ef3988ae 100644 --- a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_types.go +++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_types.go @@ -19,10 +19,11 @@ package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1" "knative.dev/pkg/apis" duckv1 "knative.dev/pkg/apis/duck/v1" "knative.dev/pkg/kmeta" + + eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1" ) // +genclient @@ -44,6 +45,14 @@ type InMemoryChannel struct { Status InMemoryChannelStatus `json:"status,omitempty"` } +var ( + // AsyncHandlerAnnotation controls whether InMemoryChannel uses the async handler. + // + // Async handler is subject to event loss since it responds with 200 before forwarding the event + // to all subscriptions. + AsyncHandlerAnnotation = SchemeGroupVersion.Group + "/async-handler" +) + var ( // Check that InMemoryChannel can be validated and defaulted. 
 	_ apis.Validatable = (*InMemoryChannel)(nil)
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/register.go b/vendor/knative.dev/eventing/pkg/apis/sinks/register.go
index 676fa75e84..1994d57918 100644
--- a/vendor/knative.dev/eventing/pkg/apis/sinks/register.go
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/register.go
@@ -33,6 +33,12 @@ var (
 		Group:    GroupName,
 		Resource: "jobsinks",
 	}
+
+	// IntegrationSinkResource represents a Knative Eventing sink IntegrationSink
+	IntegrationSinkResource = schema.GroupResource{
+		Group:    GroupName,
+		Resource: "integrationsinks",
+	}
 )
 
 type Config struct {
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_conversion.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_conversion.go
new file mode 100644
index 0000000000..8f41d2da41
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_conversion.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"context"
+	"fmt"
+
+	"knative.dev/pkg/apis"
+)
+
+// ConvertTo implements apis.Convertible
+// Converts sink from v1alpha1.IntegrationSink into a higher version.
+func (sink *IntegrationSink) ConvertTo(ctx context.Context, obj apis.Convertible) error {
+	return fmt.Errorf("v1alpha1 is the highest known version, got: %T", sink)
+}
+
+// ConvertFrom implements apis.Convertible
+// Converts obj from a higher version into v1alpha1.IntegrationSink
+func (sink *IntegrationSink) ConvertFrom(ctx context.Context, obj apis.Convertible) error {
+	return fmt.Errorf("v1alpha1 is the highest known version, got: %T", sink)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_defaults.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_defaults.go
new file mode 100644
index 0000000000..f77df267e8
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_defaults.go
@@ -0,0 +1,26 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package v1alpha1 + +import "context" + +func (sink *IntegrationSink) SetDefaults(ctx context.Context) { + sink.Spec.SetDefaults(ctx) +} + +func (sink *IntegrationSinkSpec) SetDefaults(ctx context.Context) { +} diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_lifecycle.go new file mode 100644 index 0000000000..1ad33e2cae --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_lifecycle.go @@ -0,0 +1,123 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +const ( + // IntegrationSinkConditionReady has status True when the IntegrationSink is ready to send events. + IntegrationSinkConditionReady = apis.ConditionReady + + IntegrationSinkConditionAddressable apis.ConditionType = "Addressable" + + // IntegrationSinkConditionDeploymentReady has status True when the IntegrationSink has been configured with a deployment. + IntegrationSinkConditionDeploymentReady apis.ConditionType = "DeploymentReady" + + // IntegrationSinkConditionEventPoliciesReady has status True when all the applying EventPolicies for this + // IntegrationSink are ready. + IntegrationSinkConditionEventPoliciesReady apis.ConditionType = "EventPoliciesReady" +) + +var IntegrationSinkCondSet = apis.NewLivingConditionSet( + IntegrationSinkConditionAddressable, + IntegrationSinkConditionDeploymentReady, + IntegrationSinkConditionEventPoliciesReady, +) + +// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface. +func (*IntegrationSink) GetConditionSet() apis.ConditionSet { + return IntegrationSinkCondSet +} + +// GetCondition returns the condition currently associated with the given type, or nil. +func (s *IntegrationSinkStatus) GetCondition(t apis.ConditionType) *apis.Condition { + return IntegrationSinkCondSet.Manage(s).GetCondition(t) +} + +// GetTopLevelCondition returns the top level Condition. +func (ps *IntegrationSinkStatus) GetTopLevelCondition() *apis.Condition { + return IntegrationSinkCondSet.Manage(ps).GetTopLevelCondition() +} + +// IsReady returns true if the resource is ready overall. +func (s *IntegrationSinkStatus) IsReady() bool { + return IntegrationSinkCondSet.Manage(s).IsHappy() +} + +// InitializeConditions sets relevant unset conditions to Unknown state. +func (s *IntegrationSinkStatus) InitializeConditions() { + IntegrationSinkCondSet.Manage(s).InitializeConditions() +} + +// MarkAddressableReady marks the Addressable condition to True. +func (s *IntegrationSinkStatus) MarkAddressableReady() { + IntegrationSinkCondSet.Manage(s).MarkTrue(IntegrationSinkConditionAddressable) +} + +// MarkEventPoliciesFailed marks the EventPoliciesReady condition to False with the given reason and message. 
+func (s *IntegrationSinkStatus) MarkEventPoliciesFailed(reason, messageFormat string, messageA ...interface{}) { + IntegrationSinkCondSet.Manage(s).MarkFalse(IntegrationSinkConditionEventPoliciesReady, reason, messageFormat, messageA...) +} + +// MarkEventPoliciesUnknown marks the EventPoliciesReady condition to Unknown with the given reason and message. +func (s *IntegrationSinkStatus) MarkEventPoliciesUnknown(reason, messageFormat string, messageA ...interface{}) { + IntegrationSinkCondSet.Manage(s).MarkUnknown(IntegrationSinkConditionEventPoliciesReady, reason, messageFormat, messageA...) +} + +// MarkEventPoliciesTrue marks the EventPoliciesReady condition to True. +func (s *IntegrationSinkStatus) MarkEventPoliciesTrue() { + IntegrationSinkCondSet.Manage(s).MarkTrue(IntegrationSinkConditionEventPoliciesReady) +} + +// MarkEventPoliciesTrueWithReason marks the EventPoliciesReady condition to True with the given reason and message. +func (s *IntegrationSinkStatus) MarkEventPoliciesTrueWithReason(reason, messageFormat string, messageA ...interface{}) { + IntegrationSinkCondSet.Manage(s).MarkTrueWithReason(IntegrationSinkConditionEventPoliciesReady, reason, messageFormat, messageA...) +} + +func (s *IntegrationSinkStatus) PropagateDeploymentStatus(d *appsv1.DeploymentStatus) { + deploymentAvailableFound := false + for _, cond := range d.Conditions { + if cond.Type == appsv1.DeploymentAvailable { + deploymentAvailableFound = true + if cond.Status == corev1.ConditionTrue { + IntegrationSinkCondSet.Manage(s).MarkTrue(IntegrationSinkConditionDeploymentReady) + } else if cond.Status == corev1.ConditionFalse { + IntegrationSinkCondSet.Manage(s).MarkFalse(IntegrationSinkConditionDeploymentReady, cond.Reason, cond.Message) + } else if cond.Status == corev1.ConditionUnknown { + IntegrationSinkCondSet.Manage(s).MarkUnknown(IntegrationSinkConditionDeploymentReady, cond.Reason, cond.Message) + } + } + } + if !deploymentAvailableFound { + IntegrationSinkCondSet.Manage(s).MarkUnknown(IntegrationSinkConditionDeploymentReady, "DeploymentUnavailable", "The Deployment '%s' is unavailable.", d) + } +} + +func (s *IntegrationSinkStatus) SetAddress(address *duckv1.Addressable) { + s.Address = address + if address == nil || address.URL.IsEmpty() { + IntegrationSinkCondSet.Manage(s).MarkFalse(IntegrationSinkConditionAddressable, "EmptyHostname", "hostname is the empty string") + } else { + IntegrationSinkCondSet.Manage(s).MarkTrue(IntegrationSinkConditionAddressable) + + } +} diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_types.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_types.go new file mode 100644 index 0000000000..efc15e62b8 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_types.go @@ -0,0 +1,118 @@ +/* +Copyright 2024 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/eventing/pkg/apis/common/integration/v1alpha1" + eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/kmeta" +) + +// +genclient +// +genreconciler +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:defaulter-gen=true + +// IntegrationSink is the Schema for the IntegrationSink API. +type IntegrationSink struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec IntegrationSinkSpec `json:"spec,omitempty"` + Status IntegrationSinkStatus `json:"status,omitempty"` +} + +// Check the interfaces that JobSink should be implementing. +var ( + _ runtime.Object = (*IntegrationSink)(nil) + _ kmeta.OwnerRefable = (*IntegrationSink)(nil) + _ apis.Validatable = (*IntegrationSink)(nil) + _ apis.Defaultable = (*IntegrationSink)(nil) + _ apis.HasSpec = (*IntegrationSink)(nil) + _ duckv1.KRShaped = (*IntegrationSink)(nil) + _ apis.Convertible = (*JobSink)(nil) +) + +type IntegrationSinkSpec struct { + Aws *Aws `json:"aws,omitempty"` // AWS source configuration + Log *Log `json:"log,omitempty"` // Log sink configuration +} + +type Log struct { + LoggerName string `json:"loggerName,omitempty" default:"log-sink"` // Name of the logging category to use + Level string `json:"level,omitempty" default:"INFO"` // Logging level to use + LogMask bool `json:"logMask,omitempty" default:"false"` // Mask sensitive information in the log + Marker string `json:"marker,omitempty"` // An optional Marker name to use + Multiline bool `json:"multiline,omitempty" default:"false"` // If enabled, outputs each information on a newline + ShowAllProperties bool `json:"showAllProperties,omitempty" default:"false"` // Show all of the exchange properties (both internal and custom) + ShowBody bool `json:"showBody,omitempty" default:"true"` // Show the message body + ShowBodyType bool `json:"showBodyType,omitempty" default:"true"` // Show the body Java type + ShowExchangePattern bool `json:"showExchangePattern,omitempty" default:"true"` // Show the Message Exchange Pattern (MEP) + ShowHeaders bool `json:"showHeaders,omitempty" default:"false"` // Show the headers received + ShowProperties bool `json:"showProperties,omitempty" default:"false"` // Show the exchange properties (only custom) + ShowStreams bool `json:"showStreams,omitempty" default:"false"` // Show the stream bodies + ShowCachedStreams bool `json:"showCachedStreams,omitempty" default:"true"` // Show cached stream bodies +} + +type Aws struct { + S3 *v1alpha1.AWSS3 `json:"s3,omitempty"` // S3 source configuration + SQS *v1alpha1.AWSSQS `json:"sqs,omitempty"` // SQS source configuration + SNS *v1alpha1.AWSSNS `json:"sns,omitempty"` // SNS source configuration + Auth *v1alpha1.Auth `json:"auth,omitempty"` +} + +type IntegrationSinkStatus struct { + duckv1.Status `json:",inline"` + + // AddressStatus is the part where the JobSink fulfills the Addressable contract. + // It exposes the endpoint as an URI to get events delivered. + // +optional + duckv1.AddressStatus `json:",inline"` + + // AppliedEventPoliciesStatus contains the list of EventPolicies which apply to this JobSink + // +optional + eventingduckv1.AppliedEventPoliciesStatus `json:",inline"` +} + +// GetGroupVersionKind returns the GroupVersionKind. 
+func (*IntegrationSink) GetGroupVersionKind() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind("IntegrationSink") +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IntegrationSinkList contains a list of IntegrationSink +type IntegrationSinkList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IntegrationSink `json:"items"` +} + +// GetUntypedSpec returns the spec of the IntegrationSink. +func (c *IntegrationSink) GetUntypedSpec() interface{} { + return c.Spec +} + +// GetStatus retrieves the status of the IntegrationSink. Implements the KRShaped interface. +func (c *IntegrationSink) GetStatus() *duckv1.Status { + return &c.Status.Status +} diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_validation.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_validation.go new file mode 100644 index 0000000000..7f24d9ca5a --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/integration_sink_validation.go @@ -0,0 +1,97 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + + "knative.dev/pkg/apis" +) + +func (sink *IntegrationSink) Validate(ctx context.Context) *apis.FieldError { + ctx = apis.WithinParent(ctx, sink.ObjectMeta) + return sink.Spec.Validate(ctx).ViaField("spec") +} + +func (spec *IntegrationSinkSpec) Validate(ctx context.Context) *apis.FieldError { + var errs *apis.FieldError + + // Count how many fields are set to ensure mutual exclusivity + sinkSetCount := 0 + if spec.Log != nil { + sinkSetCount++ + } + if spec.Aws != nil { + if spec.Aws.S3 != nil { + sinkSetCount++ + } + if spec.Aws.SQS != nil { + sinkSetCount++ + } + if spec.Aws.SNS != nil { + sinkSetCount++ + } + } + + // Validate that only one sink field is set + if sinkSetCount > 1 { + errs = errs.Also(apis.ErrGeneric("only one sink type can be set", "spec")) + } else if sinkSetCount == 0 { + errs = errs.Also(apis.ErrGeneric("at least one sink type must be specified", "spec")) + } + + // Only perform AWS-specific validation if exactly one AWS sink is configured + if sinkSetCount == 1 && spec.Aws != nil { + if spec.Aws.S3 != nil || spec.Aws.SQS != nil || spec.Aws.SNS != nil { + // Check that AWS Auth is properly configured + if !spec.Aws.Auth.HasAuth() { + errs = errs.Also(apis.ErrMissingField("aws.auth.secret.ref.name")) + } + } + + // Additional validation for AWS S3 required fields + if spec.Aws.S3 != nil { + if spec.Aws.S3.Arn == "" { + errs = errs.Also(apis.ErrMissingField("aws.s3.arn")) + } + if spec.Aws.S3.Region == "" { + errs = errs.Also(apis.ErrMissingField("aws.s3.region")) + } + } + + // Additional validation for AWS SQS required fields + if spec.Aws.SQS != nil { + if spec.Aws.SQS.Arn == "" { + errs = errs.Also(apis.ErrMissingField("aws.sqs.arn")) + } + if spec.Aws.SQS.Region == "" { + errs = errs.Also(apis.ErrMissingField("aws.sqs.region")) + } + } + // Additional validation for AWS SNS 
required fields + if spec.Aws.SNS != nil { + if spec.Aws.SNS.Arn == "" { + errs = errs.Also(apis.ErrMissingField("aws.sns.arn")) + } + if spec.Aws.SNS.Region == "" { + errs = errs.Also(apis.ErrMissingField("aws.sns.region")) + } + } + } + + return errs +} diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_defaults.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_defaults.go index 13f62e868b..3bd18fbf3f 100644 --- a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_defaults.go +++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_defaults.go @@ -18,7 +18,48 @@ package v1alpha1 import ( "context" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" ) func (sink *JobSink) SetDefaults(ctx context.Context) { + if sink.Spec.Job != nil { + setBatchJobDefaults(sink.Spec.Job) + } +} + +func setBatchJobDefaults(job *batchv1.Job) { + for i := range job.Spec.Template.Spec.Containers { + executionModeFound := false + for j := range job.Spec.Template.Spec.Containers[i].Env { + if job.Spec.Template.Spec.Containers[i].Env[j].Name == ExecutionModeEnvVar { + executionModeFound = true + break + } + } + if executionModeFound { + continue + } + job.Spec.Template.Spec.Containers[i].Env = append(job.Spec.Template.Spec.Containers[i].Env, corev1.EnvVar{ + Name: ExecutionModeEnvVar, + Value: string(ExecutionModeBatch), + }) + } + for i := range job.Spec.Template.Spec.InitContainers { + executionModeFound := false + for j := range job.Spec.Template.Spec.InitContainers[i].Env { + if job.Spec.Template.Spec.InitContainers[i].Env[j].Name == ExecutionModeEnvVar { + executionModeFound = true + break + } + } + if executionModeFound { + continue + } + job.Spec.Template.Spec.InitContainers[i].Env = append(job.Spec.Template.Spec.InitContainers[i].Env, corev1.EnvVar{ + Name: ExecutionModeEnvVar, + Value: string(ExecutionModeBatch), + }) + } } diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_types.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_types.go index 857e9f79a1..5cc3a0faf6 100644 --- a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_types.go +++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_types.go @@ -22,9 +22,20 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1" duckv1 "knative.dev/pkg/apis/duck/v1" "knative.dev/pkg/kmeta" + + eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1" +) + +const ( + ExecutionModeEnvVar = "K_EXECUTION_MODE" +) + +type ExecutionMode string + +const ( + ExecutionModeBatch ExecutionMode = "batch" ) // +genclient diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/register.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/register.go index 827ebc28b4..89e4f3fbec 100644 --- a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/register.go +++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/register.go @@ -47,6 +47,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &JobSink{}, &JobSinkList{}, + &IntegrationSink{}, + &IntegrationSinkList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/zz_generated.deepcopy.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/zz_generated.deepcopy.go index 58c9fdfaf8..a0265e79c0 100644 --- 
a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/zz_generated.deepcopy.go @@ -24,8 +24,151 @@ package v1alpha1 import ( v1 "k8s.io/api/batch/v1" runtime "k8s.io/apimachinery/pkg/runtime" + integrationv1alpha1 "knative.dev/eventing/pkg/apis/common/integration/v1alpha1" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Aws) DeepCopyInto(out *Aws) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(integrationv1alpha1.AWSS3) + **out = **in + } + if in.SQS != nil { + in, out := &in.SQS, &out.SQS + *out = new(integrationv1alpha1.AWSSQS) + **out = **in + } + if in.SNS != nil { + in, out := &in.SNS, &out.SNS + *out = new(integrationv1alpha1.AWSSNS) + **out = **in + } + if in.Auth != nil { + in, out := &in.Auth, &out.Auth + *out = new(integrationv1alpha1.Auth) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Aws. +func (in *Aws) DeepCopy() *Aws { + if in == nil { + return nil + } + out := new(Aws) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationSink) DeepCopyInto(out *IntegrationSink) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationSink. +func (in *IntegrationSink) DeepCopy() *IntegrationSink { + if in == nil { + return nil + } + out := new(IntegrationSink) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IntegrationSink) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationSinkList) DeepCopyInto(out *IntegrationSinkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IntegrationSink, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationSinkList. +func (in *IntegrationSinkList) DeepCopy() *IntegrationSinkList { + if in == nil { + return nil + } + out := new(IntegrationSinkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IntegrationSinkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IntegrationSinkSpec) DeepCopyInto(out *IntegrationSinkSpec) { + *out = *in + if in.Aws != nil { + in, out := &in.Aws, &out.Aws + *out = new(Aws) + (*in).DeepCopyInto(*out) + } + if in.Log != nil { + in, out := &in.Log, &out.Log + *out = new(Log) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationSinkSpec. +func (in *IntegrationSinkSpec) DeepCopy() *IntegrationSinkSpec { + if in == nil { + return nil + } + out := new(IntegrationSinkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationSinkStatus) DeepCopyInto(out *IntegrationSinkStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + in.AddressStatus.DeepCopyInto(&out.AddressStatus) + in.AppliedEventPoliciesStatus.DeepCopyInto(&out.AppliedEventPoliciesStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationSinkStatus. +func (in *IntegrationSinkStatus) DeepCopy() *IntegrationSinkStatus { + if in == nil { + return nil + } + out := new(IntegrationSinkStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *JobSink) DeepCopyInto(out *JobSink) { *out = *in @@ -143,3 +286,19 @@ func (in *JobStatus) DeepCopy() *JobStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Log) DeepCopyInto(out *Log) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Log. +func (in *Log) DeepCopy() *Log { + if in == nil { + return nil + } + out := new(Log) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/register.go b/vendor/knative.dev/eventing/pkg/apis/sources/register.go index 04716c8ca6..3571d0a17b 100644 --- a/vendor/knative.dev/eventing/pkg/apis/sources/register.go +++ b/vendor/knative.dev/eventing/pkg/apis/sources/register.go @@ -56,4 +56,10 @@ var ( Group: GroupName, Resource: "containersources", } + + // IntegrationSourceResource respresents a Knative Eventing Sources IntegrationSource + IntegrationSourceResource = schema.GroupResource{ + Group: GroupName, + Resource: "integrationsources", + } ) diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/doc.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/doc.go new file mode 100644 index 0000000000..76cd299b7b --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 contains API Schema definitions for the sources v1alpha1 API group. 
+// +k8s:deepcopy-gen=package +// +groupName=sources.knative.dev +package v1alpha1 diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_conversion.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_conversion.go new file mode 100644 index 0000000000..32bfc7cc4b --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_conversion.go @@ -0,0 +1,36 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "fmt" + + "knative.dev/pkg/apis" +) + +// ConvertTo implements apis.Convertible +// Converts source from v1alpha1.IntegrationSource into a higher version. +func (source *IntegrationSource) ConvertTo(ctx context.Context, obj apis.Convertible) error { + return fmt.Errorf("v1alpha1 is the highest known version, got: %T", source) +} + +// ConvertFrom implements apis.Convertible +// Converts source from a higher version into v1beta2.IntegrationSource +func (source *IntegrationSource) ConvertFrom(ctx context.Context, obj apis.Convertible) error { + return fmt.Errorf("v1alpha1 is the highest known version, got: %T", source) +} diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_defaults.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_defaults.go new file mode 100644 index 0000000000..70d82c73e5 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_defaults.go @@ -0,0 +1,26 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import "context" + +func (source *IntegrationSource) SetDefaults(ctx context.Context) { + source.Spec.SetDefaults(ctx) +} + +func (source *IntegrationSourceSpec) SetDefaults(ctx context.Context) { +} diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_lifecycle.go new file mode 100644 index 0000000000..cc1270fb8b --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_lifecycle.go @@ -0,0 +1,76 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + v1 "knative.dev/eventing/pkg/apis/sources/v1" + "knative.dev/pkg/apis" +) + +const ( + // IntegrationSourceConditionReady has status True when the IntegrationSource is ready to send events. + IntegrationSourceConditionReady = apis.ConditionReady + + // IntegrationSourceConditionContainerSourceReady has status True when the IntegrationSource's ContainerSource is ready. + IntegrationSourceConditionContainerSourceReady apis.ConditionType = "ContainerSourceReady" +) + +var IntegrationCondSet = apis.NewLivingConditionSet( + IntegrationSourceConditionContainerSourceReady, +) + +// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface. +func (*IntegrationSource) GetConditionSet() apis.ConditionSet { + return IntegrationCondSet +} + +// GetTopLevelCondition returns the top level condition. +func (s *IntegrationSourceStatus) GetTopLevelCondition() *apis.Condition { + return IntegrationCondSet.Manage(s).GetTopLevelCondition() +} + +// InitializeConditions sets relevant unset conditions to Unknown state. +func (s *IntegrationSourceStatus) InitializeConditions() { + IntegrationCondSet.Manage(s).InitializeConditions() +} + +func (iss *IntegrationSourceStatus) IsReady() bool { + return IntegrationCondSet.Manage(iss).IsHappy() +} + +func (s *IntegrationSourceStatus) PropagateContainerSourceStatus(status *v1.ContainerSourceStatus) { + // ContainerSource status has all we need, hence deep copy it. + s.SourceStatus = *status.SourceStatus.DeepCopy() + + cond := status.GetCondition(apis.ConditionReady) + switch { + case cond == nil: + IntegrationCondSet.Manage(s).MarkUnknown(IntegrationSourceConditionContainerSourceReady, "", "") + case cond.Status == corev1.ConditionTrue: + IntegrationCondSet.Manage(s).MarkTrue(IntegrationSourceConditionContainerSourceReady) + case cond.Status == corev1.ConditionFalse: + IntegrationCondSet.Manage(s).MarkFalse(IntegrationSourceConditionContainerSourceReady, cond.Reason, cond.Message) + case cond.Status == corev1.ConditionUnknown: + IntegrationCondSet.Manage(s).MarkUnknown(IntegrationSourceConditionContainerSourceReady, cond.Reason, cond.Message) + default: + IntegrationCondSet.Manage(s).MarkUnknown(IntegrationSourceConditionContainerSourceReady, cond.Reason, cond.Message) + } + + // Propagate ContainerSources AuthStatus to IntegrationSources AuthStatus + s.Auth = status.Auth +} diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_types.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_types.go new file mode 100644 index 0000000000..2eec773bcb --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_types.go @@ -0,0 +1,113 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/eventing/pkg/apis/common/integration/v1alpha1" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/kmeta" +) + +// +genclient +// +genreconciler +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IntegrationSource is the Schema for the Integrationsources API +type IntegrationSource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec IntegrationSourceSpec `json:"spec,omitempty"` + Status IntegrationSourceStatus `json:"status,omitempty"` +} + +var ( + _ runtime.Object = (*IntegrationSource)(nil) + _ kmeta.OwnerRefable = (*IntegrationSource)(nil) + _ apis.Validatable = (*IntegrationSource)(nil) + _ apis.Defaultable = (*IntegrationSource)(nil) + _ apis.HasSpec = (*IntegrationSource)(nil) + _ duckv1.KRShaped = (*IntegrationSource)(nil) + _ apis.Convertible = (*IntegrationSource)(nil) +) + +// IntegrationSourceSpec defines the desired state of IntegrationSource +type IntegrationSourceSpec struct { + // inherits duck/v1 SourceSpec, which currently provides: + // * Sink - a reference to an object that will resolve to a domain name or + // a URI directly to use as the sink. + // * CloudEventOverrides - defines overrides to control the output format + // and modifications of the event sent to the sink. + duckv1.SourceSpec `json:",inline"` + + Aws *Aws `json:"aws,omitempty"` // AWS source configuration + Timer *Timer `json:"timer,omitempty"` // Timer configuration +} + +type Timer struct { + Period int `json:"period" default:"1000"` // Interval (in milliseconds) between producing messages + Message string `json:"message"` // Message to generate + ContentType string `json:"contentType" default:"text/plain"` // Content type of generated message + RepeatCount int `json:"repeatCount,omitempty"` // Max number of fires (optional) +} + +type Aws struct { + S3 *v1alpha1.AWSS3 `json:"s3,omitempty"` // S3 source configuration + SQS *v1alpha1.AWSSQS `json:"sqs,omitempty"` // SQS source configuration + DDBStreams *v1alpha1.AWSDDBStreams `json:"ddbStreams,omitempty"` // DynamoDB Streams source configuration + Auth *v1alpha1.Auth `json:"auth,omitempty"` +} + +// GetGroupVersionKind returns the GroupVersionKind. +func (*IntegrationSource) GetGroupVersionKind() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind("IntegrationSource") +} + +// IntegrationSourceStatus defines the observed state of IntegrationSource +type IntegrationSourceStatus struct { + // inherits duck/v1 SourceStatus, which currently provides: + // * ObservedGeneration - the 'Generation' of the Service that was last + // processed by the controller. + // * Conditions - the latest available observations of a resource's current + // state. + // * SinkURI - the current active sink URI that has been configured for the + // Source. 
+ duckv1.SourceStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IntegrationSourceList contains a list of IntegrationSource +type IntegrationSourceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IntegrationSource `json:"items"` +} + +// GetUntypedSpec returns the spec of the IntegrationSource. +func (c *IntegrationSource) GetUntypedSpec() interface{} { + return c.Spec +} + +// GetStatus retrieves the status of the IntegrationSource. Implements the KRShaped interface. +func (c *IntegrationSource) GetStatus() *duckv1.Status { + return &c.Status.Status +} diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_validation.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_validation.go new file mode 100644 index 0000000000..935e18e384 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/integration_validation.go @@ -0,0 +1,98 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + + "knative.dev/pkg/apis" +) + +func (source *IntegrationSource) Validate(ctx context.Context) *apis.FieldError { + ctx = apis.WithinParent(ctx, source.ObjectMeta) + return source.Spec.Validate(ctx).ViaField("spec") +} + +func (spec *IntegrationSourceSpec) Validate(ctx context.Context) *apis.FieldError { + var errs *apis.FieldError + + // Count how many fields are set to ensure mutual exclusivity + sourceSetCount := 0 + if spec.Timer != nil { + sourceSetCount++ + } + if spec.Aws != nil { + if spec.Aws.S3 != nil { + sourceSetCount++ + } + if spec.Aws.SQS != nil { + sourceSetCount++ + } + if spec.Aws.DDBStreams != nil { + sourceSetCount++ + } + } + + // Validate that only one source field is set + if sourceSetCount > 1 { + errs = errs.Also(apis.ErrGeneric("only one source type can be set", "spec")) + } else if sourceSetCount == 0 { + errs = errs.Also(apis.ErrGeneric("at least one source type must be specified", "spec")) + } + + // Only perform AWS-specific validation if exactly one AWS source is configured + if sourceSetCount == 1 && spec.Aws != nil { + if spec.Aws.S3 != nil || spec.Aws.SQS != nil || spec.Aws.DDBStreams != nil { + // Check that AWS Auth is properly configured + if !spec.Aws.Auth.HasAuth() { + errs = errs.Also(apis.ErrMissingField("aws.auth.secret.ref.name")) + } + } + + // Additional validation for AWS S3 required fields + if spec.Aws.S3 != nil { + if spec.Aws.S3.Arn == "" { + errs = errs.Also(apis.ErrMissingField("aws.s3.arn")) + } + if spec.Aws.S3.Region == "" { + errs = errs.Also(apis.ErrMissingField("aws.s3.region")) + } + } + + // Additional validation for AWS SQS required fields + if spec.Aws.SQS != nil { + if spec.Aws.SQS.Arn == "" { + errs = errs.Also(apis.ErrMissingField("aws.sqs.arn")) + } + if spec.Aws.SQS.Region == "" { + errs = errs.Also(apis.ErrMissingField("aws.sqs.region")) + } + } + + // Additional validation for AWS DDBStreams required fields + if 
spec.Aws.DDBStreams != nil { + if spec.Aws.DDBStreams.Table == "" { + errs = errs.Also(apis.ErrMissingField("aws.ddb-streams.table")) + } + if spec.Aws.DDBStreams.Region == "" { + errs = errs.Also(apis.ErrMissingField("aws.ddb-streams.region")) + } + } + } + + return errs +} diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/register.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/register.go new file mode 100644 index 0000000000..a812c885f5 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/eventing/pkg/apis/sources" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: sources.GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &IntegrationSource{}, + &IntegrationSourceList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/zz_generated.deepcopy.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..7e2857fded --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,184 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + integrationv1alpha1 "knative.dev/eventing/pkg/apis/common/integration/v1alpha1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Aws) DeepCopyInto(out *Aws) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(integrationv1alpha1.AWSS3) + **out = **in + } + if in.SQS != nil { + in, out := &in.SQS, &out.SQS + *out = new(integrationv1alpha1.AWSSQS) + **out = **in + } + if in.DDBStreams != nil { + in, out := &in.DDBStreams, &out.DDBStreams + *out = new(integrationv1alpha1.AWSDDBStreams) + **out = **in + } + if in.Auth != nil { + in, out := &in.Auth, &out.Auth + *out = new(integrationv1alpha1.Auth) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Aws. +func (in *Aws) DeepCopy() *Aws { + if in == nil { + return nil + } + out := new(Aws) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationSource) DeepCopyInto(out *IntegrationSource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationSource. +func (in *IntegrationSource) DeepCopy() *IntegrationSource { + if in == nil { + return nil + } + out := new(IntegrationSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IntegrationSource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationSourceList) DeepCopyInto(out *IntegrationSourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IntegrationSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationSourceList. +func (in *IntegrationSourceList) DeepCopy() *IntegrationSourceList { + if in == nil { + return nil + } + out := new(IntegrationSourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IntegrationSourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationSourceSpec) DeepCopyInto(out *IntegrationSourceSpec) { + *out = *in + in.SourceSpec.DeepCopyInto(&out.SourceSpec) + if in.Aws != nil { + in, out := &in.Aws, &out.Aws + *out = new(Aws) + (*in).DeepCopyInto(*out) + } + if in.Timer != nil { + in, out := &in.Timer, &out.Timer + *out = new(Timer) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationSourceSpec. 
+func (in *IntegrationSourceSpec) DeepCopy() *IntegrationSourceSpec { + if in == nil { + return nil + } + out := new(IntegrationSourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntegrationSourceStatus) DeepCopyInto(out *IntegrationSourceStatus) { + *out = *in + in.SourceStatus.DeepCopyInto(&out.SourceStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntegrationSourceStatus. +func (in *IntegrationSourceStatus) DeepCopy() *IntegrationSourceStatus { + if in == nil { + return nil + } + out := new(IntegrationSourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Timer) DeepCopyInto(out *Timer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Timer. +func (in *Timer) DeepCopy() *Timer { + if in == nil { + return nil + } + out := new(Timer) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/clientset.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/clientset.go index 72890b65cc..dcb0849852 100644 --- a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/clientset.go +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/clientset.go @@ -34,6 +34,7 @@ import ( messagingv1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1" sinksv1alpha1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1" sourcesv1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1" + sourcesv1alpha1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1" sourcesv1beta2 "knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2" ) @@ -48,6 +49,7 @@ type Interface interface { MessagingV1() messagingv1.MessagingV1Interface SinksV1alpha1() sinksv1alpha1.SinksV1alpha1Interface SourcesV1() sourcesv1.SourcesV1Interface + SourcesV1alpha1() sourcesv1alpha1.SourcesV1alpha1Interface SourcesV1beta2() sourcesv1beta2.SourcesV1beta2Interface } @@ -63,6 +65,7 @@ type Clientset struct { messagingV1 *messagingv1.MessagingV1Client sinksV1alpha1 *sinksv1alpha1.SinksV1alpha1Client sourcesV1 *sourcesv1.SourcesV1Client + sourcesV1alpha1 *sourcesv1alpha1.SourcesV1alpha1Client sourcesV1beta2 *sourcesv1beta2.SourcesV1beta2Client } @@ -111,6 +114,11 @@ func (c *Clientset) SourcesV1() sourcesv1.SourcesV1Interface { return c.sourcesV1 } +// SourcesV1alpha1 retrieves the SourcesV1alpha1Client +func (c *Clientset) SourcesV1alpha1() sourcesv1alpha1.SourcesV1alpha1Interface { + return c.sourcesV1alpha1 +} + // SourcesV1beta2 retrieves the SourcesV1beta2Client func (c *Clientset) SourcesV1beta2() sourcesv1beta2.SourcesV1beta2Interface { return c.sourcesV1beta2 @@ -196,6 +204,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.sourcesV1alpha1, err = sourcesv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.sourcesV1beta2, err = sourcesv1beta2.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -230,6 +242,7 @@ func New(c rest.Interface) *Clientset { cs.messagingV1 = messagingv1.New(c) cs.sinksV1alpha1 = sinksv1alpha1.New(c) 
cs.sourcesV1 = sourcesv1.New(c) + cs.sourcesV1alpha1 = sourcesv1alpha1.New(c) cs.sourcesV1beta2 = sourcesv1beta2.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/fake/clientset_generated.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/fake/clientset_generated.go index 8346286413..90b59fe63a 100644 --- a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -43,6 +43,8 @@ import ( fakesinksv1alpha1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/fake" sourcesv1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1" fakesourcesv1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/fake" + sourcesv1alpha1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1" + fakesourcesv1alpha1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake" sourcesv1beta2 "knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2" fakesourcesv1beta2 "knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/fake" ) @@ -142,6 +144,11 @@ func (c *Clientset) SourcesV1() sourcesv1.SourcesV1Interface { return &fakesourcesv1.FakeSourcesV1{Fake: &c.Fake} } +// SourcesV1alpha1 retrieves the SourcesV1alpha1Client +func (c *Clientset) SourcesV1alpha1() sourcesv1alpha1.SourcesV1alpha1Interface { + return &fakesourcesv1alpha1.FakeSourcesV1alpha1{Fake: &c.Fake} +} + // SourcesV1beta2 retrieves the SourcesV1beta2Client func (c *Clientset) SourcesV1beta2() sourcesv1beta2.SourcesV1beta2Interface { return &fakesourcesv1beta2.FakeSourcesV1beta2{Fake: &c.Fake} diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/fake/register.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/fake/register.go index 6fc03b4ba5..834b36ce0a 100644 --- a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/fake/register.go +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/fake/register.go @@ -33,6 +33,7 @@ import ( messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1" sinksv1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1" sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1" + sourcesv1alpha1 "knative.dev/eventing/pkg/apis/sources/v1alpha1" sourcesv1beta2 "knative.dev/eventing/pkg/apis/sources/v1beta2" ) @@ -49,6 +50,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ messagingv1.AddToScheme, sinksv1alpha1.AddToScheme, sourcesv1.AddToScheme, + sourcesv1alpha1.AddToScheme, sourcesv1beta2.AddToScheme, } diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/scheme/register.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/scheme/register.go index 5d2955e038..786a698e53 100644 --- a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/scheme/register.go +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/scheme/register.go @@ -33,6 +33,7 @@ import ( messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1" sinksv1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1" sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1" + sourcesv1alpha1 "knative.dev/eventing/pkg/apis/sources/v1alpha1" sourcesv1beta2 "knative.dev/eventing/pkg/apis/sources/v1beta2" ) @@ -49,6 +50,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ messagingv1.AddToScheme, sinksv1alpha1.AddToScheme, sourcesv1.AddToScheme, 
+ sourcesv1alpha1.AddToScheme, sourcesv1beta2.AddToScheme, } diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventing_client.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventing_client.go index e901caf43a..5ffd41edd3 100644 --- a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventing_client.go +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventing_client.go @@ -29,6 +29,7 @@ import ( type EventingV1alpha1Interface interface { RESTClient() rest.Interface EventPoliciesGetter + RequestRepliesGetter } // EventingV1alpha1Client is used to interact with features provided by the eventing.knative.dev group. @@ -40,6 +41,10 @@ func (c *EventingV1alpha1Client) EventPolicies(namespace string) EventPolicyInte return newEventPolicies(c, namespace) } +func (c *EventingV1alpha1Client) RequestReplies(namespace string) RequestReplyInterface { + return newRequestReplies(c, namespace) +} + // NewForConfig creates a new EventingV1alpha1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventing_client.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventing_client.go index 958cd86614..5d628625ca 100644 --- a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventing_client.go +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventing_client.go @@ -32,6 +32,10 @@ func (c *FakeEventingV1alpha1) EventPolicies(namespace string) v1alpha1.EventPol return &FakeEventPolicies{c, namespace} } +func (c *FakeEventingV1alpha1) RequestReplies(namespace string) v1alpha1.RequestReplyInterface { + return &FakeRequestReplies{c, namespace} +} + // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakeEventingV1alpha1) RESTClient() rest.Interface { diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_requestreply.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_requestreply.go new file mode 100644 index 0000000000..a1b6fdc6a2 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_requestreply.go @@ -0,0 +1,141 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1" +) + +// FakeRequestReplies implements RequestReplyInterface +type FakeRequestReplies struct { + Fake *FakeEventingV1alpha1 + ns string +} + +var requestrepliesResource = v1alpha1.SchemeGroupVersion.WithResource("requestreplies") + +var requestrepliesKind = v1alpha1.SchemeGroupVersion.WithKind("RequestReply") + +// Get takes name of the requestReply, and returns the corresponding requestReply object, and an error if there is any. +func (c *FakeRequestReplies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RequestReply, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(requestrepliesResource, c.ns, name), &v1alpha1.RequestReply{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RequestReply), err +} + +// List takes label and field selectors, and returns the list of RequestReplies that match those selectors. +func (c *FakeRequestReplies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RequestReplyList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(requestrepliesResource, requestrepliesKind, c.ns, opts), &v1alpha1.RequestReplyList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.RequestReplyList{ListMeta: obj.(*v1alpha1.RequestReplyList).ListMeta} + for _, item := range obj.(*v1alpha1.RequestReplyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested requestReplies. +func (c *FakeRequestReplies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(requestrepliesResource, c.ns, opts)) + +} + +// Create takes the representation of a requestReply and creates it. Returns the server's representation of the requestReply, and an error, if there is any. +func (c *FakeRequestReplies) Create(ctx context.Context, requestReply *v1alpha1.RequestReply, opts v1.CreateOptions) (result *v1alpha1.RequestReply, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(requestrepliesResource, c.ns, requestReply), &v1alpha1.RequestReply{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RequestReply), err +} + +// Update takes the representation of a requestReply and updates it. Returns the server's representation of the requestReply, and an error, if there is any. +func (c *FakeRequestReplies) Update(ctx context.Context, requestReply *v1alpha1.RequestReply, opts v1.UpdateOptions) (result *v1alpha1.RequestReply, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(requestrepliesResource, c.ns, requestReply), &v1alpha1.RequestReply{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RequestReply), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+func (c *FakeRequestReplies) UpdateStatus(ctx context.Context, requestReply *v1alpha1.RequestReply, opts v1.UpdateOptions) (*v1alpha1.RequestReply, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(requestrepliesResource, "status", c.ns, requestReply), &v1alpha1.RequestReply{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RequestReply), err +} + +// Delete takes name of the requestReply and deletes it. Returns an error if one occurs. +func (c *FakeRequestReplies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(requestrepliesResource, c.ns, name, opts), &v1alpha1.RequestReply{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeRequestReplies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(requestrepliesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.RequestReplyList{}) + return err +} + +// Patch applies the patch and returns the patched requestReply. +func (c *FakeRequestReplies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RequestReply, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(requestrepliesResource, c.ns, name, pt, data, subresources...), &v1alpha1.RequestReply{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RequestReply), err +} diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/generated_expansion.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/generated_expansion.go index d5bd1a045d..96c08942a5 100644 --- a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/generated_expansion.go +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/generated_expansion.go @@ -19,3 +19,5 @@ limitations under the License. package v1alpha1 type EventPolicyExpansion interface{} + +type RequestReplyExpansion interface{} diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/requestreply.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/requestreply.go new file mode 100644 index 0000000000..01742674fc --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/requestreply.go @@ -0,0 +1,195 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1" + scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme" +) + +// RequestRepliesGetter has a method to return a RequestReplyInterface. +// A group's client should implement this interface. +type RequestRepliesGetter interface { + RequestReplies(namespace string) RequestReplyInterface +} + +// RequestReplyInterface has methods to work with RequestReply resources. +type RequestReplyInterface interface { + Create(ctx context.Context, requestReply *v1alpha1.RequestReply, opts v1.CreateOptions) (*v1alpha1.RequestReply, error) + Update(ctx context.Context, requestReply *v1alpha1.RequestReply, opts v1.UpdateOptions) (*v1alpha1.RequestReply, error) + UpdateStatus(ctx context.Context, requestReply *v1alpha1.RequestReply, opts v1.UpdateOptions) (*v1alpha1.RequestReply, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.RequestReply, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RequestReplyList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RequestReply, err error) + RequestReplyExpansion +} + +// requestReplies implements RequestReplyInterface +type requestReplies struct { + client rest.Interface + ns string +} + +// newRequestReplies returns a RequestReplies +func newRequestReplies(c *EventingV1alpha1Client, namespace string) *requestReplies { + return &requestReplies{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the requestReply, and returns the corresponding requestReply object, and an error if there is any. +func (c *requestReplies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RequestReply, err error) { + result = &v1alpha1.RequestReply{} + err = c.client.Get(). + Namespace(c.ns). + Resource("requestreplies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RequestReplies that match those selectors. +func (c *requestReplies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RequestReplyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.RequestReplyList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("requestreplies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested requestReplies. +func (c *requestReplies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("requestreplies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
+ Watch(ctx) +} + +// Create takes the representation of a requestReply and creates it. Returns the server's representation of the requestReply, and an error, if there is any. +func (c *requestReplies) Create(ctx context.Context, requestReply *v1alpha1.RequestReply, opts v1.CreateOptions) (result *v1alpha1.RequestReply, err error) { + result = &v1alpha1.RequestReply{} + err = c.client.Post(). + Namespace(c.ns). + Resource("requestreplies"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(requestReply). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a requestReply and updates it. Returns the server's representation of the requestReply, and an error, if there is any. +func (c *requestReplies) Update(ctx context.Context, requestReply *v1alpha1.RequestReply, opts v1.UpdateOptions) (result *v1alpha1.RequestReply, err error) { + result = &v1alpha1.RequestReply{} + err = c.client.Put(). + Namespace(c.ns). + Resource("requestreplies"). + Name(requestReply.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(requestReply). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *requestReplies) UpdateStatus(ctx context.Context, requestReply *v1alpha1.RequestReply, opts v1.UpdateOptions) (result *v1alpha1.RequestReply, err error) { + result = &v1alpha1.RequestReply{} + err = c.client.Put(). + Namespace(c.ns). + Resource("requestreplies"). + Name(requestReply.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(requestReply). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the requestReply and deletes it. Returns an error if one occurs. +func (c *requestReplies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("requestreplies"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *requestReplies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("requestreplies"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched requestReply. +func (c *requestReplies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RequestReply, err error) { + result = &v1alpha1.RequestReply{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("requestreplies"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/fake/fake_integrationsink.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/fake/fake_integrationsink.go new file mode 100644 index 0000000000..690b2a948b --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/fake/fake_integrationsink.go @@ -0,0 +1,141 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1" +) + +// FakeIntegrationSinks implements IntegrationSinkInterface +type FakeIntegrationSinks struct { + Fake *FakeSinksV1alpha1 + ns string +} + +var integrationsinksResource = v1alpha1.SchemeGroupVersion.WithResource("integrationsinks") + +var integrationsinksKind = v1alpha1.SchemeGroupVersion.WithKind("IntegrationSink") + +// Get takes name of the integrationSink, and returns the corresponding integrationSink object, and an error if there is any. +func (c *FakeIntegrationSinks) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IntegrationSink, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(integrationsinksResource, c.ns, name), &v1alpha1.IntegrationSink{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.IntegrationSink), err +} + +// List takes label and field selectors, and returns the list of IntegrationSinks that match those selectors. +func (c *FakeIntegrationSinks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IntegrationSinkList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(integrationsinksResource, integrationsinksKind, c.ns, opts), &v1alpha1.IntegrationSinkList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.IntegrationSinkList{ListMeta: obj.(*v1alpha1.IntegrationSinkList).ListMeta} + for _, item := range obj.(*v1alpha1.IntegrationSinkList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested integrationSinks. +func (c *FakeIntegrationSinks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(integrationsinksResource, c.ns, opts)) + +} + +// Create takes the representation of a integrationSink and creates it. Returns the server's representation of the integrationSink, and an error, if there is any. 
+func (c *FakeIntegrationSinks) Create(ctx context.Context, integrationSink *v1alpha1.IntegrationSink, opts v1.CreateOptions) (result *v1alpha1.IntegrationSink, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(integrationsinksResource, c.ns, integrationSink), &v1alpha1.IntegrationSink{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.IntegrationSink), err +} + +// Update takes the representation of a integrationSink and updates it. Returns the server's representation of the integrationSink, and an error, if there is any. +func (c *FakeIntegrationSinks) Update(ctx context.Context, integrationSink *v1alpha1.IntegrationSink, opts v1.UpdateOptions) (result *v1alpha1.IntegrationSink, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(integrationsinksResource, c.ns, integrationSink), &v1alpha1.IntegrationSink{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.IntegrationSink), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeIntegrationSinks) UpdateStatus(ctx context.Context, integrationSink *v1alpha1.IntegrationSink, opts v1.UpdateOptions) (*v1alpha1.IntegrationSink, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(integrationsinksResource, "status", c.ns, integrationSink), &v1alpha1.IntegrationSink{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.IntegrationSink), err +} + +// Delete takes name of the integrationSink and deletes it. Returns an error if one occurs. +func (c *FakeIntegrationSinks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(integrationsinksResource, c.ns, name, opts), &v1alpha1.IntegrationSink{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeIntegrationSinks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(integrationsinksResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.IntegrationSinkList{}) + return err +} + +// Patch applies the patch and returns the patched integrationSink. +func (c *FakeIntegrationSinks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IntegrationSink, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(integrationsinksResource, c.ns, name, pt, data, subresources...), &v1alpha1.IntegrationSink{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.IntegrationSink), err +} diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/fake/fake_sinks_client.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/fake/fake_sinks_client.go index f64a39a5c9..c3cbdcfe5f 100644 --- a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/fake/fake_sinks_client.go +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/fake/fake_sinks_client.go @@ -28,6 +28,10 @@ type FakeSinksV1alpha1 struct { *testing.Fake } +func (c *FakeSinksV1alpha1) IntegrationSinks(namespace string) v1alpha1.IntegrationSinkInterface { + return &FakeIntegrationSinks{c, namespace} +} + func (c *FakeSinksV1alpha1) JobSinks(namespace string) v1alpha1.JobSinkInterface { return &FakeJobSinks{c, namespace} } diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/generated_expansion.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/generated_expansion.go index 029b7bee81..b444c44606 100644 --- a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/generated_expansion.go +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/generated_expansion.go @@ -18,4 +18,6 @@ limitations under the License. package v1alpha1 +type IntegrationSinkExpansion interface{} + type JobSinkExpansion interface{} diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/integrationsink.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/integrationsink.go new file mode 100644 index 0000000000..3e75e3aedf --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/integrationsink.go @@ -0,0 +1,195 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1" + scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme" +) + +// IntegrationSinksGetter has a method to return a IntegrationSinkInterface. +// A group's client should implement this interface. +type IntegrationSinksGetter interface { + IntegrationSinks(namespace string) IntegrationSinkInterface +} + +// IntegrationSinkInterface has methods to work with IntegrationSink resources. 
+type IntegrationSinkInterface interface { + Create(ctx context.Context, integrationSink *v1alpha1.IntegrationSink, opts v1.CreateOptions) (*v1alpha1.IntegrationSink, error) + Update(ctx context.Context, integrationSink *v1alpha1.IntegrationSink, opts v1.UpdateOptions) (*v1alpha1.IntegrationSink, error) + UpdateStatus(ctx context.Context, integrationSink *v1alpha1.IntegrationSink, opts v1.UpdateOptions) (*v1alpha1.IntegrationSink, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.IntegrationSink, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.IntegrationSinkList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IntegrationSink, err error) + IntegrationSinkExpansion +} + +// integrationSinks implements IntegrationSinkInterface +type integrationSinks struct { + client rest.Interface + ns string +} + +// newIntegrationSinks returns a IntegrationSinks +func newIntegrationSinks(c *SinksV1alpha1Client, namespace string) *integrationSinks { + return &integrationSinks{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the integrationSink, and returns the corresponding integrationSink object, and an error if there is any. +func (c *integrationSinks) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IntegrationSink, err error) { + result = &v1alpha1.IntegrationSink{} + err = c.client.Get(). + Namespace(c.ns). + Resource("integrationsinks"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of IntegrationSinks that match those selectors. +func (c *integrationSinks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IntegrationSinkList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.IntegrationSinkList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("integrationsinks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested integrationSinks. +func (c *integrationSinks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("integrationsinks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a integrationSink and creates it. Returns the server's representation of the integrationSink, and an error, if there is any. +func (c *integrationSinks) Create(ctx context.Context, integrationSink *v1alpha1.IntegrationSink, opts v1.CreateOptions) (result *v1alpha1.IntegrationSink, err error) { + result = &v1alpha1.IntegrationSink{} + err = c.client.Post(). + Namespace(c.ns). + Resource("integrationsinks"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(integrationSink). + Do(ctx). 
+ Into(result) + return +} + +// Update takes the representation of a integrationSink and updates it. Returns the server's representation of the integrationSink, and an error, if there is any. +func (c *integrationSinks) Update(ctx context.Context, integrationSink *v1alpha1.IntegrationSink, opts v1.UpdateOptions) (result *v1alpha1.IntegrationSink, err error) { + result = &v1alpha1.IntegrationSink{} + err = c.client.Put(). + Namespace(c.ns). + Resource("integrationsinks"). + Name(integrationSink.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(integrationSink). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *integrationSinks) UpdateStatus(ctx context.Context, integrationSink *v1alpha1.IntegrationSink, opts v1.UpdateOptions) (result *v1alpha1.IntegrationSink, err error) { + result = &v1alpha1.IntegrationSink{} + err = c.client.Put(). + Namespace(c.ns). + Resource("integrationsinks"). + Name(integrationSink.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(integrationSink). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the integrationSink and deletes it. Returns an error if one occurs. +func (c *integrationSinks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("integrationsinks"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *integrationSinks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("integrationsinks"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched integrationSink. +func (c *integrationSinks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IntegrationSink, err error) { + result = &v1alpha1.IntegrationSink{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("integrationsinks"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/sinks_client.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/sinks_client.go index 2012a8f57a..9d394dfb0b 100644 --- a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/sinks_client.go +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/sinks_client.go @@ -28,6 +28,7 @@ import ( type SinksV1alpha1Interface interface { RESTClient() rest.Interface + IntegrationSinksGetter JobSinksGetter } @@ -36,6 +37,10 @@ type SinksV1alpha1Client struct { restClient rest.Interface } +func (c *SinksV1alpha1Client) IntegrationSinks(namespace string) IntegrationSinkInterface { + return newIntegrationSinks(c, namespace) +} + func (c *SinksV1alpha1Client) JobSinks(namespace string) JobSinkInterface { return newJobSinks(c, namespace) } diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/doc.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/doc.go new file mode 100644 index 0000000000..0b13fd8e00 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/doc.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/doc.go new file mode 100644 index 0000000000..40528db3a5 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. 
+package fake diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_integrationsource.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_integrationsource.go new file mode 100644 index 0000000000..cf59005eb0 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_integrationsource.go @@ -0,0 +1,141 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1alpha1 "knative.dev/eventing/pkg/apis/sources/v1alpha1" +) + +// FakeIntegrationSources implements IntegrationSourceInterface +type FakeIntegrationSources struct { + Fake *FakeSourcesV1alpha1 + ns string +} + +var integrationsourcesResource = v1alpha1.SchemeGroupVersion.WithResource("integrationsources") + +var integrationsourcesKind = v1alpha1.SchemeGroupVersion.WithKind("IntegrationSource") + +// Get takes name of the integrationSource, and returns the corresponding integrationSource object, and an error if there is any. +func (c *FakeIntegrationSources) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IntegrationSource, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(integrationsourcesResource, c.ns, name), &v1alpha1.IntegrationSource{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.IntegrationSource), err +} + +// List takes label and field selectors, and returns the list of IntegrationSources that match those selectors. +func (c *FakeIntegrationSources) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IntegrationSourceList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(integrationsourcesResource, integrationsourcesKind, c.ns, opts), &v1alpha1.IntegrationSourceList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.IntegrationSourceList{ListMeta: obj.(*v1alpha1.IntegrationSourceList).ListMeta} + for _, item := range obj.(*v1alpha1.IntegrationSourceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested integrationSources. +func (c *FakeIntegrationSources) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(integrationsourcesResource, c.ns, opts)) + +} + +// Create takes the representation of a integrationSource and creates it. Returns the server's representation of the integrationSource, and an error, if there is any. 
+func (c *FakeIntegrationSources) Create(ctx context.Context, integrationSource *v1alpha1.IntegrationSource, opts v1.CreateOptions) (result *v1alpha1.IntegrationSource, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(integrationsourcesResource, c.ns, integrationSource), &v1alpha1.IntegrationSource{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.IntegrationSource), err +} + +// Update takes the representation of a integrationSource and updates it. Returns the server's representation of the integrationSource, and an error, if there is any. +func (c *FakeIntegrationSources) Update(ctx context.Context, integrationSource *v1alpha1.IntegrationSource, opts v1.UpdateOptions) (result *v1alpha1.IntegrationSource, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(integrationsourcesResource, c.ns, integrationSource), &v1alpha1.IntegrationSource{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.IntegrationSource), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeIntegrationSources) UpdateStatus(ctx context.Context, integrationSource *v1alpha1.IntegrationSource, opts v1.UpdateOptions) (*v1alpha1.IntegrationSource, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(integrationsourcesResource, "status", c.ns, integrationSource), &v1alpha1.IntegrationSource{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.IntegrationSource), err +} + +// Delete takes name of the integrationSource and deletes it. Returns an error if one occurs. +func (c *FakeIntegrationSources) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(integrationsourcesResource, c.ns, name, opts), &v1alpha1.IntegrationSource{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeIntegrationSources) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(integrationsourcesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.IntegrationSourceList{}) + return err +} + +// Patch applies the patch and returns the patched integrationSource. +func (c *FakeIntegrationSources) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IntegrationSource, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(integrationsourcesResource, c.ns, name, pt, data, subresources...), &v1alpha1.IntegrationSource{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.IntegrationSource), err +} diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_sources_client.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_sources_client.go new file mode 100644 index 0000000000..29164f5c7a --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_sources_client.go @@ -0,0 +1,40 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1alpha1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1" +) + +type FakeSourcesV1alpha1 struct { + *testing.Fake +} + +func (c *FakeSourcesV1alpha1) IntegrationSources(namespace string) v1alpha1.IntegrationSourceInterface { + return &FakeIntegrationSources{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeSourcesV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/generated_expansion.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/generated_expansion.go new file mode 100644 index 0000000000..93a464c523 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type IntegrationSourceExpansion interface{} diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/integrationsource.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/integrationsource.go new file mode 100644 index 0000000000..aa695a3b87 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/integrationsource.go @@ -0,0 +1,195 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "knative.dev/eventing/pkg/apis/sources/v1alpha1" + scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme" +) + +// IntegrationSourcesGetter has a method to return a IntegrationSourceInterface. +// A group's client should implement this interface. +type IntegrationSourcesGetter interface { + IntegrationSources(namespace string) IntegrationSourceInterface +} + +// IntegrationSourceInterface has methods to work with IntegrationSource resources. +type IntegrationSourceInterface interface { + Create(ctx context.Context, integrationSource *v1alpha1.IntegrationSource, opts v1.CreateOptions) (*v1alpha1.IntegrationSource, error) + Update(ctx context.Context, integrationSource *v1alpha1.IntegrationSource, opts v1.UpdateOptions) (*v1alpha1.IntegrationSource, error) + UpdateStatus(ctx context.Context, integrationSource *v1alpha1.IntegrationSource, opts v1.UpdateOptions) (*v1alpha1.IntegrationSource, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.IntegrationSource, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.IntegrationSourceList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IntegrationSource, err error) + IntegrationSourceExpansion +} + +// integrationSources implements IntegrationSourceInterface +type integrationSources struct { + client rest.Interface + ns string +} + +// newIntegrationSources returns a IntegrationSources +func newIntegrationSources(c *SourcesV1alpha1Client, namespace string) *integrationSources { + return &integrationSources{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the integrationSource, and returns the corresponding integrationSource object, and an error if there is any. +func (c *integrationSources) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IntegrationSource, err error) { + result = &v1alpha1.IntegrationSource{} + err = c.client.Get(). + Namespace(c.ns). + Resource("integrationsources"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of IntegrationSources that match those selectors. +func (c *integrationSources) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IntegrationSourceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.IntegrationSourceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("integrationsources"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested integrationSources. 
+func (c *integrationSources) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("integrationsources"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a integrationSource and creates it. Returns the server's representation of the integrationSource, and an error, if there is any. +func (c *integrationSources) Create(ctx context.Context, integrationSource *v1alpha1.IntegrationSource, opts v1.CreateOptions) (result *v1alpha1.IntegrationSource, err error) { + result = &v1alpha1.IntegrationSource{} + err = c.client.Post(). + Namespace(c.ns). + Resource("integrationsources"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(integrationSource). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a integrationSource and updates it. Returns the server's representation of the integrationSource, and an error, if there is any. +func (c *integrationSources) Update(ctx context.Context, integrationSource *v1alpha1.IntegrationSource, opts v1.UpdateOptions) (result *v1alpha1.IntegrationSource, err error) { + result = &v1alpha1.IntegrationSource{} + err = c.client.Put(). + Namespace(c.ns). + Resource("integrationsources"). + Name(integrationSource.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(integrationSource). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *integrationSources) UpdateStatus(ctx context.Context, integrationSource *v1alpha1.IntegrationSource, opts v1.UpdateOptions) (result *v1alpha1.IntegrationSource, err error) { + result = &v1alpha1.IntegrationSource{} + err = c.client.Put(). + Namespace(c.ns). + Resource("integrationsources"). + Name(integrationSource.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(integrationSource). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the integrationSource and deletes it. Returns an error if one occurs. +func (c *integrationSources) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("integrationsources"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *integrationSources) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("integrationsources"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched integrationSource. +func (c *integrationSources) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IntegrationSource, err error) { + result = &v1alpha1.IntegrationSource{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("integrationsources"). + Name(name). + SubResource(subresources...). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/sources_client.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/sources_client.go new file mode 100644 index 0000000000..838e004278 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/sources_client.go @@ -0,0 +1,107 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + rest "k8s.io/client-go/rest" + v1alpha1 "knative.dev/eventing/pkg/apis/sources/v1alpha1" + "knative.dev/eventing/pkg/client/clientset/versioned/scheme" +) + +type SourcesV1alpha1Interface interface { + RESTClient() rest.Interface + IntegrationSourcesGetter +} + +// SourcesV1alpha1Client is used to interact with features provided by the sources.knative.dev group. +type SourcesV1alpha1Client struct { + restClient rest.Interface +} + +func (c *SourcesV1alpha1Client) IntegrationSources(namespace string) IntegrationSourceInterface { + return newIntegrationSources(c, namespace) +} + +// NewForConfig creates a new SourcesV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*SourcesV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new SourcesV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*SourcesV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &SourcesV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new SourcesV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SourcesV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SourcesV1alpha1Client for the given RESTClient. 
+func New(c rest.Interface) *SourcesV1alpha1Client { + return &SourcesV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *SourcesV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1/interface.go index 89263c2585..4ae2f7faa0 100644 --- a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1/interface.go +++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1/interface.go @@ -26,6 +26,8 @@ import ( type Interface interface { // EventPolicies returns a EventPolicyInformer. EventPolicies() EventPolicyInformer + // RequestReplies returns a RequestReplyInformer. + RequestReplies() RequestReplyInformer } type version struct { @@ -43,3 +45,8 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList func (v *version) EventPolicies() EventPolicyInformer { return &eventPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } + +// RequestReplies returns a RequestReplyInformer. +func (v *version) RequestReplies() RequestReplyInformer { + return &requestReplyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1/requestreply.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1/requestreply.go new file mode 100644 index 0000000000..d2960b6e79 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1/requestreply.go @@ -0,0 +1,90 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + eventingv1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1" + versioned "knative.dev/eventing/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "knative.dev/eventing/pkg/client/listers/eventing/v1alpha1" +) + +// RequestReplyInformer provides access to a shared informer and lister for +// RequestReplies. 
+type RequestReplyInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.RequestReplyLister +} + +type requestReplyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRequestReplyInformer constructs a new informer for RequestReply type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRequestReplyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRequestReplyInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRequestReplyInformer constructs a new informer for RequestReply type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredRequestReplyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.EventingV1alpha1().RequestReplies(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.EventingV1alpha1().RequestReplies(namespace).Watch(context.TODO(), options) + }, + }, + &eventingv1alpha1.RequestReply{}, + resyncPeriod, + indexers, + ) +} + +func (f *requestReplyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRequestReplyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *requestReplyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&eventingv1alpha1.RequestReply{}, f.defaultInformer) +} + +func (f *requestReplyInformer) Lister() v1alpha1.RequestReplyLister { + return v1alpha1.NewRequestReplyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/generic.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/generic.go index 41c490e06c..6664060b73 100644 --- a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/generic.go +++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/generic.go @@ -32,6 +32,7 @@ import ( messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1" sinksv1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1" sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1" + sourcesv1alpha1 "knative.dev/eventing/pkg/apis/sources/v1alpha1" sourcesv1beta2 "knative.dev/eventing/pkg/apis/sources/v1beta2" ) @@ -70,6 +71,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource // Group=eventing.knative.dev, Version=v1alpha1 case v1alpha1.SchemeGroupVersion.WithResource("eventpolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Eventing().V1alpha1().EventPolicies().Informer()}, nil + case 
v1alpha1.SchemeGroupVersion.WithResource("requestreplies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Eventing().V1alpha1().RequestReplies().Informer()}, nil // Group=eventing.knative.dev, Version=v1beta1 case v1beta1.SchemeGroupVersion.WithResource("eventtypes"): @@ -98,6 +101,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Messaging().V1().Subscriptions().Informer()}, nil // Group=sinks.knative.dev, Version=v1alpha1 + case sinksv1alpha1.SchemeGroupVersion.WithResource("integrationsinks"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Sinks().V1alpha1().IntegrationSinks().Informer()}, nil case sinksv1alpha1.SchemeGroupVersion.WithResource("jobsinks"): return &genericInformer{resource: resource.GroupResource(), informer: f.Sinks().V1alpha1().JobSinks().Informer()}, nil @@ -111,6 +116,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case sourcesv1.SchemeGroupVersion.WithResource("sinkbindings"): return &genericInformer{resource: resource.GroupResource(), informer: f.Sources().V1().SinkBindings().Informer()}, nil + // Group=sources.knative.dev, Version=v1alpha1 + case sourcesv1alpha1.SchemeGroupVersion.WithResource("integrationsources"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Sources().V1alpha1().IntegrationSources().Informer()}, nil + // Group=sources.knative.dev, Version=v1beta2 case sourcesv1beta2.SchemeGroupVersion.WithResource("pingsources"): return &genericInformer{resource: resource.GroupResource(), informer: f.Sources().V1beta2().PingSources().Informer()}, nil diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sinks/v1alpha1/integrationsink.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sinks/v1alpha1/integrationsink.go new file mode 100644 index 0000000000..d67f21681f --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sinks/v1alpha1/integrationsink.go @@ -0,0 +1,90 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + sinksv1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1" + versioned "knative.dev/eventing/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "knative.dev/eventing/pkg/client/listers/sinks/v1alpha1" +) + +// IntegrationSinkInformer provides access to a shared informer and lister for +// IntegrationSinks. 
+type IntegrationSinkInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.IntegrationSinkLister +} + +type integrationSinkInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewIntegrationSinkInformer constructs a new informer for IntegrationSink type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewIntegrationSinkInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredIntegrationSinkInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredIntegrationSinkInformer constructs a new informer for IntegrationSink type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredIntegrationSinkInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SinksV1alpha1().IntegrationSinks(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SinksV1alpha1().IntegrationSinks(namespace).Watch(context.TODO(), options) + }, + }, + &sinksv1alpha1.IntegrationSink{}, + resyncPeriod, + indexers, + ) +} + +func (f *integrationSinkInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredIntegrationSinkInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *integrationSinkInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&sinksv1alpha1.IntegrationSink{}, f.defaultInformer) +} + +func (f *integrationSinkInformer) Lister() v1alpha1.IntegrationSinkLister { + return v1alpha1.NewIntegrationSinkLister(f.Informer().GetIndexer()) +} diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sinks/v1alpha1/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sinks/v1alpha1/interface.go index 68ed173ac5..fec7f62e62 100644 --- a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sinks/v1alpha1/interface.go +++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sinks/v1alpha1/interface.go @@ -24,6 +24,8 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // IntegrationSinks returns a IntegrationSinkInformer. + IntegrationSinks() IntegrationSinkInformer // JobSinks returns a JobSinkInformer. JobSinks() JobSinkInformer } @@ -39,6 +41,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// IntegrationSinks returns a IntegrationSinkInformer. 
+func (v *version) IntegrationSinks() IntegrationSinkInformer { + return &integrationSinkInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // JobSinks returns a JobSinkInformer. func (v *version) JobSinks() JobSinkInformer { return &jobSinkInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/interface.go index c8b6385943..20ad73586e 100644 --- a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/interface.go +++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/interface.go @@ -21,6 +21,7 @@ package sources import ( internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces" v1 "knative.dev/eventing/pkg/client/informers/externalversions/sources/v1" + v1alpha1 "knative.dev/eventing/pkg/client/informers/externalversions/sources/v1alpha1" v1beta2 "knative.dev/eventing/pkg/client/informers/externalversions/sources/v1beta2" ) @@ -28,6 +29,8 @@ import ( type Interface interface { // V1 provides access to shared informers for resources in V1. V1() v1.Interface + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface // V1beta2 provides access to shared informers for resources in V1beta2. V1beta2() v1beta2.Interface } @@ -48,6 +51,11 @@ func (g *group) V1() v1.Interface { return v1.New(g.factory, g.namespace, g.tweakListOptions) } +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} + // V1beta2 returns a new v1beta2.Interface. func (g *group) V1beta2() v1beta2.Interface { return v1beta2.New(g.factory, g.namespace, g.tweakListOptions) diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1alpha1/integrationsource.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1alpha1/integrationsource.go new file mode 100644 index 0000000000..34e3a1f1b3 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1alpha1/integrationsource.go @@ -0,0 +1,90 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + sourcesv1alpha1 "knative.dev/eventing/pkg/apis/sources/v1alpha1" + versioned "knative.dev/eventing/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "knative.dev/eventing/pkg/client/listers/sources/v1alpha1" +) + +// IntegrationSourceInformer provides access to a shared informer and lister for +// IntegrationSources. +type IntegrationSourceInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.IntegrationSourceLister +} + +type integrationSourceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewIntegrationSourceInformer constructs a new informer for IntegrationSource type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewIntegrationSourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredIntegrationSourceInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredIntegrationSourceInformer constructs a new informer for IntegrationSource type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredIntegrationSourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SourcesV1alpha1().IntegrationSources(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SourcesV1alpha1().IntegrationSources(namespace).Watch(context.TODO(), options) + }, + }, + &sourcesv1alpha1.IntegrationSource{}, + resyncPeriod, + indexers, + ) +} + +func (f *integrationSourceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredIntegrationSourceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *integrationSourceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&sourcesv1alpha1.IntegrationSource{}, f.defaultInformer) +} + +func (f *integrationSourceInformer) Lister() v1alpha1.IntegrationSourceLister { + return v1alpha1.NewIntegrationSourceLister(f.Informer().GetIndexer()) +} diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1alpha1/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1alpha1/interface.go new file mode 100644 index 0000000000..6dce960d20 --- /dev/null +++ 
b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // IntegrationSources returns a IntegrationSourceInformer. + IntegrationSources() IntegrationSourceInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// IntegrationSources returns a IntegrationSourceInformer. +func (v *version) IntegrationSources() IntegrationSourceInformer { + return &integrationSourceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1alpha1/expansion_generated.go b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1alpha1/expansion_generated.go index e3f601930d..1d021242be 100644 --- a/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1alpha1/expansion_generated.go +++ b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1alpha1/expansion_generated.go @@ -25,3 +25,11 @@ type EventPolicyListerExpansion interface{} // EventPolicyNamespaceListerExpansion allows custom methods to be added to // EventPolicyNamespaceLister. type EventPolicyNamespaceListerExpansion interface{} + +// RequestReplyListerExpansion allows custom methods to be added to +// RequestReplyLister. +type RequestReplyListerExpansion interface{} + +// RequestReplyNamespaceListerExpansion allows custom methods to be added to +// RequestReplyNamespaceLister. +type RequestReplyNamespaceListerExpansion interface{} diff --git a/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1alpha1/requestreply.go b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1alpha1/requestreply.go new file mode 100644 index 0000000000..fdfe4d17e9 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1alpha1/requestreply.go @@ -0,0 +1,99 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1" +) + +// RequestReplyLister helps list RequestReplies. +// All objects returned here must be treated as read-only. +type RequestReplyLister interface { + // List lists all RequestReplies in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.RequestReply, err error) + // RequestReplies returns an object that can list and get RequestReplies. + RequestReplies(namespace string) RequestReplyNamespaceLister + RequestReplyListerExpansion +} + +// requestReplyLister implements the RequestReplyLister interface. +type requestReplyLister struct { + indexer cache.Indexer +} + +// NewRequestReplyLister returns a new RequestReplyLister. +func NewRequestReplyLister(indexer cache.Indexer) RequestReplyLister { + return &requestReplyLister{indexer: indexer} +} + +// List lists all RequestReplies in the indexer. +func (s *requestReplyLister) List(selector labels.Selector) (ret []*v1alpha1.RequestReply, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.RequestReply)) + }) + return ret, err +} + +// RequestReplies returns an object that can list and get RequestReplies. +func (s *requestReplyLister) RequestReplies(namespace string) RequestReplyNamespaceLister { + return requestReplyNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RequestReplyNamespaceLister helps list and get RequestReplies. +// All objects returned here must be treated as read-only. +type RequestReplyNamespaceLister interface { + // List lists all RequestReplies in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.RequestReply, err error) + // Get retrieves the RequestReply from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.RequestReply, error) + RequestReplyNamespaceListerExpansion +} + +// requestReplyNamespaceLister implements the RequestReplyNamespaceLister +// interface. +type requestReplyNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all RequestReplies in the indexer for a given namespace. +func (s requestReplyNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.RequestReply, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.RequestReply)) + }) + return ret, err +} + +// Get retrieves the RequestReply from the indexer for a given namespace and name. 
+func (s requestReplyNamespaceLister) Get(name string) (*v1alpha1.RequestReply, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("requestreply"), name) + } + return obj.(*v1alpha1.RequestReply), nil +} diff --git a/vendor/knative.dev/eventing/pkg/client/listers/sinks/v1alpha1/expansion_generated.go b/vendor/knative.dev/eventing/pkg/client/listers/sinks/v1alpha1/expansion_generated.go index 48dbd03263..85542eb93d 100644 --- a/vendor/knative.dev/eventing/pkg/client/listers/sinks/v1alpha1/expansion_generated.go +++ b/vendor/knative.dev/eventing/pkg/client/listers/sinks/v1alpha1/expansion_generated.go @@ -18,6 +18,14 @@ limitations under the License. package v1alpha1 +// IntegrationSinkListerExpansion allows custom methods to be added to +// IntegrationSinkLister. +type IntegrationSinkListerExpansion interface{} + +// IntegrationSinkNamespaceListerExpansion allows custom methods to be added to +// IntegrationSinkNamespaceLister. +type IntegrationSinkNamespaceListerExpansion interface{} + // JobSinkListerExpansion allows custom methods to be added to // JobSinkLister. type JobSinkListerExpansion interface{} diff --git a/vendor/knative.dev/eventing/pkg/client/listers/sinks/v1alpha1/integrationsink.go b/vendor/knative.dev/eventing/pkg/client/listers/sinks/v1alpha1/integrationsink.go new file mode 100644 index 0000000000..95202b53f2 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/client/listers/sinks/v1alpha1/integrationsink.go @@ -0,0 +1,99 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1" +) + +// IntegrationSinkLister helps list IntegrationSinks. +// All objects returned here must be treated as read-only. +type IntegrationSinkLister interface { + // List lists all IntegrationSinks in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.IntegrationSink, err error) + // IntegrationSinks returns an object that can list and get IntegrationSinks. + IntegrationSinks(namespace string) IntegrationSinkNamespaceLister + IntegrationSinkListerExpansion +} + +// integrationSinkLister implements the IntegrationSinkLister interface. +type integrationSinkLister struct { + indexer cache.Indexer +} + +// NewIntegrationSinkLister returns a new IntegrationSinkLister. +func NewIntegrationSinkLister(indexer cache.Indexer) IntegrationSinkLister { + return &integrationSinkLister{indexer: indexer} +} + +// List lists all IntegrationSinks in the indexer. 
+func (s *integrationSinkLister) List(selector labels.Selector) (ret []*v1alpha1.IntegrationSink, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.IntegrationSink)) + }) + return ret, err +} + +// IntegrationSinks returns an object that can list and get IntegrationSinks. +func (s *integrationSinkLister) IntegrationSinks(namespace string) IntegrationSinkNamespaceLister { + return integrationSinkNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// IntegrationSinkNamespaceLister helps list and get IntegrationSinks. +// All objects returned here must be treated as read-only. +type IntegrationSinkNamespaceLister interface { + // List lists all IntegrationSinks in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.IntegrationSink, err error) + // Get retrieves the IntegrationSink from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.IntegrationSink, error) + IntegrationSinkNamespaceListerExpansion +} + +// integrationSinkNamespaceLister implements the IntegrationSinkNamespaceLister +// interface. +type integrationSinkNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all IntegrationSinks in the indexer for a given namespace. +func (s integrationSinkNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.IntegrationSink, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.IntegrationSink)) + }) + return ret, err +} + +// Get retrieves the IntegrationSink from the indexer for a given namespace and name. +func (s integrationSinkNamespaceLister) Get(name string) (*v1alpha1.IntegrationSink, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("integrationsink"), name) + } + return obj.(*v1alpha1.IntegrationSink), nil +} diff --git a/vendor/knative.dev/eventing/pkg/client/listers/sources/v1alpha1/expansion_generated.go b/vendor/knative.dev/eventing/pkg/client/listers/sources/v1alpha1/expansion_generated.go new file mode 100644 index 0000000000..16600e2ab8 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/client/listers/sources/v1alpha1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// IntegrationSourceListerExpansion allows custom methods to be added to +// IntegrationSourceLister. +type IntegrationSourceListerExpansion interface{} + +// IntegrationSourceNamespaceListerExpansion allows custom methods to be added to +// IntegrationSourceNamespaceLister. 
+type IntegrationSourceNamespaceListerExpansion interface{} diff --git a/vendor/knative.dev/eventing/pkg/client/listers/sources/v1alpha1/integrationsource.go b/vendor/knative.dev/eventing/pkg/client/listers/sources/v1alpha1/integrationsource.go new file mode 100644 index 0000000000..a9d3d1a529 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/client/listers/sources/v1alpha1/integrationsource.go @@ -0,0 +1,99 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1alpha1 "knative.dev/eventing/pkg/apis/sources/v1alpha1" +) + +// IntegrationSourceLister helps list IntegrationSources. +// All objects returned here must be treated as read-only. +type IntegrationSourceLister interface { + // List lists all IntegrationSources in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.IntegrationSource, err error) + // IntegrationSources returns an object that can list and get IntegrationSources. + IntegrationSources(namespace string) IntegrationSourceNamespaceLister + IntegrationSourceListerExpansion +} + +// integrationSourceLister implements the IntegrationSourceLister interface. +type integrationSourceLister struct { + indexer cache.Indexer +} + +// NewIntegrationSourceLister returns a new IntegrationSourceLister. +func NewIntegrationSourceLister(indexer cache.Indexer) IntegrationSourceLister { + return &integrationSourceLister{indexer: indexer} +} + +// List lists all IntegrationSources in the indexer. +func (s *integrationSourceLister) List(selector labels.Selector) (ret []*v1alpha1.IntegrationSource, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.IntegrationSource)) + }) + return ret, err +} + +// IntegrationSources returns an object that can list and get IntegrationSources. +func (s *integrationSourceLister) IntegrationSources(namespace string) IntegrationSourceNamespaceLister { + return integrationSourceNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// IntegrationSourceNamespaceLister helps list and get IntegrationSources. +// All objects returned here must be treated as read-only. +type IntegrationSourceNamespaceLister interface { + // List lists all IntegrationSources in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.IntegrationSource, err error) + // Get retrieves the IntegrationSource from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.IntegrationSource, error) + IntegrationSourceNamespaceListerExpansion +} + +// integrationSourceNamespaceLister implements the IntegrationSourceNamespaceLister +// interface. 
+type integrationSourceNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all IntegrationSources in the indexer for a given namespace. +func (s integrationSourceNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.IntegrationSource, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.IntegrationSource)) + }) + return ret, err +} + +// Get retrieves the IntegrationSource from the indexer for a given namespace and name. +func (s integrationSourceNamespaceLister) Get(name string) (*v1alpha1.IntegrationSource, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("integrationsource"), name) + } + return obj.(*v1alpha1.IntegrationSource), nil +} diff --git a/vendor/knative.dev/eventing/pkg/eventingtls/eventingtls.go b/vendor/knative.dev/eventing/pkg/eventingtls/eventingtls.go index 718a744c19..eedea290d5 100644 --- a/vendor/knative.dev/eventing/pkg/eventingtls/eventingtls.go +++ b/vendor/knative.dev/eventing/pkg/eventingtls/eventingtls.go @@ -57,6 +57,9 @@ const ( BrokerFilterServerTLSSecretName = "mt-broker-filter-server-tls" //nolint:gosec // This is not a hardcoded credential // BrokerIngressServerTLSSecretName is the name of the tls secret for the broker ingress server BrokerIngressServerTLSSecretName = "mt-broker-ingress-server-tls" //nolint:gosec // This is not a hardcoded credential + + // IntegrationSinkDispatcherServerTLSSecretName is the name of the tls secret for the integration sink dispatcher server + IntegrationSinkDispatcherServerTLSSecretName = "integration-sink-server-tls" //nolint:gosec // This is not a hardcoded credential ) type ClientConfig struct { diff --git a/vendor/knative.dev/eventing/pkg/reconciler/testing/v1/broker.go b/vendor/knative.dev/eventing/pkg/reconciler/testing/v1/broker.go index e4bfa7141e..b04ed910d9 100644 --- a/vendor/knative.dev/eventing/pkg/reconciler/testing/v1/broker.go +++ b/vendor/knative.dev/eventing/pkg/reconciler/testing/v1/broker.go @@ -61,6 +61,15 @@ func WithBrokerFinalizers(finalizers ...string) BrokerOption { } } +func WithBrokerAnnotation(key, value string) BrokerOption { + return func(b *v1.Broker) { + if b.Annotations == nil { + b.Annotations = map[string]string{} + } + b.Annotations[key] = value + } +} + func WithBrokerResourceVersion(rv string) BrokerOption { return func(b *v1.Broker) { b.ResourceVersion = rv diff --git a/vendor/knative.dev/eventing/pkg/reconciler/testing/v1/listers.go b/vendor/knative.dev/eventing/pkg/reconciler/testing/v1/listers.go index 2d2c32a9f9..3ba7f5c833 100644 --- a/vendor/knative.dev/eventing/pkg/reconciler/testing/v1/listers.go +++ b/vendor/knative.dev/eventing/pkg/reconciler/testing/v1/listers.go @@ -39,6 +39,7 @@ import ( messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1" sinksv1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1" sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1" + sourcesv1alpha1 "knative.dev/eventing/pkg/apis/sources/v1alpha1" fakeeventingclientset "knative.dev/eventing/pkg/client/clientset/versioned/fake" eventinglisters "knative.dev/eventing/pkg/client/listers/eventing/v1" eventingv1alpha1listers "knative.dev/eventing/pkg/client/listers/eventing/v1alpha1" @@ -47,6 +48,7 @@ import ( messaginglisters "knative.dev/eventing/pkg/client/listers/messaging/v1" sinkslisters 
"knative.dev/eventing/pkg/client/listers/sinks/v1alpha1" sourcelisters "knative.dev/eventing/pkg/client/listers/sources/v1" + sourcev1alpha1listers "knative.dev/eventing/pkg/client/listers/sources/v1alpha1" testscheme "knative.dev/eventing/pkg/reconciler/testing/scheme" duckv1 "knative.dev/pkg/apis/duck/v1" "knative.dev/pkg/reconciler/testing" @@ -122,10 +124,18 @@ func (l *Listers) GetEventPolicyLister() eventingv1alpha1listers.EventPolicyList return eventingv1alpha1listers.NewEventPolicyLister(l.indexerFor(&eventingv1alpha1.EventPolicy{})) } +func (l *Listers) GetIntegrationSinkLister() sinkslisters.IntegrationSinkLister { + return sinkslisters.NewIntegrationSinkLister(l.indexerFor(&sinksv1alpha1.IntegrationSink{})) +} + func (l *Listers) GetJobSinkLister() sinkslisters.JobSinkLister { return sinkslisters.NewJobSinkLister(l.indexerFor(&sinksv1alpha1.JobSink{})) } +func (l *Listers) GetIntegrationSourceLister() sourcev1alpha1listers.IntegrationSourceLister { + return sourcev1alpha1listers.NewIntegrationSourceLister(l.indexerFor(&sourcesv1alpha1.IntegrationSource{})) +} + func (l *Listers) GetPingSourceLister() sourcelisters.PingSourceLister { return sourcelisters.NewPingSourceLister(l.indexerFor(&sourcesv1.PingSource{})) } diff --git a/vendor/knative.dev/eventing/pkg/scheduler/README.md b/vendor/knative.dev/eventing/pkg/scheduler/README.md index 08543f4753..a40828a3ee 100644 --- a/vendor/knative.dev/eventing/pkg/scheduler/README.md +++ b/vendor/knative.dev/eventing/pkg/scheduler/README.md @@ -1,147 +1,72 @@ # Knative Eventing Multi-Tenant Scheduler with High-Availability -An eventing source instance (for example, [KafkaSource](https://github.com/knative-extensions/eventing-kafka/tree/main/pkg/source), [RedisStreamSource](https://github.com/knative-extensions/eventing-redis/tree/main/source), etc) gets materialized as a virtual pod (**vpod**) and can be scaled up and down by increasing or decreasing the number of virtual pod replicas (**vreplicas**). A vreplica corresponds to a resource in the source that can replicated for maximum distributed processing (for example, number of consumers running in a consumer group). +An eventing source instance (for example, KafkaSource, etc) gets materialized as a virtual pod (* +*vpod**) and can be scaled up and down by increasing or decreasing the number of virtual pod +replicas (**vreplicas**). A vreplica corresponds to a resource in the source that can replicated for +maximum distributed processing (for example, number of consumers running in a consumer group). -The vpod multi-tenant [scheduler](#1scheduler) is responsible for placing vreplicas onto real Kubernetes pods. Each pod is limited in capacity and can hold a maximum number of vreplicas. The scheduler takes a list of (source, # of vreplicas) tuples and computes a set of Placements. Placement info are added to the source status. +The vpod multi-tenant [scheduler](#scheduler) is responsible for placing vreplicas onto real +Kubernetes pods. Each pod is limited in capacity and can hold a maximum number of vreplicas. The +scheduler takes a list of (source, # of vreplicas) tuples and computes a set of Placements. +Placement info are added to the source status. -Scheduling strategies rely on pods having a sticky identity (StatefulSet replicas) and the current [State](#4state-collector) of the cluster. - -When a vreplica cannot be scheduled it is added to the list of pending vreplicas. The [Autoscaler](#3autoscaler) monitors this list and allocates more pods for placing it. 
- -To support high-availability the scheduler distributes vreplicas uniformly across failure domains such as zones/nodes/pods containing replicas from a StatefulSet. - -## General Scheduler Requirements - -1. High Availability: Vreplicas for a source must be evenly spread across domains to reduce impact of failure when a zone/node/pod goes unavailable for scheduling.* - -2. Equal event consumption: Vreplicas for a source must be evenly spread across adapter pods to provide an equal rate of processing events. For example, Kafka broker spreads partitions equally across pods so if vreplicas aren’t equally spread, pods with fewer vreplicas will consume events slower than others. - -3. Pod spread not more than available resources: Vreplicas for a source must be evenly spread across pods such that the total number of pods with placements does not exceed the number of resources available from the source (for example, number of Kafka partitions for the topic it's consuming from). Else, the additional pods have no resources (Kafka partitions) to consume events from and could waste Kubernetes resources. - -* Note: StatefulSet anti-affinity rules guarantee new pods to be scheduled on a new zone and node. +Scheduling strategies rely on pods having a sticky identity (StatefulSet replicas) and the +current [State](#state-collector) of the cluster. ## Components: -### 1.Scheduler -The scheduling framework has a pluggable architecture where plugins are registered and compiled into the scheduler. It allows many scheduling features to be implemented as plugins, while keeping the scheduling "core" simple and maintainable. - -Scheduling happens in a series of stages: - - 1. **Filter**: These plugins (predicates) are used to filter out pods where a vreplica cannot be placed. If any filter plugin marks the pod as infeasible, the remaining plugins will not be called for that pod. A vreplica is marked as unschedulable if no pods pass all the filters. - - 2. **Score**: These plugins (priorities) provide a score to each pod that has passed the filtering phase. Scheduler will then select the pod with the highest weighted scores sum. - -Scheduler must be Knative generic with its core functionality implemented as core plugins. Anything specific to an eventing source will be implemented as separate plugins (for example, number of Kafka partitions) - -It allocates one vreplica at a time by filtering and scoring schedulable pods. - -A vreplica can be unschedulable for several reasons such as pods not having enough capacity, constraints cannot be fulfilled, etc. - -### 2.Descheduler - -Similar to scheduler but has its own set of priorities (no predicates today). - -### 3.Autoscaler - -The autoscaler scales up pod replicas of the statefulset adapter when there are vreplicas pending to be scheduled, and scales down if there are unused pods. It takes into consideration a scaling factor that is based on number of domains for HA. - -### 4.State Collector - -Current state information about the cluster is collected after placing each vreplica and during intervals. 
Cluster information include computing the free capacity for each pod, list of schedulable pods (unschedulable pods are pods that are marked for eviction for compacting, and pods that are on unschedulable nodes (cordoned or unreachable nodes), number of pods (stateful set replicas), number of available nodes, number of zones, a node to zone map, total number of vreplicas in each pod for each vpod (spread), total number of vreplicas in each node for each vpod (spread), total number of vreplicas in each zone for each vpod (spread), etc. - -### 5.Reservation - -Scheduler also tracks vreplicas that have been placed (ie. scheduled) but haven't been committed yet to its vpod status. These reserved veplicas are taken into consideration when computing cluster's state for scheduling the next vreplica. - -### 6.Evictor - -Autoscaler periodically attempts to compact veplicas into a smaller number of free replicas with lower ordinals. Vreplicas placed on higher ordinal pods are evicted and rescheduled to pods with a lower ordinal using the same scheduling strategies. - -## Scheduler Profile - -### Predicates: - -1. **PodFitsResources**: check if a pod has enough capacity [CORE] - -2. **NoMaxResourceCount**: check if total number of placement pods exceed available resources [KAFKA]. It has an argument `NumPartitions` to configure the plugin with the total number of Kafka partitions. - -3. **EvenPodSpread**: check if resources are evenly spread across pods [CORE]. It has an argument `MaxSkew` to configure the plugin with an allowed skew factor. +### Scheduler -### Priorities: +The scheduler allocates as many vreplicas as possible into the lowest possible StatefulSet +ordinal number before triggering the autoscaler when no more capacity is left to schedule vpods. -1. **AvailabilityNodePriority**: make sure resources are evenly spread across nodes [CORE]. It has an argument `MaxSkew` to configure the plugin with an allowed skew factor. +### Autoscaler -2. **AvailabilityZonePriority**: make sure resources are evenly spread across zones [CORE]. It has an argument `MaxSkew` to configure the plugin with an allowed skew factor. +The autoscaler scales up pod replicas of the statefulset adapter when there are vreplicas pending to +be scheduled, and scales down if there are unused pods. -3. **LowestOrdinalPriority**: make sure vreplicas are placed on free smaller ordinal pods to minimize resource usage [CORE] +### State Collector -**Example ConfigMap for config-scheduler:** +Current state information about the cluster is collected after placing each vreplica and during +intervals. Cluster information includes the free capacity of each pod, the list of schedulable +pods (unschedulable pods are pods that are marked for eviction for compacting), the number of pods +(stateful set replicas), and the total number of vreplicas in each pod for each vpod (spread). -``` -data: - predicates: |+ - [ - {"Name": "PodFitsResources"}, - {"Name": "NoMaxResourceCount", - "Args": "{\"NumPartitions\": 100}"}, - {"Name": "EvenPodSpread", - "Args": "{\"MaxSkew\": 2}"} - ] - priorities: |+ - [ - {"Name": "AvailabilityZonePriority", - "Weight": 10, - "Args": "{\"MaxSkew\": 2}"}, - {"Name": "LowestOrdinalPriority", - "Weight": 2} - ] -``` +### Evictor -## Descheduler Profile: - -### Priorities: - -1. **RemoveWithAvailabilityNodePriority**: make sure resources are evenly spread across nodes [CORE] - -2. **RemoveWithAvailabilityZonePriority**: make sure resources are evenly spread across zones [CORE] - -3.
**HighestOrdinalPriority**: make sure vreps are removed from higher ordinal pods to minimize resource usage [CORE] - -**Example ConfigMap for config-descheduler:** - -``` -data: - priorities: |+ - [ - {"Name": "RemoveWithEvenPodSpreadPriority", - "Weight": 10, - "Args": "{\"MaxSkew\": 2}"}, - {"Name": "RemoveWithAvailabilityZonePriority", - "Weight": 10, - "Args": "{\"MaxSkew\": 2}"}, - {"Name": "RemoveWithHighestOrdinalPriority", - "Weight": 2} - ] -``` +Autoscaler periodically attempts to compact vreplicas into a smaller number of free replicas with +lower ordinals. Vreplicas placed on higher ordinal pods are evicted and rescheduled to pods with a +lower ordinal using the same scheduling strategies. ## Normal Operation 1. **Busy scheduler**: -Scheduler can be very busy allocating the best placements for multiple eventing sources at a time using the scheduler predicates and priorities configured. During this time, the cluster could see statefulset replicas increasing, as the autoscaler computes how many more pods are needed to complete scheduling successfully. Also, the replicas could be decreasing during idle time, either caused by less events flowing through the system, or the evictor compacting vreplicas placements into a smaller number of pods or the deletion of event sources. The current placements are stored in the eventing source's status field for observability. +Scheduler can be very busy allocating the best placements for multiple eventing sources at a time +using the scheduler predicates and priorities configured. During this time, the cluster could see +statefulset replicas increasing, as the autoscaler computes how many more pods are needed to +complete scheduling successfully. Also, the replicas could be decreasing during idle time, either +caused by fewer events flowing through the system, the evictor compacting vreplica placements +into a smaller number of pods, or the deletion of event sources. The current placements are stored in +the eventing source's status field for observability. 2. **Software upgrades**: -We can expect periodic software version upgrades or fixes to be performed on the Kubernetes cluster running the scheduler or on the Knative framework installed. Either of these scenarios could involve graceful rebooting of nodes and/or reapplying of controllers, adapters and other resources. - -All existing vreplica placements will still be valid and no rebalancing will be done by the vreplica scheduler. -(For Kafka, its broker may trigger a rebalancing of partitions due to consumer group member changes.) +We can expect periodic software version upgrades or fixes to be performed on the Kubernetes cluster +running the scheduler or on the Knative framework installed. Either of these scenarios could involve +graceful rebooting of nodes and/or reapplying of controllers, adapters and other resources. -TODO: Measure latencies in events processing using a performance tool (KPerf eventing). +All existing vreplica placements will still be valid and no rebalancing will be done by the vreplica +scheduler. +(For Kafka, its broker may trigger a rebalancing of partitions due to consumer group member +changes.) 3. **No more cluster resources**: -When there are no resources available on existing nodes in the cluster to schedule more pods and the autoscaler continues to scale up replicas, the new pods are left in a Pending state till cluster size is increased. Nothing to do for the scheduler until then.
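To make the "lowest ordinal first" behavior described in the Scheduler and Autoscaler sections above concrete, here is a rough Go sketch, not the actual implementation: vreplicas are packed onto the lowest StatefulSet ordinals that still have free capacity, and anything left over is reported as pending so the autoscaler can add replicas. The `allocate` helper and the capacity numbers are hypothetical.

```go
package main

import "fmt"

// allocate distributes vreplicas over pods ordered by ordinal. freeCapacity[i]
// is the remaining capacity of pod ordinal i; it returns the vreplicas placed
// per ordinal and the number left pending (to be handled by the autoscaler).
func allocate(vreplicas int32, freeCapacity []int32) (placements []int32, pending int32) {
	placements = make([]int32, len(freeCapacity))
	for ordinal := 0; ordinal < len(freeCapacity) && vreplicas > 0; ordinal++ {
		n := freeCapacity[ordinal]
		if n > vreplicas {
			n = vreplicas
		}
		placements[ordinal] = n
		vreplicas -= n
	}
	return placements, vreplicas
}

func main() {
	// Three adapter pods with capacity left for 2, 5, and 0 vreplicas.
	placed, pending := allocate(9, []int32{2, 5, 0})
	fmt.Println(placed, pending) // [2 5 0] 2 -> two pending vreplicas trigger a scale-up
}
```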
+When there are no resources available on existing nodes in the cluster to schedule more pods and the +autoscaler continues to scale up replicas, the new pods are left in a Pending state till cluster +size is increased. Nothing to do for the scheduler until then. ## Disaster Recovery @@ -149,91 +74,14 @@ Some failure scenarios are described below: 1. **Pod failure**: -When a pod/replica in a StatefulSet goes down due to some reason (but its node and zone are healthy), a new replica is spun up by the StatefulSet with the same pod identity (pod can come up on a different node) almost immediately. - -All existing vreplica placements will still be valid and no rebalancing will be done by the vreplica scheduler. -(For Kafka, its broker may trigger a rebalancing of partitions due to consumer group member changes.) - -TODO: Measure latencies in events processing using a performance tool (KPerf eventing). - -2. **Node failure (graceful)**: - -When a node is rebooted for upgrades etc, running pods on the node will be evicted (drained), gracefully terminated and rescheduled on a different node. The drained node will be marked as unschedulable by K8 (`node.Spec.Unschedulable` = True) after its cordoning. - -``` -k describe node knative-worker4 -Name: knative-worker4 -CreationTimestamp: Mon, 30 Aug 2021 11:13:11 -0400 -Taints: none -Unschedulable: true -``` - -All existing vreplica placements will still be valid and no rebalancing will be done by the vreplica scheduler. -(For Kafka, its broker may trigger a rebalancing of partitions due to consumer group member changes.) - -TODO: Measure latencies in events processing using a performance tool (KPerf eventing). +When a pod/replica in a StatefulSet goes down due to some reason (but its node and zone are +healthy), a new replica is spun up by the StatefulSet with the same pod identity (pod can come up on +a different node) almost immediately. -New vreplicas will not be scheduled on pods running on this cordoned node. - -3. 
**Node failure (abrupt)**: - -When a node goes down unexpectedly due to some physical machine failure (network isolation/ loss, CPU issue, power loss, etc), the node controller does the following few steps - -Pods running on the failed node receives a NodeNotReady Warning event - -``` -k describe pod kafkasource-mt-adapter-5 -n knative-eventing -Name: kafkasource-mt-adapter-5 -Namespace: knative-eventing -Priority: 0 -Node: knative-worker4/172.18.0.3 -Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s node.kubernetes.io/unreachable:NoExecute op=Exists for 300s - -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Scheduled 11m default-scheduler Successfully assigned knative-eventing/kafkasource-mt-adapter-5 to knative-worker4 - Normal Pulled 11m kubelet Container image - Normal Created 11m kubelet Created container receive-adapter - Normal Started 11m kubelet Started container receive-adapter - Warning NodeNotReady 3m48s node-controller Node is not ready -``` - -Failing node is tainted with the following Key:Condition: by the node controller if the node controller has not heard from the node in the last node-monitor-grace-period (default is 40 seconds) - -``` -k describe node knative-worker4 -Name: knative-worker4 -Taints: node.kubernetes.io/unreachable:NoExecute - node.kubernetes.io/unreachable:NoSchedule -Unschedulable: false - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal NodeNotSchedulable 5m42s kubelet Node knative-worker4 status is now: NodeNotSchedulable - Normal NodeSchedulable 2m31s kubelet Node knative-worker4 status is now: NodeSchedulable -``` - -``` -k get nodes -NAME STATUS ROLES AGE VERSION -knative-control-plane Ready control-plane,master 7h23m v1.21.1 -knative-worker Ready 7h23m v1.21.1 -knative-worker2 Ready 7h23m v1.21.1 -knative-worker3 Ready 7h23m v1.21.1 -knative-worker4 NotReady 7h23m v1.21.1 -``` - -After a timeout period (`pod-eviction-timeout` == 5 mins (default)), the pods move to the Terminating state. - -Since statefulset now has a `terminationGracePeriodSeconds: 0` setting, the terminating pods are immediately restarted on another functioning Node. A new replica is spun up with the same ordinal. - -During the time period of the failing node being unreachable (~5mins), vreplicas placed on that pod aren’t available to process work from the eventing source. (Theory) Consumption rate goes down and Kafka eventually triggers rebalancing of partitions. Also, KEDA will scale up the number of consumers to resolve the processing lag. A scale up will cause the Eventing scheduler to rebalance the total vreplicas for that source on available running pods. - -4. **Zone failure**: - -All nodes running in the failing zone will be unavailable for scheduling. Nodes will either be tainted with `unreachable` or Spec’ed as `Unschedulable` -See node failure scenarios above for what happens to vreplica placements. +All existing vreplica placements will still be valid and no rebalancing will be done by the vreplica +scheduler. +(For Kafka, its broker may trigger a rebalancing of partitions due to consumer group member +changes.) ## References: @@ -246,7 +94,6 @@ See node failure scenarios above for what happens to vreplica placements. 
* https://medium.com/tailwinds-navigator/kubernetes-tip-how-statefulsets-behave-differently-than-deployments-when-node-fails-d29e36bca7d5 * https://kubernetes.io/docs/concepts/architecture/nodes/#node-controller - --- To learn more about Knative, please visit the diff --git a/vendor/knative.dev/eventing/pkg/scheduler/doc.go b/vendor/knative.dev/eventing/pkg/scheduler/doc.go index b66262a4be..13cf683a17 100644 --- a/vendor/knative.dev/eventing/pkg/scheduler/doc.go +++ b/vendor/knative.dev/eventing/pkg/scheduler/doc.go @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// The scheduler is responsible for placing virtual pod (VPod) replicas within real pods. +// Package scheduler is responsible for placing virtual pod (VPod) replicas within real pods. package scheduler diff --git a/vendor/knative.dev/eventing/pkg/scheduler/factory/registry.go b/vendor/knative.dev/eventing/pkg/scheduler/factory/registry.go deleted file mode 100644 index dbc814055c..0000000000 --- a/vendor/knative.dev/eventing/pkg/scheduler/factory/registry.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2021 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package factory - -import ( - "fmt" - - state "knative.dev/eventing/pkg/scheduler/state" -) - -// RegistryFP is a collection of all available filter plugins. -type RegistryFP map[string]state.FilterPlugin - -// RegistrySP is a collection of all available scoring plugins. -type RegistrySP map[string]state.ScorePlugin - -var ( - FilterRegistry = make(RegistryFP) - ScoreRegistry = make(RegistrySP) -) - -// Register adds a new plugin to the registry. If a plugin with the same name -// exists, it returns an error. -func RegisterFP(name string, factory state.FilterPlugin) error { - if _, ok := FilterRegistry[name]; ok { - return fmt.Errorf("a filter plugin named %v already exists", name) - } - FilterRegistry[name] = factory - return nil -} - -// Unregister removes an existing plugin from the registry. If no plugin with -// the provided name exists, it returns an error. -func UnregisterFP(name string) error { - if _, ok := FilterRegistry[name]; !ok { - return fmt.Errorf("no filter plugin named %v exists", name) - } - delete(FilterRegistry, name) - return nil -} - -func GetFilterPlugin(name string) (state.FilterPlugin, error) { - if f, exist := FilterRegistry[name]; exist { - return f, nil - } - return nil, fmt.Errorf("no fitler plugin named %v exists", name) -} - -// Register adds a new plugin to the registry. If a plugin with the same name -// exists, it returns an error. -func RegisterSP(name string, factory state.ScorePlugin) error { - if _, ok := ScoreRegistry[name]; ok { - return fmt.Errorf("a score plugin named %v already exists", name) - } - ScoreRegistry[name] = factory - return nil -} - -// Unregister removes an existing plugin from the registry. If no plugin with -// the provided name exists, it returns an error. 
-func UnregisterSP(name string) error { - if _, ok := ScoreRegistry[name]; !ok { - return fmt.Errorf("no score plugin named %v exists", name) - } - delete(ScoreRegistry, name) - return nil -} - -func GetScorePlugin(name string) (state.ScorePlugin, error) { - if f, exist := ScoreRegistry[name]; exist { - return f, nil - } - return nil, fmt.Errorf("no score plugin named %v exists", name) -} diff --git a/vendor/knative.dev/eventing/pkg/scheduler/placement.go b/vendor/knative.dev/eventing/pkg/scheduler/placement.go index 3625032354..65ab7897f0 100644 --- a/vendor/knative.dev/eventing/pkg/scheduler/placement.go +++ b/vendor/knative.dev/eventing/pkg/scheduler/placement.go @@ -17,7 +17,6 @@ limitations under the License. package scheduler import ( - "k8s.io/apimachinery/pkg/util/sets" duckv1alpha1 "knative.dev/eventing/pkg/apis/duck/v1alpha1" ) @@ -29,24 +28,3 @@ func GetTotalVReplicas(placements []duckv1alpha1.Placement) int32 { } return r } - -// GetPlacementForPod returns the placement corresponding to podName -func GetPlacementForPod(placements []duckv1alpha1.Placement, podName string) *duckv1alpha1.Placement { - for i := 0; i < len(placements); i++ { - if placements[i].PodName == podName { - return &placements[i] - } - } - return nil -} - -// GetPodCount returns the number of pods with the given placements -func GetPodCount(placements []duckv1alpha1.Placement) int { - set := sets.NewString() - for _, p := range placements { - if p.VReplicas > 0 { - set.Insert(p.PodName) - } - } - return set.Len() -} diff --git a/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/availabilitynodepriority/availability_node_priority.go b/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/availabilitynodepriority/availability_node_priority.go deleted file mode 100644 index e0e60c8832..0000000000 --- a/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/availabilitynodepriority/availability_node_priority.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright 2021 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package availabilitynodepriority - -import ( - "context" - "encoding/json" - "math" - "strings" - - "k8s.io/apimachinery/pkg/types" - "knative.dev/eventing/pkg/scheduler/factory" - state "knative.dev/eventing/pkg/scheduler/state" - "knative.dev/pkg/logging" -) - -// AvailabilityNodePriority is a score plugin that favors pods that create an even spread of resources across nodes for HA -type AvailabilityNodePriority struct { -} - -// Verify AvailabilityNodePriority Implements ScorePlugin Interface -var _ state.ScorePlugin = &AvailabilityNodePriority{} - -// Name of the plugin -const Name = state.AvailabilityNodePriority - -const ( - ErrReasonInvalidArg = "invalid arguments" - ErrReasonNoResource = "node does not exist" -) - -func init() { - factory.RegisterSP(Name, &AvailabilityNodePriority{}) -} - -// Name returns name of the plugin -func (pl *AvailabilityNodePriority) Name() string { - return Name -} - -// Score invoked at the score extension point. 
The "score" returned in this function is higher for nodes that create an even spread across nodes. -func (pl *AvailabilityNodePriority) Score(ctx context.Context, args interface{}, states *state.State, feasiblePods []int32, key types.NamespacedName, podID int32) (uint64, *state.Status) { - logger := logging.FromContext(ctx).With("Score", pl.Name()) - var score uint64 = 0 - - spreadArgs, ok := args.(string) - if !ok { - logger.Errorf("Scoring args %v for priority %q are not valid", args, pl.Name()) - return 0, state.NewStatus(state.Unschedulable, ErrReasonInvalidArg) - } - - skewVal := state.AvailabilityNodePriorityArgs{} - decoder := json.NewDecoder(strings.NewReader(spreadArgs)) - decoder.DisallowUnknownFields() - if err := decoder.Decode(&skewVal); err != nil { - return 0, state.NewStatus(state.Unschedulable, ErrReasonInvalidArg) - } - - if states.Replicas > 0 { //need at least a pod to compute spread - var skew int32 - - _, nodeName, err := states.GetPodInfo(state.PodNameFromOrdinal(states.StatefulSetName, podID)) - if err != nil { - return score, state.NewStatus(state.Error, ErrReasonNoResource) - } - - currentReps := states.NodeSpread[key][nodeName] //get #vreps on this node - for otherNodeName := range states.NodeToZoneMap { //compare with #vreps on other nodes - if otherNodeName != nodeName { - otherReps := states.NodeSpread[key][otherNodeName] - if skew = (currentReps + 1) - otherReps; skew < 0 { - skew = skew * int32(-1) - } - - //logger.Infof("Current Node %v with %d and Other Node %v with %d causing skew %d", nodeName, currentReps, otherNodeName, otherReps, skew) - if skew > skewVal.MaxSkew { - logger.Infof("Pod %d in node %v will cause an uneven node spread %v with other node %v", podID, nodeName, states.NodeSpread[key], otherNodeName) - } - score = score + uint64(skew) - } - } - - score = math.MaxUint64 - score //lesser skews get higher score - } - - return score, state.NewStatus(state.Success) -} - -// ScoreExtensions of the Score plugin. -func (pl *AvailabilityNodePriority) ScoreExtensions() state.ScoreExtensions { - return pl -} - -// NormalizeScore invoked after scoring all pods. -func (pl *AvailabilityNodePriority) NormalizeScore(ctx context.Context, states *state.State, scores state.PodScoreList) *state.Status { - return nil -} diff --git a/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/availabilityzonepriority/availability_zone_priority.go b/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/availabilityzonepriority/availability_zone_priority.go deleted file mode 100644 index 397ff075fb..0000000000 --- a/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/availabilityzonepriority/availability_zone_priority.go +++ /dev/null @@ -1,115 +0,0 @@ -/* -Copyright 2021 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package availabilityzonepriority - -import ( - "context" - "encoding/json" - "math" - "strings" - - "k8s.io/apimachinery/pkg/types" - "knative.dev/eventing/pkg/scheduler/factory" - state "knative.dev/eventing/pkg/scheduler/state" - "knative.dev/pkg/logging" -) - -// AvailabilityZonePriority is a score plugin that favors pods that create an even spread of resources across zones for HA -type AvailabilityZonePriority struct { -} - -// Verify AvailabilityZonePriority Implements ScorePlugin Interface -var _ state.ScorePlugin = &AvailabilityZonePriority{} - -// Name of the plugin -const Name = state.AvailabilityZonePriority - -const ( - ErrReasonInvalidArg = "invalid arguments" - ErrReasonNoResource = "zone does not exist" -) - -func init() { - factory.RegisterSP(Name, &AvailabilityZonePriority{}) -} - -// Name returns name of the plugin -func (pl *AvailabilityZonePriority) Name() string { - return Name -} - -// Score invoked at the score extension point. The "score" returned in this function is higher for zones that create an even spread across zones. -func (pl *AvailabilityZonePriority) Score(ctx context.Context, args interface{}, states *state.State, feasiblePods []int32, key types.NamespacedName, podID int32) (uint64, *state.Status) { - logger := logging.FromContext(ctx).With("Score", pl.Name()) - var score uint64 = 0 - - spreadArgs, ok := args.(string) - if !ok { - logger.Errorf("Scoring args %v for priority %q are not valid", args, pl.Name()) - return 0, state.NewStatus(state.Unschedulable, ErrReasonInvalidArg) - } - - skewVal := state.AvailabilityZonePriorityArgs{} - decoder := json.NewDecoder(strings.NewReader(spreadArgs)) - decoder.DisallowUnknownFields() - if err := decoder.Decode(&skewVal); err != nil { - return 0, state.NewStatus(state.Unschedulable, ErrReasonInvalidArg) - } - - if states.Replicas > 0 { //need at least a pod to compute spread - var skew int32 - zoneMap := make(map[string]struct{}) - for _, zoneName := range states.NodeToZoneMap { - zoneMap[zoneName] = struct{}{} - } - - zoneName, _, err := states.GetPodInfo(state.PodNameFromOrdinal(states.StatefulSetName, podID)) - if err != nil { - return score, state.NewStatus(state.Error, ErrReasonNoResource) - } - - currentReps := states.ZoneSpread[key][zoneName] //get #vreps on this zone - for otherZoneName := range zoneMap { //compare with #vreps on other zones - if otherZoneName != zoneName { - otherReps := states.ZoneSpread[key][otherZoneName] - if skew = (currentReps + 1) - otherReps; skew < 0 { - skew = skew * int32(-1) - } - - //logger.Infof("Current Zone %v with %d and Other Zone %v with %d causing skew %d", zoneName, currentReps, otherZoneName, otherReps, skew) - if skew > skewVal.MaxSkew { //score low - logger.Infof("Pod %d in zone %v will cause an uneven zone spread %v with other zone %v", podID, zoneName, states.ZoneSpread[key], otherZoneName) - } - score = score + uint64(skew) - } - } - - score = math.MaxUint64 - score //lesser skews get higher score - } - - return score, state.NewStatus(state.Success) -} - -// ScoreExtensions of the Score plugin. -func (pl *AvailabilityZonePriority) ScoreExtensions() state.ScoreExtensions { - return pl -} - -// NormalizeScore invoked after scoring all pods. 
-func (pl *AvailabilityZonePriority) NormalizeScore(ctx context.Context, states *state.State, scores state.PodScoreList) *state.Status { - return nil -} diff --git a/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/evenpodspread/even_pod_spread.go b/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/evenpodspread/even_pod_spread.go deleted file mode 100644 index 070e47a995..0000000000 --- a/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/evenpodspread/even_pod_spread.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2021 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package evenpodspread - -import ( - "context" - "encoding/json" - "math" - "strings" - - "k8s.io/apimachinery/pkg/types" - "knative.dev/eventing/pkg/scheduler/factory" - state "knative.dev/eventing/pkg/scheduler/state" - "knative.dev/pkg/logging" -) - -// EvenPodSpread is a filter or score plugin that picks/favors pods that create an equal spread of resources across pods -type EvenPodSpread struct { -} - -// Verify EvenPodSpread Implements FilterPlugin and ScorePlugin Interface -var _ state.FilterPlugin = &EvenPodSpread{} -var _ state.ScorePlugin = &EvenPodSpread{} - -// Name of the plugin -const ( - Name = state.EvenPodSpread - ErrReasonInvalidArg = "invalid arguments" - ErrReasonUnschedulable = "pod will cause an uneven spread" -) - -func init() { - factory.RegisterFP(Name, &EvenPodSpread{}) - factory.RegisterSP(Name, &EvenPodSpread{}) -} - -// Name returns name of the plugin -func (pl *EvenPodSpread) Name() string { - return Name -} - -// Filter invoked at the filter extension point. 
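// [Editor's note, illustration only; not part of the original file.]
// Worked example of the Filter check below, assuming MaxSkew=1: if the candidate
// pod already holds 3 vreplicas of this vpod and another schedulable pod holds 1,
// then skew = |(3+1)-1| = 3 > MaxSkew, so the candidate is rejected with
// "pod will cause an uneven spread". Pods holding zero vreplicas of this vpod and
// having no free capacity are ignored in the comparison.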
-func (pl *EvenPodSpread) Filter(ctx context.Context, args interface{}, states *state.State, key types.NamespacedName, podID int32) *state.Status { - logger := logging.FromContext(ctx).With("Filter", pl.Name()) - - spreadArgs, ok := args.(string) - if !ok { - logger.Errorf("Filter args %v for predicate %q are not valid", args, pl.Name()) - return state.NewStatus(state.Unschedulable, ErrReasonInvalidArg) - } - - skewVal := state.EvenPodSpreadArgs{} - decoder := json.NewDecoder(strings.NewReader(spreadArgs)) - decoder.DisallowUnknownFields() - if err := decoder.Decode(&skewVal); err != nil { - return state.NewStatus(state.Unschedulable, ErrReasonInvalidArg) - } - - if states.Replicas > 0 { //need at least a pod to compute spread - currentReps := states.PodSpread[key][state.PodNameFromOrdinal(states.StatefulSetName, podID)] //get #vreps on this podID - var skew int32 - for _, otherPodID := range states.SchedulablePods { //compare with #vreps on other pods - if otherPodID != podID { - otherReps := states.PodSpread[key][state.PodNameFromOrdinal(states.StatefulSetName, otherPodID)] - - if otherReps == 0 && states.Free(otherPodID) <= 0 { //other pod fully occupied by other vpods - so ignore - continue - } - if skew = (currentReps + 1) - otherReps; skew < 0 { - skew = skew * int32(-1) - } - - //logger.Infof("Current Pod %d with %d and Other Pod %d with %d causing skew %d", podID, currentReps, otherPodID, otherReps, skew) - if skew > skewVal.MaxSkew { - logger.Infof("Unschedulable! Pod %d will cause an uneven spread %v with other pod %v", podID, states.PodSpread[key], otherPodID) - return state.NewStatus(state.Unschedulable, ErrReasonUnschedulable) - } - } - } - } - - return state.NewStatus(state.Success) -} - -// Score invoked at the score extension point. The "score" returned in this function is higher for pods that create an even spread across pods. 
-func (pl *EvenPodSpread) Score(ctx context.Context, args interface{}, states *state.State, feasiblePods []int32, key types.NamespacedName, podID int32) (uint64, *state.Status) { - logger := logging.FromContext(ctx).With("Score", pl.Name()) - var score uint64 = 0 - - spreadArgs, ok := args.(string) - if !ok { - logger.Errorf("Scoring args %v for priority %q are not valid", args, pl.Name()) - return 0, state.NewStatus(state.Unschedulable, ErrReasonInvalidArg) - } - - skewVal := state.EvenPodSpreadArgs{} - decoder := json.NewDecoder(strings.NewReader(spreadArgs)) - decoder.DisallowUnknownFields() - if err := decoder.Decode(&skewVal); err != nil { - return 0, state.NewStatus(state.Unschedulable, ErrReasonInvalidArg) - } - - if states.Replicas > 0 { //need at least a pod to compute spread - currentReps := states.PodSpread[key][state.PodNameFromOrdinal(states.StatefulSetName, podID)] //get #vreps on this podID - var skew int32 - for _, otherPodID := range states.SchedulablePods { //compare with #vreps on other pods - if otherPodID != podID { - otherReps := states.PodSpread[key][state.PodNameFromOrdinal(states.StatefulSetName, otherPodID)] - if otherReps == 0 && states.Free(otherPodID) == 0 { //other pod fully occupied by other vpods - so ignore - continue - } - if skew = (currentReps + 1) - otherReps; skew < 0 { - skew = skew * int32(-1) - } - - //logger.Infof("Current Pod %d with %d and Other Pod %d with %d causing skew %d", podID, currentReps, otherPodID, otherReps, skew) - if skew > skewVal.MaxSkew { - logger.Infof("Pod %d will cause an uneven spread %v with other pod %v", podID, states.PodSpread[key], otherPodID) - } - score = score + uint64(skew) - } - } - score = math.MaxUint64 - score //lesser skews get higher score - } - - return score, state.NewStatus(state.Success) -} - -// ScoreExtensions of the Score plugin. -func (pl *EvenPodSpread) ScoreExtensions() state.ScoreExtensions { - return pl -} - -// NormalizeScore invoked after scoring all pods. -func (pl *EvenPodSpread) NormalizeScore(ctx context.Context, states *state.State, scores state.PodScoreList) *state.Status { - return nil -} diff --git a/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/lowestordinalpriority/lowest_ordinal_priority.go b/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/lowestordinalpriority/lowest_ordinal_priority.go deleted file mode 100644 index a7d84ca390..0000000000 --- a/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/lowestordinalpriority/lowest_ordinal_priority.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2021 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package lowestordinalpriority - -import ( - "context" - "math" - - "k8s.io/apimachinery/pkg/types" - "knative.dev/eventing/pkg/scheduler/factory" - state "knative.dev/eventing/pkg/scheduler/state" -) - -// LowestOrdinalPriority is a score plugin that favors pods that have a lower ordinal -type LowestOrdinalPriority struct { -} - -// Verify LowestOrdinalPriority Implements ScorePlugin Interface -var _ state.ScorePlugin = &LowestOrdinalPriority{} - -// Name of the plugin -const Name = state.LowestOrdinalPriority - -func init() { - factory.RegisterSP(Name, &LowestOrdinalPriority{}) -} - -// Name returns name of the plugin -func (pl *LowestOrdinalPriority) Name() string { - return Name -} - -// Score invoked at the score extension point. The "score" returned in this function is higher for pods with lower ordinal values. -func (pl *LowestOrdinalPriority) Score(ctx context.Context, args interface{}, states *state.State, feasiblePods []int32, key types.NamespacedName, podID int32) (uint64, *state.Status) { - score := math.MaxUint64 - uint64(podID) //lower ordinals get higher score - return score, state.NewStatus(state.Success) -} - -// ScoreExtensions of the Score plugin. -func (pl *LowestOrdinalPriority) ScoreExtensions() state.ScoreExtensions { - return pl -} - -// NormalizeScore invoked after scoring all pods. -func (pl *LowestOrdinalPriority) NormalizeScore(ctx context.Context, states *state.State, scores state.PodScoreList) *state.Status { - return nil -} diff --git a/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/podfitsresources/pod_fits_resources.go b/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/podfitsresources/pod_fits_resources.go deleted file mode 100644 index a4a751e847..0000000000 --- a/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/podfitsresources/pod_fits_resources.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2021 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package podfitsresources - -import ( - "context" - - "k8s.io/apimachinery/pkg/types" - "knative.dev/eventing/pkg/scheduler/factory" - state "knative.dev/eventing/pkg/scheduler/state" - "knative.dev/pkg/logging" -) - -// PodFitsResources is a plugin that filters pods that do not have sufficient free capacity for a vreplica to be placed on it -type PodFitsResources struct { -} - -// Verify PodFitsResources Implements FilterPlugin Interface -var _ state.FilterPlugin = &PodFitsResources{} - -// Name of the plugin -const Name = state.PodFitsResources - -const ( - ErrReasonUnschedulable = "pod at full capacity" -) - -func init() { - factory.RegisterFP(Name, &PodFitsResources{}) -} - -// Name returns name of the plugin -func (pl *PodFitsResources) Name() string { - return Name -} - -// Filter invoked at the filter extension point. 
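// [Editor's note, illustration only; not part of the original file.]
// Worked example of the capacity check below: with a pod capacity of 20 vreplicas
// and 20 vreplicas already placed on the candidate pod, states.Free(podID) == 0,
// so the pod is rejected with "pod at full capacity". When no placements exist yet
// (len(states.FreeCap) == 0), every pod passes this filter.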
-func (pl *PodFitsResources) Filter(ctx context.Context, args interface{}, states *state.State, key types.NamespacedName, podID int32) *state.Status { - logger := logging.FromContext(ctx).With("Filter", pl.Name()) - - if len(states.FreeCap) == 0 || states.Free(podID) > 0 { //vpods with no placements or pods with positive free cap - return state.NewStatus(state.Success) - } - - logger.Infof("Unschedulable! Pod %d has no free capacity %v", podID, states.FreeCap) - return state.NewStatus(state.Unschedulable, ErrReasonUnschedulable) -} diff --git a/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/removewithavailabilitynodepriority/remove_with_availability_node_priority.go b/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/removewithavailabilitynodepriority/remove_with_availability_node_priority.go deleted file mode 100644 index 62959ee79b..0000000000 --- a/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/removewithavailabilitynodepriority/remove_with_availability_node_priority.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright 2021 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package removewithavailabilitynodepriority - -import ( - "context" - "encoding/json" - "math" - "strings" - - "k8s.io/apimachinery/pkg/types" - "knative.dev/eventing/pkg/scheduler/factory" - state "knative.dev/eventing/pkg/scheduler/state" - "knative.dev/pkg/logging" -) - -// RemoveWithAvailabilityNodePriority is a score plugin that favors pods that create an even spread of resources across nodes for HA -type RemoveWithAvailabilityNodePriority struct { -} - -// Verify RemoveWithAvailabilityNodePriority Implements ScorePlugin Interface -var _ state.ScorePlugin = &RemoveWithAvailabilityNodePriority{} - -// Name of the plugin -const Name = state.RemoveWithAvailabilityNodePriority - -const ( - ErrReasonInvalidArg = "invalid arguments" - ErrReasonNoResource = "node does not exist" -) - -func init() { - factory.RegisterSP(Name, &RemoveWithAvailabilityNodePriority{}) -} - -// Name returns name of the plugin -func (pl *RemoveWithAvailabilityNodePriority) Name() string { - return Name -} - -// Score invoked at the score extension point. The "score" returned in this function is higher for nodes that create an even spread across nodes. 
-func (pl *RemoveWithAvailabilityNodePriority) Score(ctx context.Context, args interface{}, states *state.State, feasiblePods []int32, key types.NamespacedName, podID int32) (uint64, *state.Status) { - logger := logging.FromContext(ctx).With("Score", pl.Name()) - var score uint64 = 0 - - spreadArgs, ok := args.(string) - if !ok { - logger.Errorf("Scoring args %v for priority %q are not valid", args, pl.Name()) - return 0, state.NewStatus(state.Unschedulable, ErrReasonInvalidArg) - } - - skewVal := state.AvailabilityNodePriorityArgs{} - decoder := json.NewDecoder(strings.NewReader(spreadArgs)) - decoder.DisallowUnknownFields() - if err := decoder.Decode(&skewVal); err != nil { - return 0, state.NewStatus(state.Unschedulable, ErrReasonInvalidArg) - } - - if states.Replicas > 0 { //need at least a pod to compute spread - var skew int32 - _, nodeName, err := states.GetPodInfo(state.PodNameFromOrdinal(states.StatefulSetName, podID)) - if err != nil { - return score, state.NewStatus(state.Error, ErrReasonNoResource) - } - - currentReps := states.NodeSpread[key][nodeName] //get #vreps on this node - for otherNodeName := range states.NodeToZoneMap { //compare with #vreps on other pods - if otherNodeName != nodeName { - otherReps, ok := states.NodeSpread[key][otherNodeName] - if !ok { - continue //node does not exist in current placement, so move on - } - if skew = (currentReps - 1) - otherReps; skew < 0 { - skew = skew * int32(-1) - } - - //logger.Infof("Current Node %v with %d and Other Node %v with %d causing skew %d", nodeName, currentReps, otherNodeName, otherReps, skew) - if skew > skewVal.MaxSkew { //score low - logger.Infof("Pod %d in node %v will cause an uneven node spread %v with other node %v", podID, nodeName, states.NodeSpread[key], otherNodeName) - } - score = score + uint64(skew) - } - } - - score = math.MaxUint64 - score //lesser skews get higher score - } - - return score, state.NewStatus(state.Success) -} - -// ScoreExtensions of the Score plugin. -func (pl *RemoveWithAvailabilityNodePriority) ScoreExtensions() state.ScoreExtensions { - return pl -} - -// NormalizeScore invoked after scoring all pods. -func (pl *RemoveWithAvailabilityNodePriority) NormalizeScore(ctx context.Context, states *state.State, scores state.PodScoreList) *state.Status { - return nil -} diff --git a/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/removewithavailabilityzonepriority/remove_with_availability_zone_priority.go b/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/removewithavailabilityzonepriority/remove_with_availability_zone_priority.go deleted file mode 100644 index f2e3eb23f0..0000000000 --- a/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/removewithavailabilityzonepriority/remove_with_availability_zone_priority.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2021 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package removewithavailabilityzonepriority - -import ( - "context" - "encoding/json" - "math" - "strings" - - "k8s.io/apimachinery/pkg/types" - "knative.dev/eventing/pkg/scheduler/factory" - state "knative.dev/eventing/pkg/scheduler/state" - "knative.dev/pkg/logging" -) - -// RemoveWithAvailabilityZonePriority is a score plugin that favors pods that create an even spread of resources across zones for HA -type RemoveWithAvailabilityZonePriority struct { -} - -// Verify RemoveWithAvailabilityZonePriority Implements ScorePlugin Interface -var _ state.ScorePlugin = &RemoveWithAvailabilityZonePriority{} - -// Name of the plugin -const Name = state.RemoveWithAvailabilityZonePriority - -const ( - ErrReasonInvalidArg = "invalid arguments" - ErrReasonNoResource = "zone does not exist" -) - -func init() { - factory.RegisterSP(Name, &RemoveWithAvailabilityZonePriority{}) -} - -// Name returns name of the plugin -func (pl *RemoveWithAvailabilityZonePriority) Name() string { - return Name -} - -// Score invoked at the score extension point. The "score" returned in this function is higher for zones that create an even spread across zones. -func (pl *RemoveWithAvailabilityZonePriority) Score(ctx context.Context, args interface{}, states *state.State, feasiblePods []int32, key types.NamespacedName, podID int32) (uint64, *state.Status) { - logger := logging.FromContext(ctx).With("Score", pl.Name()) - var score uint64 = 0 - - spreadArgs, ok := args.(string) - if !ok { - logger.Errorf("Scoring args %v for priority %q are not valid", args, pl.Name()) - return 0, state.NewStatus(state.Unschedulable, ErrReasonInvalidArg) - } - - skewVal := state.AvailabilityZonePriorityArgs{} - decoder := json.NewDecoder(strings.NewReader(spreadArgs)) - decoder.DisallowUnknownFields() - if err := decoder.Decode(&skewVal); err != nil { - return 0, state.NewStatus(state.Unschedulable, ErrReasonInvalidArg) - } - - if states.Replicas > 0 { //need at least a pod to compute spread - var skew int32 - zoneMap := make(map[string]struct{}) - for _, zoneName := range states.NodeToZoneMap { - zoneMap[zoneName] = struct{}{} - } - - zoneName, _, err := states.GetPodInfo(state.PodNameFromOrdinal(states.StatefulSetName, podID)) - if err != nil { - return score, state.NewStatus(state.Error, ErrReasonNoResource) - } - - currentReps := states.ZoneSpread[key][zoneName] //get #vreps on this zone - for otherZoneName := range zoneMap { //compare with #vreps on other pods - if otherZoneName != zoneName { - otherReps, ok := states.ZoneSpread[key][otherZoneName] - if !ok { - continue //zone does not exist in current placement, so move on - } - if skew = (currentReps - 1) - otherReps; skew < 0 { - skew = skew * int32(-1) - } - - //logger.Infof("Current Zone %v with %d and Other Zone %v with %d causing skew %d", zoneName, currentReps, otherZoneName, otherReps, skew) - if skew > skewVal.MaxSkew { //score low - logger.Infof("Pod %d in zone %v will cause an uneven zone spread %v with other zone %v", podID, zoneName, states.ZoneSpread[key], otherZoneName) - } - score = score + uint64(skew) - } - } - - score = math.MaxUint64 - score //lesser skews get higher score - } - - return score, state.NewStatus(state.Success) -} - -// ScoreExtensions of the Score plugin. -func (pl *RemoveWithAvailabilityZonePriority) ScoreExtensions() state.ScoreExtensions { - return pl -} - -// NormalizeScore invoked after scoring all pods. 
-func (pl *RemoveWithAvailabilityZonePriority) NormalizeScore(ctx context.Context, states *state.State, scores state.PodScoreList) *state.Status { - return nil -} diff --git a/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/removewithevenpodspreadpriority/remove_with_even_pod_spread_priority.go b/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/removewithevenpodspreadpriority/remove_with_even_pod_spread_priority.go deleted file mode 100644 index e7b008e0b0..0000000000 --- a/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/removewithevenpodspreadpriority/remove_with_even_pod_spread_priority.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2021 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package removewithevenpodspreadpriority - -import ( - "context" - "encoding/json" - "math" - "strings" - - "k8s.io/apimachinery/pkg/types" - "knative.dev/eventing/pkg/scheduler/factory" - state "knative.dev/eventing/pkg/scheduler/state" - "knative.dev/pkg/logging" -) - -// RemoveWithEvenPodSpreadPriority is a filter plugin that eliminates pods that do not create an equal spread of resources across pods -type RemoveWithEvenPodSpreadPriority struct { -} - -// Verify RemoveWithEvenPodSpreadPriority Implements FilterPlugin Interface -var _ state.ScorePlugin = &RemoveWithEvenPodSpreadPriority{} - -// Name of the plugin -const ( - Name = state.RemoveWithEvenPodSpreadPriority - ErrReasonInvalidArg = "invalid arguments" - ErrReasonUnschedulable = "pod will cause an uneven spread" -) - -func init() { - factory.RegisterSP(Name, &RemoveWithEvenPodSpreadPriority{}) -} - -// Name returns name of the plugin -func (pl *RemoveWithEvenPodSpreadPriority) Name() string { - return Name -} - -// Score invoked at the score extension point. The "score" returned in this function is higher for pods that create an even spread across pods. 
-func (pl *RemoveWithEvenPodSpreadPriority) Score(ctx context.Context, args interface{}, states *state.State, feasiblePods []int32, key types.NamespacedName, podID int32) (uint64, *state.Status) { - logger := logging.FromContext(ctx).With("Score", pl.Name()) - var score uint64 = 0 - - spreadArgs, ok := args.(string) - if !ok { - logger.Errorf("Scoring args %v for priority %q are not valid", args, pl.Name()) - return 0, state.NewStatus(state.Unschedulable, ErrReasonInvalidArg) - } - - skewVal := state.EvenPodSpreadArgs{} - decoder := json.NewDecoder(strings.NewReader(spreadArgs)) - decoder.DisallowUnknownFields() - if err := decoder.Decode(&skewVal); err != nil { - return 0, state.NewStatus(state.Unschedulable, ErrReasonInvalidArg) - } - - if states.Replicas > 0 { //need at least a pod to compute spread - currentReps := states.PodSpread[key][state.PodNameFromOrdinal(states.StatefulSetName, podID)] //get #vreps on this podID - var skew int32 - for _, otherPodID := range states.SchedulablePods { //compare with #vreps on other pods - if otherPodID != podID { - otherReps, ok := states.PodSpread[key][state.PodNameFromOrdinal(states.StatefulSetName, otherPodID)] - if !ok { - continue //pod does not exist in current placement, so move on - } - if skew = (currentReps - 1) - otherReps; skew < 0 { - skew = skew * int32(-1) - } - - //logger.Infof("Current Pod %v with %d and Other Pod %v with %d causing skew %d", podID, currentReps, otherPodID, otherReps, skew) - if skew > skewVal.MaxSkew { //score low - logger.Infof("Pod %d will cause an uneven spread %v with other pod %v", podID, states.PodSpread[key], otherPodID) - } - score = score + uint64(skew) - } - } - score = math.MaxUint64 - score //lesser skews get higher score - } - - return score, state.NewStatus(state.Success) -} - -// ScoreExtensions of the Score plugin. -func (pl *RemoveWithEvenPodSpreadPriority) ScoreExtensions() state.ScoreExtensions { - return pl -} - -// NormalizeScore invoked after scoring all pods. -func (pl *RemoveWithEvenPodSpreadPriority) NormalizeScore(ctx context.Context, states *state.State, scores state.PodScoreList) *state.Status { - return nil -} diff --git a/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/removewithhighestordinalpriority/remove_with_highest_ordinal_priority.go b/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/removewithhighestordinalpriority/remove_with_highest_ordinal_priority.go deleted file mode 100644 index 324454f5e8..0000000000 --- a/vendor/knative.dev/eventing/pkg/scheduler/plugins/core/removewithhighestordinalpriority/remove_with_highest_ordinal_priority.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2021 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package removewithhighestordinalpriority - -import ( - "context" - - "k8s.io/apimachinery/pkg/types" - "knative.dev/eventing/pkg/scheduler/factory" - state "knative.dev/eventing/pkg/scheduler/state" -) - -// RemoveWithHighestOrdinalPriority is a score plugin that favors pods that have a higher ordinal -type RemoveWithHighestOrdinalPriority struct { -} - -// Verify RemoveWithHighestOrdinalPriority Implements ScorePlugin Interface -var _ state.ScorePlugin = &RemoveWithHighestOrdinalPriority{} - -// Name of the plugin -const Name = state.RemoveWithHighestOrdinalPriority - -func init() { - factory.RegisterSP(Name, &RemoveWithHighestOrdinalPriority{}) -} - -// Name returns name of the plugin -func (pl *RemoveWithHighestOrdinalPriority) Name() string { - return Name -} - -// Score invoked at the score extension point. The "score" returned in this function is higher for pods with higher ordinal values. -func (pl *RemoveWithHighestOrdinalPriority) Score(ctx context.Context, args interface{}, states *state.State, feasiblePods []int32, key types.NamespacedName, podID int32) (uint64, *state.Status) { - score := uint64(podID) //higher ordinals get higher score - return score, state.NewStatus(state.Success) -} - -// ScoreExtensions of the Score plugin. -func (pl *RemoveWithHighestOrdinalPriority) ScoreExtensions() state.ScoreExtensions { - return pl -} - -// NormalizeScore invoked after scoring all pods. -func (pl *RemoveWithHighestOrdinalPriority) NormalizeScore(ctx context.Context, states *state.State, scores state.PodScoreList) *state.Status { - return nil -} diff --git a/vendor/knative.dev/eventing/pkg/scheduler/plugins/kafka/nomaxresourcecount/no_max_resource_count.go b/vendor/knative.dev/eventing/pkg/scheduler/plugins/kafka/nomaxresourcecount/no_max_resource_count.go deleted file mode 100644 index 49975eefb8..0000000000 --- a/vendor/knative.dev/eventing/pkg/scheduler/plugins/kafka/nomaxresourcecount/no_max_resource_count.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2021 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nomaxresourcecount - -import ( - "context" - "encoding/json" - "strings" - - "k8s.io/apimachinery/pkg/types" - "knative.dev/eventing/pkg/scheduler/factory" - state "knative.dev/eventing/pkg/scheduler/state" - "knative.dev/pkg/logging" -) - -// NoMaxResourceCount plugin filters pods that cause total pods with placements to exceed total partitioncount. -type NoMaxResourceCount struct { -} - -// Verify NoMaxResourceCount Implements FilterPlugin Interface -var _ state.FilterPlugin = &NoMaxResourceCount{} - -// Name of the plugin -const Name = state.NoMaxResourceCount - -const ( - ErrReasonInvalidArg = "invalid arguments" - ErrReasonUnschedulable = "pod increases total # of pods beyond partition count" -) - -func init() { - factory.RegisterFP(Name, &NoMaxResourceCount{}) -} - -// Name returns name of the plugin -func (pl *NoMaxResourceCount) Name() string { - return Name -} - -// Filter invoked at the filter extension point. 
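// [Editor's note, illustration only; not part of the original file.]
// Worked example of the partition-count guard below: with NumPartitions == 4 and
// the vpod already placed on 4 distinct pods, a candidate pod that is not yet in
// PodSpread would raise the pod count to 5 > 4, so it is rejected; a pod that
// already holds vreplicas for this vpod always passes this filter.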
-func (pl *NoMaxResourceCount) Filter(ctx context.Context, args interface{}, states *state.State, key types.NamespacedName, podID int32) *state.Status { - logger := logging.FromContext(ctx).With("Filter", pl.Name()) - - resourceCountArgs, ok := args.(string) - if !ok { - logger.Errorf("Filter args %v for predicate %q are not valid", args, pl.Name()) - return state.NewStatus(state.Unschedulable, ErrReasonInvalidArg) - } - - resVal := state.NoMaxResourceCountArgs{} - decoder := json.NewDecoder(strings.NewReader(resourceCountArgs)) - decoder.DisallowUnknownFields() - if err := decoder.Decode(&resVal); err != nil { - return state.NewStatus(state.Unschedulable, ErrReasonInvalidArg) - } - - podName := state.PodNameFromOrdinal(states.StatefulSetName, podID) - if _, ok := states.PodSpread[key][podName]; !ok && ((len(states.PodSpread[key]) + 1) > resVal.NumPartitions) { //pod not in vrep's partition map and counting this new pod towards total pod count - logger.Infof("Unschedulable! Pod %d filtered due to total pod count %v exceeding partition count", podID, len(states.PodSpread[key])+1) - return state.NewStatus(state.Unschedulable, ErrReasonUnschedulable) - } - - return state.NewStatus(state.Success) -} diff --git a/vendor/knative.dev/eventing/pkg/scheduler/scheduler.go b/vendor/knative.dev/eventing/pkg/scheduler/scheduler.go index a9ca7b1d5a..0dd4f2b6c8 100644 --- a/vendor/knative.dev/eventing/pkg/scheduler/scheduler.go +++ b/vendor/knative.dev/eventing/pkg/scheduler/scheduler.go @@ -30,63 +30,20 @@ import ( duckv1alpha1 "knative.dev/eventing/pkg/apis/duck/v1alpha1" ) -type SchedulerPolicyType string - const ( - // MAXFILLUP policy type adds vreplicas to existing pods to fill them up before adding to new pods - MAXFILLUP SchedulerPolicyType = "MAXFILLUP" - // PodAnnotationKey is an annotation used by the scheduler to be informed of pods // being evicted and not use it for placing vreplicas PodAnnotationKey = "eventing.knative.dev/unschedulable" ) -const ( - ZoneLabel = "topology.kubernetes.io/zone" - - UnknownZone = "unknown" -) - -const ( - // MaxWeight is the maximum weight that can be assigned for a priority. - MaxWeight uint64 = 10 - // MinWeight is the minimum weight that can be assigned for a priority. - MinWeight uint64 = 0 -) - -// Policy describes a struct of a policy resource. -type SchedulerPolicy struct { - // Holds the information to configure the fit predicate functions. - Predicates []PredicatePolicy `json:"predicates"` - // Holds the information to configure the priority functions. - Priorities []PriorityPolicy `json:"priorities"` -} - -// PredicatePolicy describes a struct of a predicate policy. -type PredicatePolicy struct { - // Identifier of the predicate policy - Name string `json:"name"` - // Holds the parameters to configure the given predicate - Args interface{} `json:"args"` -} - -// PriorityPolicy describes a struct of a priority policy. -type PriorityPolicy struct { - // Identifier of the priority policy - Name string `json:"name"` - // The numeric multiplier for the pod scores that the priority function generates - // The weight should be a positive integer - Weight uint64 `json:"weight"` - // Holds the parameters to configure the given priority function - Args interface{} `json:"args"` -} - // VPodLister is the function signature for returning a list of VPods type VPodLister func() ([]VPod, error) // Evictor allows for vreplicas to be evicted. // For instance, the evictor is used by the statefulset scheduler to // move vreplicas to pod with a lower ordinal. 
+// +// pod might be `nil`. type Evictor func(pod *corev1.Pod, vpod VPod, from *duckv1alpha1.Placement) error // Scheduler is responsible for placing VPods into real Kubernetes pods diff --git a/vendor/knative.dev/eventing/pkg/scheduler/state/helpers.go b/vendor/knative.dev/eventing/pkg/scheduler/state/helpers.go index ad3a5aaf76..db5d9216b2 100644 --- a/vendor/knative.dev/eventing/pkg/scheduler/state/helpers.go +++ b/vendor/knative.dev/eventing/pkg/scheduler/state/helpers.go @@ -17,14 +17,10 @@ limitations under the License. package state import ( - "context" - "math" "strconv" "strings" - "time" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" "knative.dev/eventing/pkg/scheduler" ) @@ -36,7 +32,7 @@ func PodNameFromOrdinal(name string, ordinal int32) string { func OrdinalFromPodName(podName string) int32 { ordinal, err := strconv.ParseInt(podName[strings.LastIndex(podName, "-")+1:], 10, 32) if err != nil { - return math.MaxInt32 + panic(podName + " is not a valid pod name") } return int32(ordinal) } @@ -50,31 +46,3 @@ func GetVPod(key types.NamespacedName, vpods []scheduler.VPod) scheduler.VPod { } return nil } - -func SatisfyZoneAvailability(feasiblePods []int32, states *State) bool { - zoneMap := make(map[string]struct{}) - var zoneName string - var err error - for _, podID := range feasiblePods { - zoneName, _, err = states.GetPodInfo(PodNameFromOrdinal(states.StatefulSetName, podID)) - if err != nil { - continue - } - zoneMap[zoneName] = struct{}{} - } - return len(zoneMap) == int(states.NumZones) -} - -func SatisfyNodeAvailability(feasiblePods []int32, states *State) bool { - nodeMap := make(map[string]struct{}) - var nodeName string - var err error - for _, podID := range feasiblePods { - wait.PollUntilContextTimeout(context.Background(), 50*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) { - _, nodeName, err = states.GetPodInfo(PodNameFromOrdinal(states.StatefulSetName, podID)) - return err == nil, nil - }) - nodeMap[nodeName] = struct{}{} - } - return len(nodeMap) == int(states.NumNodes) -} diff --git a/vendor/knative.dev/eventing/pkg/scheduler/state/interface.go b/vendor/knative.dev/eventing/pkg/scheduler/state/interface.go deleted file mode 100644 index 44c7a2d4d4..0000000000 --- a/vendor/knative.dev/eventing/pkg/scheduler/state/interface.go +++ /dev/null @@ -1,209 +0,0 @@ -/* -Copyright 2021 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package state - -import ( - "context" - "errors" - "strings" - - "k8s.io/apimachinery/pkg/types" -) - -const ( - PodFitsResources = "PodFitsResources" - NoMaxResourceCount = "NoMaxResourceCount" - EvenPodSpread = "EvenPodSpread" - AvailabilityNodePriority = "AvailabilityNodePriority" - AvailabilityZonePriority = "AvailabilityZonePriority" - LowestOrdinalPriority = "LowestOrdinalPriority" - RemoveWithEvenPodSpreadPriority = "RemoveWithEvenPodSpreadPriority" - RemoveWithAvailabilityNodePriority = "RemoveWithAvailabilityNodePriority" - RemoveWithAvailabilityZonePriority = "RemoveWithAvailabilityZonePriority" - RemoveWithHighestOrdinalPriority = "RemoveWithHighestOrdinalPriority" -) - -// Plugin is the parent type for all the scheduling framework plugins. -type Plugin interface { - Name() string -} - -type FilterPlugin interface { - Plugin - // Filter is called by the scheduler. - // All FilterPlugins should return "Success" to declare that - // the given pod fits the vreplica. - Filter(ctx context.Context, args interface{}, state *State, key types.NamespacedName, podID int32) *Status -} - -// ScoreExtensions is an interface for Score extended functionality. -type ScoreExtensions interface { - // NormalizeScore is called for all pod scores produced by the same plugin's "Score" - // method. A successful run of NormalizeScore will update the scores list and return - // a success status. - NormalizeScore(ctx context.Context, state *State, scores PodScoreList) *Status -} - -type ScorePlugin interface { - Plugin - // Score is called by the scheduler. - // All ScorePlugins should return "Success" unless the args are invalid. - Score(ctx context.Context, args interface{}, state *State, feasiblePods []int32, key types.NamespacedName, podID int32) (uint64, *Status) - - // ScoreExtensions returns a ScoreExtensions interface if it implements one, or nil if does not - ScoreExtensions() ScoreExtensions -} - -// NoMaxResourceCountArgs holds arguments used to configure the NoMaxResourceCount plugin. -type NoMaxResourceCountArgs struct { - NumPartitions int -} - -// EvenPodSpreadArgs holds arguments used to configure the EvenPodSpread plugin. -type EvenPodSpreadArgs struct { - MaxSkew int32 -} - -// AvailabilityZonePriorityArgs holds arguments used to configure the AvailabilityZonePriority plugin. -type AvailabilityZonePriorityArgs struct { - MaxSkew int32 -} - -// AvailabilityNodePriorityArgs holds arguments used to configure the AvailabilityNodePriority plugin. -type AvailabilityNodePriorityArgs struct { - MaxSkew int32 -} - -// Code is the Status code/type which is returned from plugins. -type Code int - -// These are predefined codes used in a Status. -const ( - // Success means that plugin ran correctly and found pod schedulable. - Success Code = iota - // Unschedulable is used when a plugin finds a pod unschedulable due to not satisying the predicate. - Unschedulable - // Error is used for internal plugin errors, unexpected input, etc. - Error -) - -// Status indicates the result of running a plugin. -type Status struct { - code Code - reasons []string - err error -} - -// Code returns code of the Status. -func (s *Status) Code() Code { - if s == nil { - return Success - } - return s.code -} - -// Message returns a concatenated message on reasons of the Status. -func (s *Status) Message() string { - if s == nil { - return "" - } - return strings.Join(s.reasons, ", ") -} - -// NewStatus makes a Status out of the given arguments and returns its pointer. 
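// [Editor's note, illustration only; not part of the original file.]
// Typical usage of the constructor below:
//   st := NewStatus(Unschedulable, "pod at full capacity")
//   st.IsUnschedulable() // true
//   st.AsError()         // non-nil error carrying the joined reasons
// A nil *Status is treated as Success by Code(), IsSuccess() and AsError().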
-func NewStatus(code Code, reasons ...string) *Status { - s := &Status{ - code: code, - reasons: reasons, - } - if code == Error { - s.err = errors.New(s.Message()) - } - return s -} - -// AsStatus wraps an error in a Status. -func AsStatus(err error) *Status { - return &Status{ - code: Error, - reasons: []string{err.Error()}, - err: err, - } -} - -// AsError returns nil if the status is a success; otherwise returns an "error" object -// with a concatenated message on reasons of the Status. -func (s *Status) AsError() error { - if s.IsSuccess() { - return nil - } - if s.err != nil { - return s.err - } - return errors.New(s.Message()) -} - -// IsSuccess returns true if and only if "Status" is nil or Code is "Success". -func (s *Status) IsSuccess() bool { - return s.Code() == Success -} - -// IsError returns true if and only if "Status" is "Error". -func (s *Status) IsError() bool { - return s.Code() == Error -} - -// IsUnschedulable returns true if "Status" is Unschedulable -func (s *Status) IsUnschedulable() bool { - return s.Code() == Unschedulable -} - -type PodScore struct { - ID int32 - Score uint64 -} - -type PodScoreList []PodScore - -// PluginToPodScores declares a map from plugin name to its PodScoreList. -type PluginToPodScores map[string]PodScoreList - -// PluginToStatus maps plugin name to status. Currently used to identify which Filter plugin -// returned which status. -type PluginToStatus map[string]*Status - -// Merge merges the statuses in the map into one. The resulting status code have the following -// precedence: Error, Unschedulable, Success -func (p PluginToStatus) Merge() *Status { - if len(p) == 0 { - return nil - } - - finalStatus := NewStatus(Success) - for _, s := range p { - if s.Code() == Error { - finalStatus.err = s.AsError() - } - if s.Code() > finalStatus.code { - finalStatus.code = s.Code() - } - - finalStatus.reasons = append(finalStatus.reasons, s.reasons...) - } - - return finalStatus -} diff --git a/vendor/knative.dev/eventing/pkg/scheduler/state/state.go b/vendor/knative.dev/eventing/pkg/scheduler/state/state.go index 44069babe9..4f3ed65979 100644 --- a/vendor/knative.dev/eventing/pkg/scheduler/state/state.go +++ b/vendor/knative.dev/eventing/pkg/scheduler/state/state.go @@ -19,14 +19,12 @@ package state import ( "context" "encoding/json" - "errors" "math" "strconv" "go.uber.org/zap" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" corev1 "k8s.io/client-go/listers/core/v1" @@ -39,46 +37,27 @@ type StateAccessor interface { // State returns the current state (snapshot) about placed vpods // Take into account reserved vreplicas and update `reserved` to reflect // the current state. - State(ctx context.Context, reserved map[types.NamespacedName]map[string]int32) (*State, error) + State(ctx context.Context) (*State, error) } // state provides information about the current scheduling of all vpods // It is used by for the scheduler and the autoscaler type State struct { // free tracks the free capacity of each pod. + // + // Including pods that might not exist anymore, it reflects the free capacity determined by + // placements in the vpod status. FreeCap []int32 // schedulable pods tracks the pods that aren't being evicted. SchedulablePods []int32 - // LastOrdinal is the ordinal index corresponding to the last statefulset replica - // with placed vpods. - LastOrdinal int32 - // Pod capacity. 
Capacity int32 // Replicas is the (cached) number of statefulset replicas. Replicas int32 - // Number of available zones in cluster - NumZones int32 - - // Number of available nodes in cluster - NumNodes int32 - - // Scheduling policy type for placing vreplicas on pods - SchedulerPolicy scheduler.SchedulerPolicyType - - // Scheduling policy plugin for placing vreplicas on pods - SchedPolicy *scheduler.SchedulerPolicy - - // De-scheduling policy plugin for removing vreplicas from pods - DeschedPolicy *scheduler.SchedulerPolicy - - // Mapping node names of nodes currently in cluster to their zone info - NodeToZoneMap map[string]string - StatefulSetName string PodLister corev1.PodNamespaceLister @@ -86,12 +65,6 @@ type State struct { // Stores for each vpod, a map of podname to number of vreplicas placed on that pod currently PodSpread map[types.NamespacedName]map[string]int32 - // Stores for each vpod, a map of nodename to total number of vreplicas placed on all pods running on that node currently - NodeSpread map[types.NamespacedName]map[string]int32 - - // Stores for each vpod, a map of zonename to total number of vreplicas placed on all pods located in that zone currently - ZoneSpread map[types.NamespacedName]map[string]int32 - // Pending tracks the number of virtual replicas that haven't been scheduled yet // because there wasn't enough free capacity. Pending map[types.NamespacedName]int32 @@ -114,7 +87,7 @@ func (s *State) SetFree(ordinal int32, value int32) { s.FreeCap[int(ordinal)] = value } -// freeCapacity returns the number of vreplicas that can be used, +// FreeCapacity returns the number of vreplicas that can be used, // up to the last ordinal func (s *State) FreeCapacity() int32 { t := int32(0) @@ -124,20 +97,6 @@ func (s *State) FreeCapacity() int32 { return t } -func (s *State) GetPodInfo(podName string) (zoneName string, nodeName string, err error) { - pod, err := s.PodLister.Get(podName) - if err != nil { - return zoneName, nodeName, err - } - - nodeName = pod.Spec.NodeName - zoneName, ok := s.NodeToZoneMap[nodeName] - if !ok { - return zoneName, nodeName, errors.New("could not find zone") - } - return zoneName, nodeName, nil -} - func (s *State) IsSchedulablePod(ordinal int32) bool { for _, x := range s.SchedulablePods { if x == ordinal { @@ -151,32 +110,24 @@ func (s *State) IsSchedulablePod(ordinal int32) bool { type stateBuilder struct { vpodLister scheduler.VPodLister capacity int32 - schedulerPolicy scheduler.SchedulerPolicyType - nodeLister corev1.NodeLister statefulSetCache *scheduler.ScaleCache statefulSetName string podLister corev1.PodNamespaceLister - schedPolicy *scheduler.SchedulerPolicy - deschedPolicy *scheduler.SchedulerPolicy } // NewStateBuilder returns a StateAccessor recreating the state from scratch each time it is requested -func NewStateBuilder(sfsname string, lister scheduler.VPodLister, podCapacity int32, schedulerPolicy scheduler.SchedulerPolicyType, schedPolicy, deschedPolicy *scheduler.SchedulerPolicy, podlister corev1.PodNamespaceLister, nodeLister corev1.NodeLister, statefulSetCache *scheduler.ScaleCache) StateAccessor { +func NewStateBuilder(sfsname string, lister scheduler.VPodLister, podCapacity int32, podlister corev1.PodNamespaceLister, statefulSetCache *scheduler.ScaleCache) StateAccessor { return &stateBuilder{ vpodLister: lister, capacity: podCapacity, - schedulerPolicy: schedulerPolicy, - nodeLister: nodeLister, statefulSetCache: statefulSetCache, statefulSetName: sfsname, podLister: podlister, - schedPolicy: schedPolicy, - 
deschedPolicy: deschedPolicy, } } -func (s *stateBuilder) State(ctx context.Context, reserved map[types.NamespacedName]map[string]int32) (*State, error) { +func (s *stateBuilder) State(ctx context.Context) (*State, error) { vpods, err := s.vpodLister() if err != nil { return nil, err @@ -191,44 +142,12 @@ func (s *stateBuilder) State(ctx context.Context, reserved map[types.NamespacedN return nil, err } - free := make([]int32, 0) + freeCap := make([]int32, 0) pending := make(map[types.NamespacedName]int32, 4) expectedVReplicasByVPod := make(map[types.NamespacedName]int32, len(vpods)) schedulablePods := sets.NewInt32() - last := int32(-1) - - // keep track of (vpod key, podname) pairs with existing placements - withPlacement := make(map[types.NamespacedName]map[string]bool) podSpread := make(map[types.NamespacedName]map[string]int32) - nodeSpread := make(map[types.NamespacedName]map[string]int32) - zoneSpread := make(map[types.NamespacedName]map[string]int32) - - //Build the node to zone map - nodes, err := s.nodeLister.List(labels.Everything()) - if err != nil { - return nil, err - } - - nodeToZoneMap := make(map[string]string) - zoneMap := make(map[string]struct{}) - for i := 0; i < len(nodes); i++ { - node := nodes[i] - - if isNodeUnschedulable(node) { - // Ignore node that is currently unschedulable. - continue - } - - zoneName, ok := node.GetLabels()[scheduler.ZoneLabel] - if ok && zoneName != "" { - nodeToZoneMap[node.Name] = zoneName - zoneMap[zoneName] = struct{}{} - } else { - nodeToZoneMap[node.Name] = scheduler.UnknownZone - zoneMap[scheduler.UnknownZone] = struct{}{} - } - } for podId := int32(0); podId < scale.Spec.Replicas && s.podLister != nil; podId++ { pod, err := s.podLister.Get(PodNameFromOrdinal(s.statefulSetName, podId)) @@ -242,24 +161,13 @@ func (s *stateBuilder) State(ctx context.Context, reserved map[types.NamespacedN continue } - node, err := s.nodeLister.Get(pod.Spec.NodeName) - if err != nil { - return nil, err - } - - if isNodeUnschedulable(node) { - // Node is marked as Unschedulable - CANNOT SCHEDULE VREPS on a pod running on this node. 
- logger.Debugw("Pod is on an unschedulable node", zap.Any("pod", node)) - continue - } - // Pod has no annotation or not annotated as unschedulable and // not on an unschedulable node, so add to feasible schedulablePods.Insert(podId) } for _, p := range schedulablePods.List() { - free, last = s.updateFreeCapacity(logger, free, last, PodNameFromOrdinal(s.statefulSetName, p), 0) + freeCap = s.updateFreeCapacity(logger, freeCap, PodNameFromOrdinal(s.statefulSetName, p), 0) } // Getting current state from existing placements for all vpods @@ -269,21 +177,13 @@ func (s *stateBuilder) State(ctx context.Context, reserved map[types.NamespacedN pending[vpod.GetKey()] = pendingFromVPod(vpod) expectedVReplicasByVPod[vpod.GetKey()] = vpod.GetVReplicas() - withPlacement[vpod.GetKey()] = make(map[string]bool) podSpread[vpod.GetKey()] = make(map[string]int32) - nodeSpread[vpod.GetKey()] = make(map[string]int32) - zoneSpread[vpod.GetKey()] = make(map[string]int32) for i := 0; i < len(ps); i++ { podName := ps[i].PodName vreplicas := ps[i].VReplicas - // Account for reserved vreplicas - vreplicas = withReserved(vpod.GetKey(), podName, vreplicas, reserved) - - free, last = s.updateFreeCapacity(logger, free, last, podName, vreplicas) - - withPlacement[vpod.GetKey()][podName] = true + freeCap = s.updateFreeCapacity(logger, freeCap, podName, vreplicas) pod, err := s.podLister.Get(podName) if err != nil { @@ -291,47 +191,24 @@ func (s *stateBuilder) State(ctx context.Context, reserved map[types.NamespacedN } if pod != nil && schedulablePods.Has(OrdinalFromPodName(pod.GetName())) { - nodeName := pod.Spec.NodeName //node name for this pod - zoneName := nodeToZoneMap[nodeName] //zone name for this pod podSpread[vpod.GetKey()][podName] = podSpread[vpod.GetKey()][podName] + vreplicas - nodeSpread[vpod.GetKey()][nodeName] = nodeSpread[vpod.GetKey()][nodeName] + vreplicas - zoneSpread[vpod.GetKey()][zoneName] = zoneSpread[vpod.GetKey()][zoneName] + vreplicas } } } - // Account for reserved vreplicas with no prior placements - for key, ps := range reserved { - for podName, rvreplicas := range ps { - if wp, ok := withPlacement[key]; ok { - if _, ok := wp[podName]; ok { - // already accounted for - continue - } - - pod, err := s.podLister.Get(podName) - if err != nil { - logger.Warnw("Failed to get pod", zap.String("podName", podName), zap.Error(err)) - } - - if pod != nil && schedulablePods.Has(OrdinalFromPodName(pod.GetName())) { - nodeName := pod.Spec.NodeName //node name for this pod - zoneName := nodeToZoneMap[nodeName] //zone name for this pod - podSpread[key][podName] = podSpread[key][podName] + rvreplicas - nodeSpread[key][nodeName] = nodeSpread[key][nodeName] + rvreplicas - zoneSpread[key][zoneName] = zoneSpread[key][zoneName] + rvreplicas - } - } - - free, last = s.updateFreeCapacity(logger, free, last, podName, rvreplicas) - } + state := &State{ + FreeCap: freeCap, + SchedulablePods: schedulablePods.List(), + Capacity: s.capacity, + Replicas: scale.Spec.Replicas, + StatefulSetName: s.statefulSetName, + PodLister: s.podLister, + PodSpread: podSpread, + Pending: pending, + ExpectedVReplicaByVPod: expectedVReplicasByVPod, } - state := &State{FreeCap: free, SchedulablePods: schedulablePods.List(), LastOrdinal: last, Capacity: s.capacity, Replicas: scale.Spec.Replicas, NumZones: int32(len(zoneMap)), NumNodes: int32(len(nodeToZoneMap)), - SchedulerPolicy: s.schedulerPolicy, SchedPolicy: s.schedPolicy, DeschedPolicy: s.deschedPolicy, NodeToZoneMap: nodeToZoneMap, StatefulSetName: s.statefulSetName, PodLister: 
s.podLister, - PodSpread: podSpread, NodeSpread: nodeSpread, ZoneSpread: zoneSpread, Pending: pending, ExpectedVReplicaByVPod: expectedVReplicasByVPod} - - logger.Infow("cluster state info", zap.Any("state", state), zap.Any("reserved", toJSONable(reserved))) + logger.Infow("cluster state info", zap.Any("state", state)) return state, nil } @@ -343,23 +220,19 @@ func pendingFromVPod(vpod scheduler.VPod) int32 { return int32(math.Max(float64(0), float64(expected-scheduled))) } -func (s *stateBuilder) updateFreeCapacity(logger *zap.SugaredLogger, free []int32, last int32, podName string, vreplicas int32) ([]int32, int32) { +func (s *stateBuilder) updateFreeCapacity(logger *zap.SugaredLogger, freeCap []int32, podName string, vreplicas int32) []int32 { ordinal := OrdinalFromPodName(podName) - free = grow(free, ordinal, s.capacity) + freeCap = grow(freeCap, ordinal, s.capacity) - free[ordinal] -= vreplicas + freeCap[ordinal] -= vreplicas // Assert the pod is not overcommitted - if free[ordinal] < 0 { + if overcommit := freeCap[ordinal]; overcommit < 0 { // This should not happen anymore. Log as an error but do not interrupt the current scheduling. - logger.Warnw("pod is overcommitted", zap.String("podName", podName), zap.Int32("free", free[ordinal])) - } - - if ordinal > last { - last = ordinal + logger.Warnw("pod is overcommitted", zap.String("podName", podName), zap.Int32("overcommit", overcommit)) } - return free, last + return freeCap } func (s *State) TotalPending() int32 { @@ -392,27 +265,6 @@ func grow(slice []int32, ordinal int32, def int32) []int32 { return slice } -func withReserved(key types.NamespacedName, podName string, committed int32, reserved map[types.NamespacedName]map[string]int32) int32 { - if reserved != nil { - if rps, ok := reserved[key]; ok { - if rvreplicas, ok := rps[podName]; ok { - if committed == rvreplicas { - // new placement has been committed. - delete(rps, podName) - if len(rps) == 0 { - delete(reserved, key) - } - } else { - // new placement hasn't been committed yet. 
Adjust locally - // needed for descheduling vreps using policies - return rvreplicas - } - } - } - } - return committed -} - func isPodUnschedulable(pod *v1.Pod) bool { annotVal, ok := pod.ObjectMeta.Annotations[scheduler.PodAnnotationKey] unschedulable, err := strconv.ParseBool(annotVal) @@ -423,75 +275,32 @@ func isPodUnschedulable(pod *v1.Pod) bool { return isMarkedUnschedulable || isPending } -func isNodeUnschedulable(node *v1.Node) bool { - noExec := &v1.Taint{ - Key: "node.kubernetes.io/unreachable", - Effect: v1.TaintEffectNoExecute, - } - - noSched := &v1.Taint{ - Key: "node.kubernetes.io/unreachable", - Effect: v1.TaintEffectNoSchedule, - } - - return node.Spec.Unschedulable || - contains(node.Spec.Taints, noExec) || - contains(node.Spec.Taints, noSched) -} - -func contains(taints []v1.Taint, taint *v1.Taint) bool { - for _, v := range taints { - if v.MatchTaint(taint) { - return true - } - } - return false -} - func (s *State) MarshalJSON() ([]byte, error) { type S struct { - FreeCap []int32 `json:"freeCap"` - SchedulablePods []int32 `json:"schedulablePods"` - LastOrdinal int32 `json:"lastOrdinal"` - Capacity int32 `json:"capacity"` - Replicas int32 `json:"replicas"` - NumZones int32 `json:"numZones"` - NumNodes int32 `json:"numNodes"` - NodeToZoneMap map[string]string `json:"nodeToZoneMap"` - StatefulSetName string `json:"statefulSetName"` - PodSpread map[string]map[string]int32 `json:"podSpread"` - NodeSpread map[string]map[string]int32 `json:"nodeSpread"` - ZoneSpread map[string]map[string]int32 `json:"zoneSpread"` - SchedulerPolicy scheduler.SchedulerPolicyType `json:"schedulerPolicy"` - SchedPolicy *scheduler.SchedulerPolicy `json:"schedPolicy"` - DeschedPolicy *scheduler.SchedulerPolicy `json:"deschedPolicy"` - Pending map[string]int32 `json:"pending"` + FreeCap []int32 `json:"freeCap"` + SchedulablePods []int32 `json:"schedulablePods"` + Capacity int32 `json:"capacity"` + Replicas int32 `json:"replicas"` + StatefulSetName string `json:"statefulSetName"` + PodSpread map[string]map[string]int32 `json:"podSpread"` + Pending map[string]int32 `json:"pending"` } sj := S{ FreeCap: s.FreeCap, SchedulablePods: s.SchedulablePods, - LastOrdinal: s.LastOrdinal, Capacity: s.Capacity, Replicas: s.Replicas, - NumZones: s.NumZones, - NumNodes: s.NumNodes, - NodeToZoneMap: s.NodeToZoneMap, StatefulSetName: s.StatefulSetName, - PodSpread: toJSONable(s.PodSpread), - NodeSpread: toJSONable(s.NodeSpread), - ZoneSpread: toJSONable(s.ZoneSpread), - SchedulerPolicy: s.SchedulerPolicy, - SchedPolicy: s.SchedPolicy, - DeschedPolicy: s.DeschedPolicy, + PodSpread: ToJSONable(s.PodSpread), Pending: toJSONablePending(s.Pending), } return json.Marshal(sj) } -func toJSONable(ps map[types.NamespacedName]map[string]int32) map[string]map[string]int32 { +func ToJSONable(ps map[types.NamespacedName]map[string]int32) map[string]map[string]int32 { r := make(map[string]map[string]int32, len(ps)) for k, v := range ps { r[k.String()] = v diff --git a/vendor/knative.dev/eventing/pkg/scheduler/statefulset/autoscaler.go b/vendor/knative.dev/eventing/pkg/scheduler/statefulset/autoscaler.go index 3245dabc16..653ec12f15 100644 --- a/vendor/knative.dev/eventing/pkg/scheduler/statefulset/autoscaler.go +++ b/vendor/knative.dev/eventing/pkg/scheduler/statefulset/autoscaler.go @@ -26,8 +26,10 @@ import ( "go.uber.org/zap" v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/integer" "knative.dev/pkg/logging" 
"knative.dev/pkg/reconciler" @@ -62,7 +64,8 @@ type autoscaler struct { evictor scheduler.Evictor // capacity is the total number of virtual replicas available per pod. - capacity int32 + capacity int32 + minReplicas int32 // refreshPeriod is how often the autoscaler tries to scale down the statefulset refreshPeriod time.Duration @@ -113,6 +116,7 @@ func newAutoscaler(cfg *Config, stateAccessor st.StateAccessor, statefulSetCache evictor: cfg.Evictor, trigger: make(chan context.Context, 1), capacity: cfg.PodCapacity, + minReplicas: cfg.MinReplicas, refreshPeriod: cfg.RefreshPeriod, retryPeriod: cfg.RetryPeriod, lock: new(sync.Mutex), @@ -188,7 +192,7 @@ func (a *autoscaler) doautoscale(ctx context.Context, attemptScaleDown bool) err logger := logging.FromContext(ctx).With("component", "autoscaler") ctx = logging.WithLogger(ctx, logger) - state, err := a.stateAccessor.State(ctx, a.getReserved()) + state, err := a.stateAccessor.State(ctx) if err != nil { logger.Info("error while refreshing scheduler state (will retry)", zap.Error(err)) return err @@ -205,46 +209,15 @@ func (a *autoscaler) doautoscale(ctx context.Context, attemptScaleDown bool) err zap.Int32("replicas", scale.Spec.Replicas), zap.Any("state", state)) - var scaleUpFactor, newreplicas, minNumPods int32 - scaleUpFactor = 1 // Non-HA scaling - if state.SchedPolicy != nil && contains(nil, state.SchedPolicy.Priorities, st.AvailabilityZonePriority) { //HA scaling across zones - scaleUpFactor = state.NumZones - } - if state.SchedPolicy != nil && contains(nil, state.SchedPolicy.Priorities, st.AvailabilityNodePriority) { //HA scaling across nodes - scaleUpFactor = state.NumNodes - } - - newreplicas = state.LastOrdinal + 1 // Ideal number - - if state.SchedulerPolicy == scheduler.MAXFILLUP { - newreplicas = int32(math.Ceil(float64(state.TotalExpectedVReplicas()) / float64(state.Capacity))) - } else { - // Take into account pending replicas and pods that are already filled (for even pod spread) - pending := state.TotalPending() - if pending > 0 { - // Make sure to allocate enough pods for holding all pending replicas. 
- if state.SchedPolicy != nil && contains(state.SchedPolicy.Predicates, nil, st.EvenPodSpread) && len(state.FreeCap) > 0 { //HA scaling across pods - leastNonZeroCapacity := a.minNonZeroInt(state.FreeCap) - minNumPods = int32(math.Ceil(float64(pending) / float64(leastNonZeroCapacity))) - } else { - minNumPods = int32(math.Ceil(float64(pending) / float64(a.capacity))) - } - newreplicas += int32(math.Ceil(float64(minNumPods)/float64(scaleUpFactor)) * float64(scaleUpFactor)) - } - - if newreplicas <= state.LastOrdinal { - // Make sure to never scale down past the last ordinal - newreplicas = state.LastOrdinal + scaleUpFactor - } - } + newReplicas := integer.Int32Max(int32(math.Ceil(float64(state.TotalExpectedVReplicas())/float64(state.Capacity))), a.minReplicas) // Only scale down if permitted - if !attemptScaleDown && newreplicas < scale.Spec.Replicas { - newreplicas = scale.Spec.Replicas + if !attemptScaleDown && newReplicas < scale.Spec.Replicas { + newReplicas = scale.Spec.Replicas } - if newreplicas != scale.Spec.Replicas { - scale.Spec.Replicas = newreplicas + if newReplicas != scale.Spec.Replicas { + scale.Spec.Replicas = newReplicas logger.Infow("updating adapter replicas", zap.Int32("replicas", scale.Spec.Replicas)) _, err = a.statefulSetCache.UpdateScale(ctx, a.statefulSetName, scale, metav1.UpdateOptions{}) @@ -255,12 +228,12 @@ func (a *autoscaler) doautoscale(ctx context.Context, attemptScaleDown bool) err } else if attemptScaleDown { // since the number of replicas hasn't changed and time has approached to scale down, // take the opportunity to compact the vreplicas - return a.mayCompact(logger, state, scaleUpFactor) + return a.mayCompact(logger, state) } return nil } -func (a *autoscaler) mayCompact(logger *zap.SugaredLogger, s *st.State, scaleUpFactor int32) error { +func (a *autoscaler) mayCompact(logger *zap.SugaredLogger, s *st.State) error { // This avoids a too aggressive scale down by adding a "grace period" based on the refresh // period @@ -275,53 +248,24 @@ func (a *autoscaler) mayCompact(logger *zap.SugaredLogger, s *st.State, scaleUpF } logger.Debugw("Trying to compact and scale down", - zap.Int32("scaleUpFactor", scaleUpFactor), zap.Any("state", s), ) - // when there is only one pod there is nothing to move or number of pods is just enough! - if s.LastOrdinal < 1 || len(s.SchedulablePods) <= int(scaleUpFactor) { - return nil - } - - if s.SchedulerPolicy == scheduler.MAXFILLUP { - // Determine if there is enough free capacity to - // move all vreplicas placed in the last pod to pods with a lower ordinal - freeCapacity := s.FreeCapacity() - s.Free(s.LastOrdinal) - usedInLastPod := s.Capacity - s.Free(s.LastOrdinal) - - if freeCapacity >= usedInLastPod { - a.lastCompactAttempt = time.Now() - err := a.compact(s, scaleUpFactor) - if err != nil { - return fmt.Errorf("vreplicas compaction failed (scaleUpFactor %d): %w", scaleUpFactor, err) - } - } - - // only do 1 replica at a time to avoid overloading the scheduler with too many - // rescheduling requests. 
- } else if s.SchedPolicy != nil { - //Below calculation can be optimized to work for recovery scenarios when nodes/zones are lost due to failure - freeCapacity := s.FreeCapacity() - usedInLastXPods := s.Capacity * scaleUpFactor - for i := int32(0); i < scaleUpFactor && s.LastOrdinal-i >= 0; i++ { - freeCapacity = freeCapacity - s.Free(s.LastOrdinal-i) - usedInLastXPods = usedInLastXPods - s.Free(s.LastOrdinal-i) - } - - if (freeCapacity >= usedInLastXPods) && //remaining pods can hold all vreps from evicted pods - (s.Replicas-scaleUpFactor >= scaleUpFactor) { //remaining # of pods is enough for HA scaling - a.lastCompactAttempt = time.Now() - err := a.compact(s, scaleUpFactor) - if err != nil { - return fmt.Errorf("vreplicas compaction failed (scaleUpFactor %d): %w", scaleUpFactor, err) - } + // Determine if there are vpods that need compaction + if s.Replicas != int32(len(s.FreeCap)) { + a.lastCompactAttempt = time.Now() + err := a.compact(s) + if err != nil { + return fmt.Errorf("vreplicas compaction failed: %w", err) } } + + // only do 1 replica at a time to avoid overloading the scheduler with too many + // rescheduling requests. return nil } -func (a *autoscaler) compact(s *st.State, scaleUpFactor int32) error { +func (a *autoscaler) compact(s *st.State) error { var pod *v1.Pod vpods, err := a.vpodLister() if err != nil { @@ -331,47 +275,20 @@ func (a *autoscaler) compact(s *st.State, scaleUpFactor int32) error { for _, vpod := range vpods { placements := vpod.GetPlacements() for i := len(placements) - 1; i >= 0; i-- { //start from the last placement - for j := int32(0); j < scaleUpFactor; j++ { - ordinal := st.OrdinalFromPodName(placements[i].PodName) - - if ordinal == s.LastOrdinal-j { - pod, err = s.PodLister.Get(placements[i].PodName) - if err != nil { - return fmt.Errorf("failed to get pod %s: %w", placements[i].PodName, err) - } - - err = a.evictor(pod, vpod, &placements[i]) - if err != nil { - return fmt.Errorf("failed to evict pod %s: %w", pod.Name, err) - } + ordinal := st.OrdinalFromPodName(placements[i].PodName) + + if ordinal >= s.Replicas { + pod, err = s.PodLister.Get(placements[i].PodName) + if err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to get pod %s: %w", placements[i].PodName, err) + } + + err = a.evictor(pod, vpod, &placements[i]) + if err != nil { + return fmt.Errorf("failed to evict pod %s: %w", pod.Name, err) } } } } return nil } - -func contains(preds []scheduler.PredicatePolicy, priors []scheduler.PriorityPolicy, name string) bool { - for _, v := range preds { - if v.Name == name { - return true - } - } - for _, v := range priors { - if v.Name == name { - return true - } - } - - return false -} - -func (a *autoscaler) minNonZeroInt(slice []int32) int32 { - min := a.capacity - for _, v := range slice { - if v < min && v > 0 { - min = v - } - } - return min -} diff --git a/vendor/knative.dev/eventing/pkg/scheduler/statefulset/scheduler.go b/vendor/knative.dev/eventing/pkg/scheduler/statefulset/scheduler.go index 6995d6ff45..cf2834e7c3 100644 --- a/vendor/knative.dev/eventing/pkg/scheduler/statefulset/scheduler.go +++ b/vendor/knative.dev/eventing/pkg/scheduler/statefulset/scheduler.go @@ -18,9 +18,7 @@ package statefulset import ( "context" - "crypto/rand" "fmt" - "math/big" "sort" "sync" "time" @@ -28,11 +26,11 @@ import ( "go.uber.org/zap" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/informers" clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" corev1listers 
"k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" - "k8s.io/utils/integer" "knative.dev/pkg/logging" "knative.dev/pkg/reconciler" @@ -41,19 +39,7 @@ import ( duckv1alpha1 "knative.dev/eventing/pkg/apis/duck/v1alpha1" "knative.dev/eventing/pkg/scheduler" - "knative.dev/eventing/pkg/scheduler/factory" st "knative.dev/eventing/pkg/scheduler/state" - - _ "knative.dev/eventing/pkg/scheduler/plugins/core/availabilitynodepriority" - _ "knative.dev/eventing/pkg/scheduler/plugins/core/availabilityzonepriority" - _ "knative.dev/eventing/pkg/scheduler/plugins/core/evenpodspread" - _ "knative.dev/eventing/pkg/scheduler/plugins/core/lowestordinalpriority" - _ "knative.dev/eventing/pkg/scheduler/plugins/core/podfitsresources" - _ "knative.dev/eventing/pkg/scheduler/plugins/core/removewithavailabilitynodepriority" - _ "knative.dev/eventing/pkg/scheduler/plugins/core/removewithavailabilityzonepriority" - _ "knative.dev/eventing/pkg/scheduler/plugins/core/removewithevenpodspreadpriority" - _ "knative.dev/eventing/pkg/scheduler/plugins/core/removewithhighestordinalpriority" - _ "knative.dev/eventing/pkg/scheduler/plugins/kafka/nomaxresourcecount" ) type GetReserved func() map[types.NamespacedName]map[string]int32 @@ -65,19 +51,16 @@ type Config struct { ScaleCacheConfig scheduler.ScaleCacheConfig `json:"scaleCacheConfig"` // PodCapacity max capacity for each StatefulSet's pod. PodCapacity int32 `json:"podCapacity"` + // MinReplicas is the minimum replicas of the statefulset. + MinReplicas int32 `json:"minReplicas"` // Autoscaler refresh period RefreshPeriod time.Duration `json:"refreshPeriod"` // Autoscaler retry period RetryPeriod time.Duration `json:"retryPeriod"` - SchedulerPolicy scheduler.SchedulerPolicyType `json:"schedulerPolicy"` - SchedPolicy *scheduler.SchedulerPolicy `json:"schedPolicy"` - DeschedPolicy *scheduler.SchedulerPolicy `json:"deschedPolicy"` - Evictor scheduler.Evictor `json:"-"` - VPodLister scheduler.VPodLister `json:"-"` - NodeLister corev1listers.NodeLister `json:"-"` + VPodLister scheduler.VPodLister `json:"-"` // Pod lister for statefulset: StatefulSetNamespace / StatefulSetName PodLister corev1listers.PodNamespaceLister `json:"-"` @@ -93,7 +76,7 @@ func New(ctx context.Context, cfg *Config) (scheduler.Scheduler, error) { scaleCache := scheduler.NewScaleCache(ctx, cfg.StatefulSetNamespace, kubeclient.Get(ctx).AppsV1().StatefulSets(cfg.StatefulSetNamespace), cfg.ScaleCacheConfig) - stateAccessor := st.NewStateBuilder(cfg.StatefulSetName, cfg.VPodLister, cfg.PodCapacity, cfg.SchedulerPolicy, cfg.SchedPolicy, cfg.DeschedPolicy, cfg.PodLister, cfg.NodeLister, scaleCache) + stateAccessor := st.NewStateBuilder(cfg.StatefulSetName, cfg.VPodLister, cfg.PodCapacity, cfg.PodLister, scaleCache) var getReserved GetReserved cfg.getReserved = func() map[types.NamespacedName]map[string]int32 { @@ -118,14 +101,6 @@ func New(ctx context.Context, cfg *Config) (scheduler.Scheduler, error) { type Pending map[types.NamespacedName]int32 -func (p Pending) Total() int32 { - t := int32(0) - for _, vr := range p { - t += vr - } - return t -} - // StatefulSetScheduler is a scheduler placing VPod into statefulset-managed set of pods type StatefulSetScheduler struct { statefulSetName string @@ -152,9 +127,35 @@ var ( // Promote implements reconciler.LeaderAware. 
func (s *StatefulSetScheduler) Promote(b reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { + if !b.Has(ephemeralLeaderElectionObject) { + return nil + } + if v, ok := s.autoscaler.(reconciler.LeaderAware); ok { return v.Promote(b, enq) } + if err := s.initReserved(); err != nil { + return err + } + return nil +} + +func (s *StatefulSetScheduler) initReserved() error { + s.reservedMu.Lock() + defer s.reservedMu.Unlock() + + vPods, err := s.vpodLister() + if err != nil { + return fmt.Errorf("failed to list vPods during init: %w", err) + } + + s.reserved = make(map[types.NamespacedName]map[string]int32, len(vPods)) + for _, vPod := range vPods { + s.reserved[vPod.GetKey()] = make(map[string]int32, len(vPod.GetPlacements())) + for _, placement := range vPod.GetPlacements() { + s.reserved[vPod.GetKey()][placement.PodName] += placement.VReplicas + } + } return nil } @@ -170,7 +171,7 @@ func newStatefulSetScheduler(ctx context.Context, stateAccessor st.StateAccessor, autoscaler Autoscaler) *StatefulSetScheduler { - scheduler := &StatefulSetScheduler{ + s := &StatefulSetScheduler{ statefulSetNamespace: cfg.StatefulSetNamespace, statefulSetName: cfg.StatefulSetName, statefulSetClient: kubeclient.Get(ctx).AppsV1().StatefulSets(cfg.StatefulSetNamespace), @@ -188,13 +189,16 @@ func newStatefulSetScheduler(ctx context.Context, informers.WithNamespace(cfg.StatefulSetNamespace), ) - sif.Apps().V1().StatefulSets().Informer(). + _, err := sif.Apps().V1().StatefulSets().Informer(). AddEventHandler(cache.FilteringResourceEventHandler{ FilterFunc: controller.FilterWithNameAndNamespace(cfg.StatefulSetNamespace, cfg.StatefulSetName), Handler: controller.HandleAll(func(i interface{}) { - scheduler.updateStatefulset(ctx, i) + s.updateStatefulset(ctx, i) }), }) + if err != nil { + logging.FromContext(ctx).Fatalw("Failed to register informer", zap.Error(err)) + } sif.Start(ctx.Done()) _ = sif.WaitForCacheSync(ctx.Done()) @@ -204,7 +208,7 @@ func newStatefulSetScheduler(ctx context.Context, sif.Shutdown() }() - return scheduler + return s } func (s *StatefulSetScheduler) Schedule(ctx context.Context, vpod scheduler.VPod) ([]duckv1alpha1.Placement, error) { @@ -214,9 +218,6 @@ func (s *StatefulSetScheduler) Schedule(ctx context.Context, vpod scheduler.VPod defer s.reservedMu.Unlock() placements, err := s.scheduleVPod(ctx, vpod) - if placements == nil { - return placements, err - } sort.SliceStable(placements, func(i int, j int) bool { return st.OrdinalFromPodName(placements[i].PodName) < st.OrdinalFromPodName(placements[j].PodName) @@ -234,30 +235,42 @@ func (s *StatefulSetScheduler) scheduleVPod(ctx context.Context, vpod scheduler. // Get the current placements state // Quite an expensive operation but safe and simple. - state, err := s.stateAccessor.State(ctx, s.reserved) + state, err := s.stateAccessor.State(ctx) if err != nil { logger.Debug("error while refreshing scheduler state (will retry)", zap.Error(err)) return nil, err } - // Clean up reserved from removed resources that don't appear in the vpod list anymore and have - // no pending resources. 
- reserved := make(map[types.NamespacedName]map[string]int32) - for k, v := range s.reserved { - if pendings, ok := state.Pending[k]; ok { - if pendings == 0 { - reserved[k] = map[string]int32{} - } else { - reserved[k] = v - } + reservedByPodName := make(map[string]int32, 2) + for _, v := range s.reserved { + for podName, vReplicas := range v { + v, _ := reservedByPodName[podName] + reservedByPodName[podName] = vReplicas + v + } + } + + // Use reserved placements as starting point, if we have them. + existingPlacements := make([]duckv1alpha1.Placement, 0) + if placements, ok := s.reserved[vpod.GetKey()]; ok { + existingPlacements = make([]duckv1alpha1.Placement, 0, len(placements)) + for podName, n := range placements { + existingPlacements = append(existingPlacements, duckv1alpha1.Placement{ + PodName: podName, + VReplicas: n, + }) } } - s.reserved = reserved - logger.Debugw("scheduling", zap.Any("state", state)) + sort.SliceStable(existingPlacements, func(i int, j int) bool { + return st.OrdinalFromPodName(existingPlacements[i].PodName) < st.OrdinalFromPodName(existingPlacements[j].PodName) + }) - existingPlacements := vpod.GetPlacements() - var left int32 + logger.Debugw("scheduling state", + zap.Any("state", state), + zap.Any("reservedByPodName", reservedByPodName), + zap.Any("reserved", st.ToJSONable(s.reserved)), + zap.Any("vpod", vpod), + ) // Remove unschedulable or adjust overcommitted pods from placements var placements []duckv1alpha1.Placement @@ -272,23 +285,26 @@ func (s *StatefulSetScheduler) scheduleVPod(ctx context.Context, vpod scheduler. } // Handle overcommitted pods. - if state.Free(ordinal) < 0 { + reserved, _ := reservedByPodName[p.PodName] + if state.Capacity-reserved < 0 { // vr > free => vr: 9, overcommit 4 -> free: 0, vr: 5, pending: +4 // vr = free => vr: 4, overcommit 4 -> free: 0, vr: 0, pending: +4 // vr < free => vr: 3, overcommit 4 -> free: -1, vr: 0, pending: +3 - overcommit := -state.FreeCap[ordinal] + overcommit := -(state.Capacity - reserved) logger.Debugw("overcommit", zap.Any("overcommit", overcommit), zap.Any("placement", p)) if p.VReplicas >= overcommit { state.SetFree(ordinal, 0) state.Pending[vpod.GetKey()] += overcommit + reservedByPodName[p.PodName] -= overcommit p.VReplicas = p.VReplicas - overcommit } else { state.SetFree(ordinal, p.VReplicas-overcommit) state.Pending[vpod.GetKey()] += p.VReplicas + reservedByPodName[p.PodName] -= p.VReplicas p.VReplicas = 0 } @@ -314,51 +330,25 @@ func (s *StatefulSetScheduler) scheduleVPod(ctx context.Context, vpod scheduler. 
return placements, nil } - if state.SchedulerPolicy != "" { - // Need less => scale down - if tr > vpod.GetVReplicas() { - logger.Debugw("scaling down", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()), - zap.Any("placements", placements), - zap.Any("existingPlacements", existingPlacements)) - - placements = s.removeReplicas(tr-vpod.GetVReplicas(), placements) - - // Do not trigger the autoscaler to avoid unnecessary churn - - return placements, nil - } - - // Need more => scale up - logger.Debugw("scaling up", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()), + // Need less => scale down + if tr > vpod.GetVReplicas() { + logger.Debugw("scaling down", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()), zap.Any("placements", placements), zap.Any("existingPlacements", existingPlacements)) - placements, left = s.addReplicas(state, vpod.GetVReplicas()-tr, placements) + placements = s.removeReplicas(tr-vpod.GetVReplicas(), placements) - } else { //Predicates and priorities must be used for scheduling - // Need less => scale down - if tr > vpod.GetVReplicas() && state.DeschedPolicy != nil { - logger.Infow("scaling down", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()), - zap.Any("placements", placements), - zap.Any("existingPlacements", existingPlacements)) - placements = s.removeReplicasWithPolicy(ctx, vpod, tr-vpod.GetVReplicas(), placements) + // Do not trigger the autoscaler to avoid unnecessary churn - // Do not trigger the autoscaler to avoid unnecessary churn - - return placements, nil - } + return placements, nil + } - if state.SchedPolicy != nil { + // Need more => scale up + logger.Debugw("scaling up", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()), + zap.Any("placements", placements), + zap.Any("existingPlacements", existingPlacements)) - // Need more => scale up - // rebalancing needed for all vreps most likely since there are pending vreps from previous reconciliation - // can fall here when vreps scaled up or after eviction - logger.Infow("scaling up with a rebalance (if needed)", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()), - zap.Any("placements", placements), - zap.Any("existingPlacements", existingPlacements)) - placements, left = s.rebalanceReplicasWithPolicy(ctx, vpod, vpod.GetVReplicas(), placements) - } - } + placements, left := s.addReplicas(state, reservedByPodName, vpod, vpod.GetVReplicas()-tr, placements) if left > 0 { // Give time for the autoscaler to do its job @@ -370,12 +360,6 @@ func (s *StatefulSetScheduler) scheduleVPod(ctx context.Context, vpod scheduler. s.autoscaler.Autoscale(ctx) } - if state.SchedulerPolicy == "" && state.SchedPolicy != nil { - logger.Info("reverting to previous placements") - s.reservePlacements(vpod, existingPlacements) // rebalancing doesn't care about new placements since all vreps will be re-placed - return existingPlacements, s.notEnoughPodReplicas(left) // requeue to wait for the autoscaler to do its job - } - return placements, s.notEnoughPodReplicas(left) } @@ -384,408 +368,125 @@ func (s *StatefulSetScheduler) scheduleVPod(ctx context.Context, vpod scheduler. 
return placements, nil } -func toJSONable(pending map[types.NamespacedName]int32) map[string]int32 { - r := make(map[string]int32, len(pending)) - for k, v := range pending { - r[k.String()] = v - } - return r -} - -func (s *StatefulSetScheduler) rebalanceReplicasWithPolicy(ctx context.Context, vpod scheduler.VPod, diff int32, placements []duckv1alpha1.Placement) ([]duckv1alpha1.Placement, int32) { - s.makeZeroPlacements(vpod, placements) - placements, diff = s.addReplicasWithPolicy(ctx, vpod, diff, make([]duckv1alpha1.Placement, 0)) //start fresh with a new placements list - - return placements, diff -} - -func (s *StatefulSetScheduler) removeReplicasWithPolicy(ctx context.Context, vpod scheduler.VPod, diff int32, placements []duckv1alpha1.Placement) []duckv1alpha1.Placement { - logger := logging.FromContext(ctx).Named("remove replicas with policy") - numVreps := diff - - for i := int32(0); i < numVreps; i++ { //deschedule one vreplica at a time - state, err := s.stateAccessor.State(ctx, s.reserved) - if err != nil { - logger.Info("error while refreshing scheduler state (will retry)", zap.Error(err)) - return placements - } - - feasiblePods := s.findFeasiblePods(ctx, state, vpod, state.DeschedPolicy) - feasiblePods = s.removePodsNotInPlacement(vpod, feasiblePods) - if len(feasiblePods) == 1 { //nothing to score, remove vrep from that pod - placementPodID := feasiblePods[0] - logger.Infof("Selected pod #%v to remove vreplica #%v from", placementPodID, i) - placements = s.removeSelectionFromPlacements(placementPodID, placements) - state.SetFree(placementPodID, state.Free(placementPodID)+1) - s.reservePlacements(vpod, placements) - continue - } - - priorityList, err := s.prioritizePods(ctx, state, vpod, feasiblePods, state.DeschedPolicy) - if err != nil { - logger.Info("error while scoring pods using priorities", zap.Error(err)) - s.reservePlacements(vpod, placements) - break - } - - placementPodID, err := s.selectPod(priorityList) - if err != nil { - logger.Info("error while selecting the placement pod", zap.Error(err)) - s.reservePlacements(vpod, placements) - break - } - - logger.Infof("Selected pod #%v to remove vreplica #%v from", placementPodID, i) - placements = s.removeSelectionFromPlacements(placementPodID, placements) - state.SetFree(placementPodID, state.Free(placementPodID)+1) - s.reservePlacements(vpod, placements) - } - return placements -} - -func (s *StatefulSetScheduler) removeSelectionFromPlacements(placementPodID int32, placements []duckv1alpha1.Placement) []duckv1alpha1.Placement { +func (s *StatefulSetScheduler) removeReplicas(diff int32, placements []duckv1alpha1.Placement) []duckv1alpha1.Placement { newPlacements := make([]duckv1alpha1.Placement, 0, len(placements)) - - for i := 0; i < len(placements); i++ { - ordinal := st.OrdinalFromPodName(placements[i].PodName) - if placementPodID == ordinal { - if placements[i].VReplicas == 1 { - // remove the entire placement - } else { - newPlacements = append(newPlacements, duckv1alpha1.Placement{ - PodName: placements[i].PodName, - VReplicas: placements[i].VReplicas - 1, - }) - } + for i := len(placements) - 1; i > -1; i-- { + if diff >= placements[i].VReplicas { + // remove the entire placement + diff -= placements[i].VReplicas } else { newPlacements = append(newPlacements, duckv1alpha1.Placement{ PodName: placements[i].PodName, - VReplicas: placements[i].VReplicas, + VReplicas: placements[i].VReplicas - diff, }) + diff = 0 } } return newPlacements } -func (s *StatefulSetScheduler) addReplicasWithPolicy(ctx context.Context, 
vpod scheduler.VPod, diff int32, placements []duckv1alpha1.Placement) ([]duckv1alpha1.Placement, int32) { - logger := logging.FromContext(ctx).Named("add replicas with policy") - - numVreps := diff - for i := int32(0); i < numVreps; i++ { //schedule one vreplica at a time (find most suitable pod placement satisying predicates with high score) - // Get the current placements state - state, err := s.stateAccessor.State(ctx, s.reserved) - if err != nil { - logger.Info("error while refreshing scheduler state (will retry)", zap.Error(err)) - return placements, diff - } - - if s.replicas == 0 { //no pods to filter - logger.Infow("no pods available in statefulset") - s.reservePlacements(vpod, placements) - diff = numVreps - i //for autoscaling up - break //end the iteration for all vreps since there are not pods - } - - feasiblePods := s.findFeasiblePods(ctx, state, vpod, state.SchedPolicy) - if len(feasiblePods) == 0 { //no pods available to schedule this vreplica - logger.Info("no feasible pods available to schedule this vreplica") - s.reservePlacements(vpod, placements) - diff = numVreps - i //for autoscaling up and possible rebalancing - break - } - - /* if len(feasiblePods) == 1 { //nothing to score, place vrep on that pod (Update: for HA, must run HA scorers) - placementPodID := feasiblePods[0] - logger.Infof("Selected pod #%v for vreplica #%v ", placementPodID, i) - placements = s.addSelectionToPlacements(placementPodID, placements) - //state.SetFree(placementPodID, state.Free(placementPodID)-1) - s.reservePlacements(vpod, placements) - diff-- - continue - } */ - - priorityList, err := s.prioritizePods(ctx, state, vpod, feasiblePods, state.SchedPolicy) - if err != nil { - logger.Info("error while scoring pods using priorities", zap.Error(err)) - s.reservePlacements(vpod, placements) - diff = numVreps - i //for autoscaling up and possible rebalancing - break - } - - placementPodID, err := s.selectPod(priorityList) - if err != nil { - logger.Info("error while selecting the placement pod", zap.Error(err)) - s.reservePlacements(vpod, placements) - diff = numVreps - i //for autoscaling up and possible rebalancing - break - } - - logger.Infof("Selected pod #%v for vreplica #%v", placementPodID, i) - placements = s.addSelectionToPlacements(placementPodID, placements) - state.SetFree(placementPodID, state.Free(placementPodID)-1) - s.reservePlacements(vpod, placements) - diff-- +func (s *StatefulSetScheduler) addReplicas(states *st.State, reservedByPodName map[string]int32, vpod scheduler.VPod, diff int32, placements []duckv1alpha1.Placement) ([]duckv1alpha1.Placement, int32) { + if states.Replicas <= 0 { + return placements, diff } - return placements, diff -} -func (s *StatefulSetScheduler) addSelectionToPlacements(placementPodID int32, placements []duckv1alpha1.Placement) []duckv1alpha1.Placement { - seen := false - - for i := 0; i < len(placements); i++ { - ordinal := st.OrdinalFromPodName(placements[i].PodName) - if placementPodID == ordinal { - seen = true - placements[i].VReplicas = placements[i].VReplicas + 1 - } - } - if !seen { - placements = append(placements, duckv1alpha1.Placement{ - PodName: st.PodNameFromOrdinal(s.statefulSetName, placementPodID), - VReplicas: 1, - }) - } - return placements -} + newPlacements := make([]duckv1alpha1.Placement, 0, len(placements)) -// findFeasiblePods finds the pods that fit the filter plugins -func (s *StatefulSetScheduler) findFeasiblePods(ctx context.Context, state *st.State, vpod scheduler.VPod, policy *scheduler.SchedulerPolicy) []int32 { - 
feasiblePods := make([]int32, 0) - for _, podId := range state.SchedulablePods { - statusMap := s.RunFilterPlugins(ctx, state, vpod, podId, policy) - status := statusMap.Merge() - if status.IsSuccess() { - feasiblePods = append(feasiblePods, podId) - } + // Preserve existing placements + for _, p := range placements { + newPlacements = append(newPlacements, *p.DeepCopy()) } - return feasiblePods -} + candidates := s.candidatesOrdered(states, vpod, placements) -// removePodsNotInPlacement removes pods that do not have vreplicas placed -func (s *StatefulSetScheduler) removePodsNotInPlacement(vpod scheduler.VPod, feasiblePods []int32) []int32 { - newFeasiblePods := make([]int32, 0) - for _, e := range vpod.GetPlacements() { - for _, podID := range feasiblePods { - if podID == st.OrdinalFromPodName(e.PodName) { //if pod is in current placement list - newFeasiblePods = append(newFeasiblePods, podID) + // Spread replicas in as many candidates as possible. + foundFreeCandidate := true + for diff > 0 && foundFreeCandidate { + foundFreeCandidate = false + for _, ordinal := range candidates { + if diff <= 0 { + break } - } - } - - return newFeasiblePods -} -// prioritizePods prioritizes the pods by running the score plugins, which return a score for each pod. -// The scores from each plugin are added together to make the score for that pod. -func (s *StatefulSetScheduler) prioritizePods(ctx context.Context, states *st.State, vpod scheduler.VPod, feasiblePods []int32, policy *scheduler.SchedulerPolicy) (st.PodScoreList, error) { - logger := logging.FromContext(ctx).Named("prioritize all feasible pods") - - // If no priority configs are provided, then all pods will have a score of one - result := make(st.PodScoreList, 0, len(feasiblePods)) - if !s.HasScorePlugins(states, policy) { - for _, podID := range feasiblePods { - result = append(result, st.PodScore{ - ID: podID, - Score: 1, - }) - } - return result, nil - } + podName := st.PodNameFromOrdinal(states.StatefulSetName, ordinal) + reserved, _ := reservedByPodName[podName] + // Is there space? + if states.Capacity-reserved > 0 { + foundFreeCandidate = true + allocation := int32(1) - scoresMap, scoreStatus := s.RunScorePlugins(ctx, states, vpod, feasiblePods, policy) - if !scoreStatus.IsSuccess() { - logger.Infof("FAILURE! Cannot score feasible pods due to plugin errors %v", scoreStatus.AsError()) - return nil, scoreStatus.AsError() - } - - // Summarize all scores. 
- for i := range feasiblePods { - result = append(result, st.PodScore{ID: feasiblePods[i], Score: 0}) - for j := range scoresMap { - result[i].Score += scoresMap[j][i].Score - } - } - - return result, nil -} + newPlacements = upsertPlacements(newPlacements, duckv1alpha1.Placement{ + PodName: st.PodNameFromOrdinal(states.StatefulSetName, ordinal), + VReplicas: allocation, + }) -// selectPod takes a prioritized list of pods and then picks one -func (s *StatefulSetScheduler) selectPod(podScoreList st.PodScoreList) (int32, error) { - if len(podScoreList) == 0 { - return -1, fmt.Errorf("empty priority list") //no selected pod - } - - maxScore := podScoreList[0].Score - selected := podScoreList[0].ID - cntOfMaxScore := int64(1) - for _, ps := range podScoreList[1:] { - if ps.Score > maxScore { - maxScore = ps.Score - selected = ps.ID - cntOfMaxScore = 1 - } else if ps.Score == maxScore { //if equal scores, randomly picks one - cntOfMaxScore++ - randNum, err := rand.Int(rand.Reader, big.NewInt(cntOfMaxScore)) - if err != nil { - return -1, fmt.Errorf("failed to generate random number") - } - if randNum.Int64() == int64(0) { - selected = ps.ID + diff -= allocation + reservedByPodName[podName] += allocation } } } - return selected, nil -} -// RunFilterPlugins runs the set of configured Filter plugins for a vrep on the given pod. -// If any of these plugins doesn't return "Success", the pod is not suitable for placing the vrep. -// Meanwhile, the failure message and status are set for the given pod. -func (s *StatefulSetScheduler) RunFilterPlugins(ctx context.Context, states *st.State, vpod scheduler.VPod, podID int32, policy *scheduler.SchedulerPolicy) st.PluginToStatus { - logger := logging.FromContext(ctx).Named("run all filter plugins") - - statuses := make(st.PluginToStatus) - for _, plugin := range policy.Predicates { - pl, err := factory.GetFilterPlugin(plugin.Name) - if err != nil { - logger.Error("Could not find filter plugin in Registry: ", plugin.Name) - continue - } - - //logger.Infof("Going to run filter plugin: %s using state: %v ", pl.Name(), states) - pluginStatus := s.runFilterPlugin(ctx, pl, plugin.Args, states, vpod, podID) - if !pluginStatus.IsSuccess() { - if !pluginStatus.IsUnschedulable() { - errStatus := st.NewStatus(st.Error, fmt.Sprintf("running %q filter plugin for pod %q failed with: %v", pl.Name(), podID, pluginStatus.Message())) - return map[string]*st.Status{pl.Name(): errStatus} //TODO: if one plugin fails, then no more plugins are run - } - statuses[pl.Name()] = pluginStatus - return statuses - } + if len(newPlacements) == 0 { + return nil, diff } - - return statuses + return newPlacements, diff } -func (s *StatefulSetScheduler) runFilterPlugin(ctx context.Context, pl st.FilterPlugin, args interface{}, states *st.State, vpod scheduler.VPod, podID int32) *st.Status { - status := pl.Filter(ctx, args, states, vpod.GetKey(), podID) - return status -} +func (s *StatefulSetScheduler) candidatesOrdered(states *st.State, vpod scheduler.VPod, placements []duckv1alpha1.Placement) []int32 { + existingPlacements := sets.New[string]() + candidates := make([]int32, len(states.SchedulablePods)) -// RunScorePlugins runs the set of configured scoring plugins. It returns a list that stores for each scoring plugin name the corresponding PodScoreList(s). -// It also returns *Status, which is set to non-success if any of the plugins returns a non-success status. 
-func (s *StatefulSetScheduler) RunScorePlugins(ctx context.Context, states *st.State, vpod scheduler.VPod, feasiblePods []int32, policy *scheduler.SchedulerPolicy) (st.PluginToPodScores, *st.Status) { - logger := logging.FromContext(ctx).Named("run all score plugins") + firstIdx := 0 + lastIdx := len(candidates) - 1 - pluginToPodScores := make(st.PluginToPodScores, len(policy.Priorities)) - for _, plugin := range policy.Priorities { - pl, err := factory.GetScorePlugin(plugin.Name) - if err != nil { - logger.Error("Could not find score plugin in registry: ", plugin.Name) + // De-prioritize existing placements pods, add existing placements to the tail of the candidates. + // Start from the last one so that within the "existing replicas" group, we prioritize lower ordinals + // to reduce compaction. + for i := len(placements) - 1; i >= 0; i-- { + placement := placements[i] + ordinal := st.OrdinalFromPodName(placement.PodName) + if !states.IsSchedulablePod(ordinal) { continue } - - //logger.Infof("Going to run score plugin: %s using state: %v ", pl.Name(), states) - pluginToPodScores[pl.Name()] = make(st.PodScoreList, len(feasiblePods)) - for index, podID := range feasiblePods { - score, pluginStatus := s.runScorePlugin(ctx, pl, plugin.Args, states, feasiblePods, vpod, podID) - if !pluginStatus.IsSuccess() { - errStatus := st.NewStatus(st.Error, fmt.Sprintf("running %q scoring plugin for pod %q failed with: %v", pl.Name(), podID, pluginStatus.AsError())) - return pluginToPodScores, errStatus //TODO: if one plugin fails, then no more plugins are run - } - - score = score * plugin.Weight //WEIGHED SCORE VALUE - //logger.Infof("scoring plugin %q produced score %v for pod %q: %v", pl.Name(), score, podID, pluginStatus) - pluginToPodScores[pl.Name()][index] = st.PodScore{ - ID: podID, - Score: score, - } - } - - status := pl.ScoreExtensions().NormalizeScore(ctx, states, pluginToPodScores[pl.Name()]) //NORMALIZE SCORES FOR ALL FEASIBLE PODS - if !status.IsSuccess() { - errStatus := st.NewStatus(st.Error, fmt.Sprintf("running %q scoring plugin failed with: %v", pl.Name(), status.AsError())) - return pluginToPodScores, errStatus + // This should really never happen as placements are de-duped, however, better to handle + // edge cases in case the prerequisite doesn't hold in the future. + if existingPlacements.Has(placement.PodName) { + continue } + candidates[lastIdx] = ordinal + lastIdx-- + existingPlacements.Insert(placement.PodName) } - return pluginToPodScores, st.NewStatus(st.Success) -} - -func (s *StatefulSetScheduler) runScorePlugin(ctx context.Context, pl st.ScorePlugin, args interface{}, states *st.State, feasiblePods []int32, vpod scheduler.VPod, podID int32) (uint64, *st.Status) { - score, status := pl.Score(ctx, args, states, feasiblePods, vpod.GetKey(), podID) - return score, status -} - -// HasScorePlugins returns true if at least one score plugin is defined. 
-func (s *StatefulSetScheduler) HasScorePlugins(state *st.State, policy *scheduler.SchedulerPolicy) bool { - return len(policy.Priorities) > 0 -} - -func (s *StatefulSetScheduler) removeReplicas(diff int32, placements []duckv1alpha1.Placement) []duckv1alpha1.Placement { - newPlacements := make([]duckv1alpha1.Placement, 0, len(placements)) - for i := len(placements) - 1; i > -1; i-- { - if diff >= placements[i].VReplicas { - // remove the entire placement - diff -= placements[i].VReplicas - } else { - newPlacements = append(newPlacements, duckv1alpha1.Placement{ - PodName: placements[i].PodName, - VReplicas: placements[i].VReplicas - diff, - }) - diff = 0 + // Prioritize reserved placements that don't appear in the committed placements. + if reserved, ok := s.reserved[vpod.GetKey()]; ok { + for podName := range reserved { + if !states.IsSchedulablePod(st.OrdinalFromPodName(podName)) { + continue + } + if existingPlacements.Has(podName) { + continue + } + candidates[firstIdx] = st.OrdinalFromPodName(podName) + firstIdx++ + existingPlacements.Insert(podName) } } - return newPlacements -} - -func (s *StatefulSetScheduler) addReplicas(states *st.State, diff int32, placements []duckv1alpha1.Placement) ([]duckv1alpha1.Placement, int32) { - // Pod affinity algorithm: prefer adding replicas to existing pods before considering other replicas - newPlacements := make([]duckv1alpha1.Placement, 0, len(placements)) - - // Add to existing - for i := 0; i < len(placements); i++ { - podName := placements[i].PodName - ordinal := st.OrdinalFromPodName(podName) - - // Is there space in PodName? - f := states.Free(ordinal) - if diff >= 0 && f > 0 { - allocation := integer.Int32Min(f, diff) - newPlacements = append(newPlacements, duckv1alpha1.Placement{ - PodName: podName, - VReplicas: placements[i].VReplicas + allocation, - }) - diff -= allocation - states.SetFree(ordinal, f-allocation) - } else { - newPlacements = append(newPlacements, placements[i]) + // Add all the ordinals to the candidates list. + // De-prioritize the last ordinals over lower ordinals so that we reduce the chances for compaction. 
+ for ordinal := s.replicas - 1; ordinal >= 0; ordinal-- { + if !states.IsSchedulablePod(ordinal) { + continue } - } - - if diff > 0 { - // Needs to allocate replicas to additional pods - for ordinal := int32(0); ordinal < s.replicas; ordinal++ { - f := states.Free(ordinal) - if f > 0 { - allocation := integer.Int32Min(f, diff) - newPlacements = append(newPlacements, duckv1alpha1.Placement{ - PodName: st.PodNameFromOrdinal(s.statefulSetName, ordinal), - VReplicas: allocation, - }) - - diff -= allocation - states.SetFree(ordinal, f-allocation) - } - - if diff == 0 { - break - } + podName := st.PodNameFromOrdinal(states.StatefulSetName, ordinal) + if existingPlacements.Has(podName) { + continue } + candidates[lastIdx] = ordinal + lastIdx-- } - - return newPlacements, diff + return candidates } func (s *StatefulSetScheduler) updateStatefulset(ctx context.Context, obj interface{}) { @@ -808,31 +509,17 @@ func (s *StatefulSetScheduler) updateStatefulset(ctx context.Context, obj interf func (s *StatefulSetScheduler) reservePlacements(vpod scheduler.VPod, placements []duckv1alpha1.Placement) { if len(placements) == 0 { // clear our old placements in reserved - s.reserved[vpod.GetKey()] = make(map[string]int32) + delete(s.reserved, vpod.GetKey()) + return } + s.reserved[vpod.GetKey()] = make(map[string]int32, len(placements)) + for _, p := range placements { - // note: track all vreplicas, not only the new ones since - // the next time `state()` is called some vreplicas might - // have been committed. - if _, ok := s.reserved[vpod.GetKey()]; !ok { - s.reserved[vpod.GetKey()] = make(map[string]int32) - } s.reserved[vpod.GetKey()][p.PodName] = p.VReplicas } } -func (s *StatefulSetScheduler) makeZeroPlacements(vpod scheduler.VPod, placements []duckv1alpha1.Placement) { - newPlacements := make([]duckv1alpha1.Placement, len(placements)) - for i := 0; i < len(placements); i++ { - newPlacements[i].PodName = placements[i].PodName - newPlacements[i].VReplicas = 0 - } - // This is necessary to make sure State() zeroes out initial pod/node/zone spread and - // free capacity when there are existing placements for a vpod - s.reservePlacements(vpod, newPlacements) -} - // newNotEnoughPodReplicas returns an error explaining what is the problem, what are the actions we're taking // to try to fix it (retry), wrapping a controller.requeueKeyError which signals to ReconcileKind to requeue the // object after a given delay. 
@@ -859,3 +546,18 @@ func (s *StatefulSetScheduler) Reserved() map[types.NamespacedName]map[string]in return r } + +func upsertPlacements(placements []duckv1alpha1.Placement, placement duckv1alpha1.Placement) []duckv1alpha1.Placement { + found := false + for i := range placements { + if placements[i].PodName == placement.PodName { + placements[i].VReplicas = placements[i].VReplicas + placement.VReplicas + found = true + break + } + } + if !found { + placements = append(placements, placement) + } + return placements +} diff --git a/vendor/knative.dev/eventing/test/rekt/features/broker/feature.go b/vendor/knative.dev/eventing/test/rekt/features/broker/feature.go index 3704da705b..9d7f4f09a1 100644 --- a/vendor/knative.dev/eventing/test/rekt/features/broker/feature.go +++ b/vendor/knative.dev/eventing/test/rekt/features/broker/feature.go @@ -19,6 +19,7 @@ package broker import ( "context" "encoding/base64" + "encoding/json" "fmt" "strings" @@ -26,14 +27,18 @@ import ( "github.com/cloudevents/sdk-go/v2/binding/spec" "github.com/cloudevents/sdk-go/v2/test" "github.com/google/uuid" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/injection/clients/dynamicclient" "knative.dev/reconciler-test/pkg/environment" "knative.dev/reconciler-test/pkg/state" duckv1 "knative.dev/eventing/pkg/apis/duck/v1" eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1" + messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1" "knative.dev/eventing/test/rekt/features" "knative.dev/eventing/test/rekt/resources/broker" "knative.dev/eventing/test/rekt/resources/channel" + "knative.dev/eventing/test/rekt/resources/channel_impl" "knative.dev/eventing/test/rekt/resources/subscription" "knative.dev/eventing/test/rekt/resources/trigger" @@ -42,7 +47,7 @@ import ( "knative.dev/pkg/ptr" "knative.dev/reconciler-test/pkg/eventshub" - eventasssert "knative.dev/reconciler-test/pkg/eventshub/assert" + eventassert "knative.dev/reconciler-test/pkg/eventshub/assert" "knative.dev/reconciler-test/pkg/feature" "knative.dev/reconciler-test/pkg/manifest" "knative.dev/reconciler-test/pkg/resources/service" @@ -173,7 +178,7 @@ func ManyTriggers() *feature.FeatureSet { eventshub.InputEvent(eventToSend), )) - f.Assert("source sent event", eventasssert.OnStore(source). + f.Assert("source sent event", eventassert.OnStore(source). MatchSentEvent(test.HasId(eventToSend.ID())). AtLeast(1), ) @@ -185,7 +190,7 @@ func ManyTriggers() *feature.FeatureSet { // Check on every dumper whether we should expect this event or not if eventFilter.toEventMatcher()(eventToSend) == nil { f.Assert(fmt.Sprintf("%s receive event %s", sink, eventToSend.ID()), func(ctx context.Context, t feature.T) { - eventasssert.OnStore(sink). + eventassert.OnStore(sink). Match(features.HasKnNamespaceHeader(environment.FromContext(ctx).Namespace())). MatchReceivedEvent(test.HasId(eventToSend.ID())). MatchReceivedEvent(matcher). 
@@ -335,12 +340,12 @@ func brokerChannelFlowWithTransformation(createSubscriberFn func(ref *v1.KRefere eventshub.InputEvent(eventToSend), )) - eventMatcher := eventasssert.MatchEvent( + eventMatcher := eventassert.MatchEvent( test.HasSource(eventSource), test.HasType(eventType), test.HasData([]byte(eventBody)), ) - transformEventMatcher := eventasssert.MatchEvent( + transformEventMatcher := eventassert.MatchEvent( test.HasSource(transformedEventSource), test.HasType(transformedEventType), test.HasData([]byte(transformedBody)), @@ -348,19 +353,19 @@ func brokerChannelFlowWithTransformation(createSubscriberFn func(ref *v1.KRefere f.Stable("(Trigger1 point to) sink1 has all the events"). Must("delivers original events", - eventasssert.OnStore(sink1).Match(eventMatcher).AtLeast(1)) + eventassert.OnStore(sink1).Match(eventMatcher).AtLeast(1)) f.Stable("(Trigger2 point to) sink2 has all the events"). Must("delivers original events", - eventasssert.OnStore(sink2).Match(eventMatcher).AtLeast(1)). + eventassert.OnStore(sink2).Match(eventMatcher).AtLeast(1)). Must("delivers transformation events", - eventasssert.OnStore(sink2).Match(transformEventMatcher).AtLeast(1)) + eventassert.OnStore(sink2).Match(transformEventMatcher).AtLeast(1)) f.Stable("(Trigger3 point to) Channel's subscriber just has events after transformation"). Must("delivers transformation events", - eventasssert.OnStore(sink3).Match(transformEventMatcher).AtLeast(1)). + eventassert.OnStore(sink3).Match(transformEventMatcher).AtLeast(1)). Must("delivers original events", - eventasssert.OnStore(sink3).Match(eventMatcher).Not()) + eventassert.OnStore(sink3).Match(eventMatcher).Not()) return f } @@ -482,13 +487,13 @@ func BrokerEventTransformationForTriggerAssert(f *feature.Feature, eventshub.InputEvent(cfg.EventToSend), )) - eventMatcher := eventasssert.MatchEvent( + eventMatcher := eventassert.MatchEvent( test.HasId(cfg.EventToSend.ID()), test.HasSource(cfg.EventToSend.Source()), test.HasType(cfg.EventToSend.Type()), test.HasData(cfg.EventToSend.Data()), ) - transformEventMatcher := eventasssert.MatchEvent( + transformEventMatcher := eventassert.MatchEvent( test.HasSource(cfg.TransformedEvent.Source()), test.HasType(cfg.TransformedEvent.Type()), test.HasData(cfg.TransformedEvent.Data()), @@ -496,13 +501,13 @@ func BrokerEventTransformationForTriggerAssert(f *feature.Feature, f.Stable("Trigger has filtered all transformed events"). Must("trigger 1 delivers original events", - eventasssert.OnStore(cfg.Sink1).Match(eventMatcher).AtLeast(1)). + eventassert.OnStore(cfg.Sink1).Match(eventMatcher).AtLeast(1)). Must("trigger 1 does not deliver transformed events", - eventasssert.OnStore(cfg.Sink1).Match(transformEventMatcher).Not()). + eventassert.OnStore(cfg.Sink1).Match(transformEventMatcher).Not()). Must("trigger 2 delivers transformed events", - eventasssert.OnStore(cfg.Sink2).Match(transformEventMatcher).AtLeast(1)). + eventassert.OnStore(cfg.Sink2).Match(transformEventMatcher).AtLeast(1)). Must("trigger 2 does not deliver original events", - eventasssert.OnStore(cfg.Sink2).Match(eventMatcher).Not()) + eventassert.OnStore(cfg.Sink2).Match(eventMatcher).Not()) } func BrokerPreferHeaderCheck() *feature.Feature { @@ -544,13 +549,113 @@ func BrokerPreferHeaderCheck() *feature.Feature { f.Stable("test message without explicit prefer header should have the header"). 
Must("delivers events", - eventasssert.OnStore(sink).Match( - eventasssert.HasAdditionalHeader("Prefer", "reply"), + eventassert.OnStore(sink).Match( + eventassert.HasAdditionalHeader("Prefer", "reply"), ).AtLeast(1)) return f } +func PropagatesMetadata() *feature.Feature { + f := feature.NewFeatureNamed("Broker PreferHeader Check") + + if !broker.EnvCfg.IsMTChannelBasedBroker() { + f.Assert("class is not MTChannelBasedBroker, skipping", func(ctx context.Context, t feature.T) {}) + return f + } + + source := feature.MakeRandomK8sName("source") + sink := feature.MakeRandomK8sName("sink") + via := feature.MakeRandomK8sName("via") + + key := messagingv1.AsyncHandlerAnnotation + value := "false" + + event := test.FullEvent() + event.SetID(uuid.New().String()) + + //Install the broker + brokerName := feature.MakeRandomK8sName("broker") + f.Setup("install broker", broker.Install(brokerName, append(broker.WithEnvConfig(), broker.WithAnnotations( + map[string]interface{}{key: value}, + ))...)) + f.Requirement("broker is ready", broker.IsReady(brokerName)) + f.Requirement("broker is addressable", broker.IsAddressable(brokerName)) + + f.Setup("install sink", eventshub.Install(sink, eventshub.StartReceiver)) + + // Point the Trigger subscriber to the sink svc. + cfg := []manifest.CfgFn{trigger.WithSubscriber(service.AsKReference(sink), ""), trigger.WithBrokerName(brokerName)} + + // Install the trigger + f.Setup("install trigger", trigger.Install(via, cfg...)) + f.Setup("trigger goes ready", trigger.IsReady(via)) + + f.Requirement("install source", eventshub.Install( + source, + eventshub.StartSenderToResource(broker.GVR(), brokerName), + eventshub.InputEvent(event), + )) + + f.Assert("channel has annotations and labels", func(ctx context.Context, t feature.T) { + d := dynamicclient.Get(ctx) + channelsImpls, err := d.Resource(channel_impl.GVR()). + Namespace(environment.FromContext(ctx).Namespace()). + List(ctx, metav1.ListOptions{}) + if err != nil { + t.Errorf("Failed to list channels (%v): %v", channel_impl.GVR(), err) + return + } + + channels, err := d.Resource(channel.GVR()). + Namespace(environment.FromContext(ctx).Namespace()). + List(ctx, metav1.ListOptions{}) + if err != nil { + t.Errorf("Failed to list channels (%v): %v", channel.GVR(), err) + return + } + + channels.Items = append(channels.Items, channelsImpls.Items...) + + if len(channels.Items) <= 0 { + t.Errorf("No channels found for resources: %#v or %#v", channel_impl.GVR(), channel.GVR()) + } + + found := false + for _, ch := range channels.Items { + for _, or := range ch.GetOwnerReferences() { + if or.Kind == "Broker" && or.Name == brokerName { + v, ok := ch.GetAnnotations()[key] + if !ok { + t.Errorf("Failed to find async handler annotation:\n%#v", ch) + return + } + if v != value { + t.Errorf("Failed to find expected '%s' value for annotation '%s':\n%#v", value, key, ch) + return + } + found = true + break + } + } + } + if !found { + bytes, _ := json.MarshalIndent(channels, "", " ") + t.Errorf("No channel found associated with broker %q\n%#v", brokerName, string(bytes)) + } + }) + f.Assert("event sent", eventassert.OnStore(source). + MatchSentEvent(test.HasId(event.ID())). + AtLeast(1), + ) + f.Assert("event received", eventassert.OnStore(sink). + MatchReceivedEvent(test.HasId(event.ID())). 
+		AtLeast(1),
+	)
+
+	return f
+}
+
 func BrokerRedelivery() *feature.FeatureSet {
 	fs := &feature.FeatureSet{
 		Name: "Knative Broker - Redelivery - with different sequences",
@@ -606,9 +711,9 @@ func brokerRedeliveryFibonacci(retryNum int32) *feature.Feature {
 
 	f.Stable("Broker Redelivery following the fibonacci sequence").
 		Must("delivers events",
-			eventasssert.OnStore(sink).Match(
-				eventasssert.MatchKind(eventasssert.EventReceived),
-				eventasssert.MatchEvent(
+			eventassert.OnStore(sink).Match(
+				eventassert.MatchKind(eventassert.EventReceived),
+				eventassert.MatchEvent(
 					test.HasSource(eventSource),
 					test.HasType(eventType),
 					test.HasData([]byte(eventBody)),
@@ -662,11 +767,11 @@ func brokerRedeliveryDropN(retryNum int32, dropNum uint) *feature.Feature {
 
 	f.Stable("Broker Redelivery failed the first n events").
 		Must("delivers events", func(ctx context.Context, t feature.T) {
-			eventasssert.OnStore(sink).
+			eventassert.OnStore(sink).
 				Match(features.HasKnNamespaceHeader(environment.FromContext(ctx).Namespace())).
 				Match(
-					eventasssert.MatchKind(eventasssert.EventReceived),
-					eventasssert.MatchEvent(
+					eventassert.MatchKind(eventassert.EventReceived),
+					eventassert.MatchEvent(
 						test.HasSource(eventSource),
 						test.HasType(eventType),
 						test.HasData([]byte(eventBody)),
@@ -734,7 +839,7 @@ func brokerSubscriberUnreachable() *feature.Feature {
 
 	f.Assert("Receives dls extensions when subscriber is unreachable",
 		func(ctx context.Context, t feature.T) {
-			eventasssert.OnStore(sink).
+			eventassert.OnStore(sink).
 				Match(features.HasKnNamespaceHeader(environment.FromContext(ctx).Namespace())).
 				MatchEvent(
 					test.HasExtension("knativeerrordest", subscriberUri),
@@ -880,8 +985,8 @@ func assertEnhancedWithKnativeErrorExtensions(sinkName string, matcherfns ...fun
 			ctx,
 			t,
 			1,
-			eventasssert.MatchKind(eventshub.EventReceived),
-			eventasssert.MatchEvent(matchers...),
+			eventassert.MatchKind(eventshub.EventReceived),
+			eventassert.MatchEvent(matchers...),
 		)
 	}
 }
@@ -936,7 +1041,7 @@ func brokerSubscriberLongMessage() *feature.Feature {
 	))
 
 	f.Assert("receive long event on sink exactly once",
-		eventasssert.OnStore(sink).
+		eventassert.OnStore(sink).
 			MatchEvent(test.HasData([]byte(eventBody))).
 			Exact(1),
 	)
@@ -1021,13 +1126,13 @@ func brokerSubscriberLongResponseMessage() *feature.Feature {
 	))
 
 	f.Assert("receive long event on sink1 exactly once",
-		eventasssert.OnStore(sink1).
+		eventassert.OnStore(sink1).
 			MatchEvent(test.HasData([]byte(eventBody))).
 			Exact(1),
 	)
 
 	f.Assert("receive long event on sink2 exactly once",
-		eventasssert.OnStore(sink2).
+		eventassert.OnStore(sink2).
 			MatchEvent(test.HasData([]byte(transformedEventBody))).
 			Exact(1),
 	)
diff --git a/vendor/knative.dev/eventing/test/rekt/features/channel/features.go b/vendor/knative.dev/eventing/test/rekt/features/channel/features.go
index da8a6e9bf5..22178a0720 100644
--- a/vendor/knative.dev/eventing/test/rekt/features/channel/features.go
+++ b/vendor/knative.dev/eventing/test/rekt/features/channel/features.go
@@ -25,6 +25,7 @@ import (
 	"github.com/cloudevents/sdk-go/v2/binding"
 	"github.com/cloudevents/sdk-go/v2/test"
 	"github.com/google/uuid"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	duckv1 "knative.dev/pkg/apis/duck/v1"
 	"knative.dev/pkg/network"
 	"knative.dev/reconciler-test/pkg/environment"
@@ -37,6 +38,7 @@ import (
 
 	eventasssert "knative.dev/reconciler-test/pkg/eventshub/assert"
 
+	v1 "knative.dev/eventing/pkg/apis/messaging/v1"
 	"knative.dev/eventing/test/rekt/features"
 	"knative.dev/eventing/test/rekt/resources/channel"
 	"knative.dev/eventing/test/rekt/resources/channel_impl"
@@ -134,6 +136,93 @@ func DeadLetterSink(createSubscriberFn func(ref *duckv1.KReference, uri string)
 	return f
 }
 
+func AsyncHandler(createSubscriberFn func(ref *duckv1.KReference, uri string) manifest.CfgFn) *feature.Feature {
+	f := feature.NewFeature()
+	sink := feature.MakeRandomK8sName("sink")
+	source := feature.MakeRandomK8sName("source")
+	name := feature.MakeRandomK8sName("channel")
+	sub := feature.MakeRandomK8sName("subscription")
+
+	event := test.FullEvent()
+	event.SetID(uuid.New().String())
+
+	f.Setup("install sink", eventshub.Install(sink, eventshub.StartReceiver))
+	f.Setup("install channel", channel_impl.Install(name, channel_impl.WithAnnotations(map[string]interface{}{
+		v1.AsyncHandlerAnnotation: "true",
+	})))
+	f.Setup("install subscription", subscription.Install(sub,
+		subscription.WithChannel(channel_impl.AsRef(name)),
+		createSubscriberFn(service.AsKReference(sink), ""),
+	))
+	f.Setup("channel is ready", channel_impl.IsReady(name))
+	f.Setup("subscription is ready", subscription.IsReady(sub))
+
+	f.Requirement("install source", eventshub.Install(source, eventshub.InputEvent(event), eventshub.StartSenderToResource(channel_impl.GVR(), name)))
+
+	f.Assert("Event sent", assert.OnStore(source).
+		MatchSentEvent(test.HasId(event.ID())).
+		AtLeast(1),
+	)
+	f.Assert("sink receives event", assert.OnStore(sink).
+		MatchEvent(test.HasId(event.ID())).
+		AtLeast(1),
+	)
+
+	return f
+}
+
+func AsyncHandlerUpdate(createSubscriberFn func(ref *duckv1.KReference, uri string) manifest.CfgFn) *feature.Feature {
+	f := feature.NewFeature()
+	sink := feature.MakeRandomK8sName("sink")
+	source := feature.MakeRandomK8sName("source")
+	name := feature.MakeRandomK8sName("channel")
+	sub := feature.MakeRandomK8sName("subscription")
+
+	event := test.FullEvent()
+	event.SetID(uuid.New().String())
+
+	f.Setup("install sink", eventshub.Install(sink, eventshub.StartReceiver))
+	f.Setup("install channel", channel_impl.Install(name, channel_impl.WithAnnotations(map[string]interface{}{
+		v1.AsyncHandlerAnnotation: "true",
+	})))
+	f.Setup("install subscription", subscription.Install(sub,
+		subscription.WithChannel(channel_impl.AsRef(name)),
+		createSubscriberFn(service.AsKReference(sink), ""),
+	))
+	f.Setup("channel is ready", channel_impl.IsReady(name))
+	f.Setup("subscription is ready", subscription.IsReady(sub))
+
+	f.Requirement("update channel async handler", func(ctx context.Context, t feature.T) {
+		dc := Client(ctx)
+
+		imc, err := dc.ChannelImpl.Get(ctx, name, metav1.GetOptions{})
+		if err != nil {
+			t.Fatalf("Failed to retrieve InMemoryChannel: %v", err)
+		}
+		// swap and update it to false
+		imc.SetAnnotations(map[string]string{
+			v1.AsyncHandlerAnnotation: "true",
+		})
+		if _, err := dc.ChannelImpl.Update(ctx, imc, metav1.UpdateOptions{}); err != nil {
+			t.Fatalf("Failed to update async handler annotation: %v", err)
+		}
+	})
+
+	f.Requirement("channel is ready", channel_impl.IsReady(name))
+	f.Requirement("install source", eventshub.Install(source, eventshub.InputEvent(event), eventshub.StartSenderToResource(channel_impl.GVR(), name)))
+
+	f.Assert("Event sent", assert.OnStore(source).
+		MatchSentEvent(test.HasId(event.ID())).
+		AtLeast(1),
+	)
+	f.Assert("sink receives event", assert.OnStore(sink).
+		MatchEvent(test.HasId(event.ID())).
+		AtLeast(1),
+	)
+
+	return f
+}
+
 func DeadLetterSinkGenericChannel(createSubscriberFn func(ref *duckv1.KReference, uri string) manifest.CfgFn) *feature.Feature {
 	f := feature.NewFeature()
 	sink := feature.MakeRandomK8sName("sink")
diff --git a/vendor/knative.dev/eventing/test/rekt/resources/broker/broker.go b/vendor/knative.dev/eventing/test/rekt/resources/broker/broker.go
index 81c170ad0d..7bd1e4ce34 100644
--- a/vendor/knative.dev/eventing/test/rekt/resources/broker/broker.go
+++ b/vendor/knative.dev/eventing/test/rekt/resources/broker/broker.go
@@ -51,6 +51,10 @@ type EnvConfig struct {
 	BrokerTemplatesDir string `envconfig:"BROKER_TEMPLATES"`
 }
 
+func (cfg EnvConfig) IsMTChannelBasedBroker() bool {
+	return cfg.BrokerClass == "" || cfg.BrokerClass == "MTChannelBasedBroker"
+}
+
 func init() {
 	// Process EventingGlobal.
 	if err := envconfig.Process("", &EnvCfg); err != nil {
diff --git a/vendor/knative.dev/eventing/test/rekt/resources/broker/broker.yaml b/vendor/knative.dev/eventing/test/rekt/resources/broker/broker.yaml
index ec73900e5b..ba28e55715 100644
--- a/vendor/knative.dev/eventing/test/rekt/resources/broker/broker.yaml
+++ b/vendor/knative.dev/eventing/test/rekt/resources/broker/broker.yaml
@@ -24,7 +24,7 @@ metadata:
   {{ end }}
   {{ if .annotations }}
   {{ range $key, $value := .annotations }}
-    {{ $key }}: {{ $value }}
+    {{ $key }}: "{{ $value }}"
   {{ end }}
   {{ end }}
 {{ end }}
diff --git a/vendor/knative.dev/eventing/test/rekt/resources/channel_impl/channel_impl.go b/vendor/knative.dev/eventing/test/rekt/resources/channel_impl/channel_impl.go
index 93d51230a6..7766cd03f6 100644
--- a/vendor/knative.dev/eventing/test/rekt/resources/channel_impl/channel_impl.go
+++ b/vendor/knative.dev/eventing/test/rekt/resources/channel_impl/channel_impl.go
@@ -173,6 +173,8 @@ func AsDestinationRef(name string) *duckv1.Destination {
 // WithDeadLetterSink adds the dead letter sink related config to a Subscription spec.
 var WithDeadLetterSink = delivery.WithDeadLetterSink
 
+var WithAnnotations = manifest.WithAnnotations
+
 // ValidateAddress validates the address retured by Address
 func ValidateAddress(name string, validate addressable.ValidateAddressFn, timings ...time.Duration) feature.StepFn {
 	return addressable.ValidateAddress(GVR(), name, validate, timings...)
diff --git a/vendor/knative.dev/eventing/test/rekt/resources/channel_impl/channel_impl.yaml b/vendor/knative.dev/eventing/test/rekt/resources/channel_impl/channel_impl.yaml
index 34eb667ca9..78a042beed 100644
--- a/vendor/knative.dev/eventing/test/rekt/resources/channel_impl/channel_impl.yaml
+++ b/vendor/knative.dev/eventing/test/rekt/resources/channel_impl/channel_impl.yaml
@@ -17,6 +17,12 @@ kind: {{ .kind }}
 metadata:
   name: {{ .name }}
   namespace: {{ .namespace }}
+  {{ if .annotations }}
+  annotations:
+    {{ range $key, $value := .annotations }}
+    {{ $key }}: "{{ $value }}"
+    {{ end }}
+  {{ end }}
 spec:
   {{ if .delivery }}
   delivery:
diff --git a/vendor/knative.dev/hack/infra-library.sh b/vendor/knative.dev/hack/infra-library.sh
index 83eacfa66c..ba5b181809 100644
--- a/vendor/knative.dev/hack/infra-library.sh
+++ b/vendor/knative.dev/hack/infra-library.sh
@@ -21,7 +21,7 @@ source "$(dirname "${BASH_SOURCE[0]:-$0}")/library.sh"
 
 # Default Kubernetes version to use for GKE, if not overridden with
 # the `--cluster-version` parameter.
-readonly GKE_DEFAULT_CLUSTER_VERSION="1.28"
+readonly GKE_DEFAULT_CLUSTER_VERSION="1.30"
 
 # Dumps the k8s api server metrics. Spins up a proxy, waits a little bit and
 # dumps the metrics to ${ARTIFACTS}/k8s.metrics.txt
diff --git a/vendor/knative.dev/pkg/hack/update-codegen.sh b/vendor/knative.dev/pkg/hack/update-codegen.sh
index e56abb7fc4..4f9ffea2a6 100644
--- a/vendor/knative.dev/pkg/hack/update-codegen.sh
+++ b/vendor/knative.dev/pkg/hack/update-codegen.sh
@@ -43,7 +43,7 @@ ${REPO_ROOT_DIR}/hack/generate-knative.sh "injection" \
 K8S_TYPES=$(find ./vendor/k8s.io/api -type d -path '*/*/*/*/*/*' | cut -d'/' -f 5-6 | sort | sed 's@/@:@g' | \
   grep -v "abac:" | \
   grep -v "admission:" | \
-  grep -v "admissionregistration:" \
+  grep -v "admissionregistration:" | \
   grep -v "componentconfig:" | \
   grep -v "imagepolicy:" | \
   grep -v "resource:" | \
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 78ded2989a..31d55cb8f0 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -445,12 +445,12 @@ go.uber.org/zap/zaptest
 ## explicit; go 1.20
 golang.org/x/crypto/md4
 golang.org/x/crypto/pbkdf2
-# golang.org/x/mod v0.21.0
+# golang.org/x/mod v0.22.0
 ## explicit; go 1.22.0
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.30.0
+# golang.org/x/net v0.31.0
 ## explicit; go 1.18
 golang.org/x/net/http/httpguts
 golang.org/x/net/http2
@@ -461,7 +461,7 @@ golang.org/x/net/internal/socks
 golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.22.0
+# golang.org/x/oauth2 v0.23.0
 ## explicit; go 1.18
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
@@ -486,7 +486,7 @@ golang.org/x/text/unicode/norm
 # golang.org/x/time v0.6.0
 ## explicit; go 1.18
 golang.org/x/time/rate
-# golang.org/x/tools v0.26.0
+# golang.org/x/tools v0.27.0
 ## explicit; go 1.22.0
 golang.org/x/tools/go/ast/astutil
 golang.org/x/tools/go/gcexportdata
@@ -515,14 +515,14 @@ gomodules.xyz/jsonpatch/v2
 # google.golang.org/api v0.183.0
 ## explicit; go 1.20
 google.golang.org/api/support/bundler
-# google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142
+# google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1
 ## explicit; go 1.21
 google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1
 ## explicit; go 1.21
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.67.1
-## explicit; go 1.21
+# google.golang.org/grpc v1.68.0
+## explicit; go 1.22.7
 google.golang.org/grpc
 google.golang.org/grpc/attributes
 google.golang.org/grpc/backoff
@@ -530,6 +530,8 @@ google.golang.org/grpc/balancer
 google.golang.org/grpc/balancer/base
 google.golang.org/grpc/balancer/grpclb/state
 google.golang.org/grpc/balancer/pickfirst
+google.golang.org/grpc/balancer/pickfirst/internal
+google.golang.org/grpc/balancer/pickfirst/pickfirstleaf
 google.golang.org/grpc/balancer/roundrobin
 google.golang.org/grpc/binarylog/grpc_binarylog_v1
 google.golang.org/grpc/channelz
@@ -579,7 +581,7 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.35.1
+# google.golang.org/protobuf v1.35.2
 ## explicit; go 1.21
 google.golang.org/protobuf/encoding/protodelim
 google.golang.org/protobuf/encoding/protojson
@@ -1167,12 +1169,13 @@ k8s.io/utils/pointer
 k8s.io/utils/ptr
 k8s.io/utils/strings/slices
 k8s.io/utils/trace
-# knative.dev/eventing v0.43.1-0.20241028083747-ef6b31a697e7
-## explicit; go 1.22.0
+# knative.dev/eventing v0.43.1-0.20241223131119-c9047a198255
+## explicit; go 1.22.7
 knative.dev/eventing/cmd/event_display
 knative.dev/eventing/cmd/heartbeats
 knative.dev/eventing/hack
 knative.dev/eventing/pkg/apis
+knative.dev/eventing/pkg/apis/common/integration/v1alpha1
 knative.dev/eventing/pkg/apis/config
 knative.dev/eventing/pkg/apis/duck
 knative.dev/eventing/pkg/apis/duck/v1
@@ -1195,6 +1198,7 @@ knative.dev/eventing/pkg/apis/sinks/v1alpha1
 knative.dev/eventing/pkg/apis/sources
 knative.dev/eventing/pkg/apis/sources/config
 knative.dev/eventing/pkg/apis/sources/v1
+knative.dev/eventing/pkg/apis/sources/v1alpha1
 knative.dev/eventing/pkg/apis/sources/v1beta2
 knative.dev/eventing/pkg/auth
 knative.dev/eventing/pkg/broker
@@ -1219,6 +1223,8 @@ knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1
 knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/fake
 knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1
 knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/fake
+knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1
+knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake
 knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2
 knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/fake
 knative.dev/eventing/pkg/client/informers/externalversions
@@ -1237,6 +1243,7 @@ knative.dev/eventing/pkg/client/informers/externalversions/sinks
 knative.dev/eventing/pkg/client/informers/externalversions/sinks/v1alpha1
 knative.dev/eventing/pkg/client/informers/externalversions/sources
 knative.dev/eventing/pkg/client/informers/externalversions/sources/v1
+knative.dev/eventing/pkg/client/informers/externalversions/sources/v1alpha1
 knative.dev/eventing/pkg/client/informers/externalversions/sources/v1beta2
 knative.dev/eventing/pkg/client/injection/client
 knative.dev/eventing/pkg/client/injection/client/fake
@@ -1261,6 +1268,7 @@ knative.dev/eventing/pkg/client/listers/flows/v1
 knative.dev/eventing/pkg/client/listers/messaging/v1
 knative.dev/eventing/pkg/client/listers/sinks/v1alpha1
 knative.dev/eventing/pkg/client/listers/sources/v1
+knative.dev/eventing/pkg/client/listers/sources/v1alpha1
 knative.dev/eventing/pkg/client/listers/sources/v1beta2
 knative.dev/eventing/pkg/crossnamespace
 knative.dev/eventing/pkg/duck
@@ -1277,17 +1285,6 @@ knative.dev/eventing/pkg/reconciler/testing
 knative.dev/eventing/pkg/reconciler/testing/scheme
 knative.dev/eventing/pkg/reconciler/testing/v1
 knative.dev/eventing/pkg/scheduler
-knative.dev/eventing/pkg/scheduler/factory
-knative.dev/eventing/pkg/scheduler/plugins/core/availabilitynodepriority
-knative.dev/eventing/pkg/scheduler/plugins/core/availabilityzonepriority
-knative.dev/eventing/pkg/scheduler/plugins/core/evenpodspread
-knative.dev/eventing/pkg/scheduler/plugins/core/lowestordinalpriority
-knative.dev/eventing/pkg/scheduler/plugins/core/podfitsresources
-knative.dev/eventing/pkg/scheduler/plugins/core/removewithavailabilitynodepriority
-knative.dev/eventing/pkg/scheduler/plugins/core/removewithavailabilityzonepriority
-knative.dev/eventing/pkg/scheduler/plugins/core/removewithevenpodspreadpriority
-knative.dev/eventing/pkg/scheduler/plugins/core/removewithhighestordinalpriority
-knative.dev/eventing/pkg/scheduler/plugins/kafka/nomaxresourcecount
 knative.dev/eventing/pkg/scheduler/state
 knative.dev/eventing/pkg/scheduler/statefulset
 knative.dev/eventing/pkg/tracing
@@ -1353,11 +1350,11 @@ knative.dev/eventing/test/upgrade/prober/wathola/fetcher
 knative.dev/eventing/test/upgrade/prober/wathola/forwarder
 knative.dev/eventing/test/upgrade/prober/wathola/receiver
 knative.dev/eventing/test/upgrade/prober/wathola/sender
-# knative.dev/hack v0.0.0-20241025103803-ef6e7e983a60
+# knative.dev/hack v0.0.0-20241227080210-e92a16ae0893
 ## explicit; go 1.21
 knative.dev/hack
-# knative.dev/pkg v0.0.0-20241026180704-25f6002b00f3
-## explicit; go 1.22.0
+# knative.dev/pkg v0.0.0-20241223131119-4c901591eb4a
+## explicit; go 1.22.7
 knative.dev/pkg/apiextensions/storageversion
 knative.dev/pkg/apiextensions/storageversion/cmd/migrate
 knative.dev/pkg/apis
@@ -1476,8 +1473,8 @@ knative.dev/pkg/webhook/resourcesemantics
 knative.dev/pkg/webhook/resourcesemantics/conversion
 knative.dev/pkg/webhook/resourcesemantics/defaulting
 knative.dev/pkg/webhook/resourcesemantics/validation
-# knative.dev/reconciler-test v0.0.0-20241024141702-aae114c1c0e3
-## explicit; go 1.22.0
+# knative.dev/reconciler-test v0.0.0-20241223131247-96258bea6ce4
+## explicit; go 1.22.7
knative.dev/reconciler-test/cmd/eventshub
knative.dev/reconciler-test/pkg/environment
knative.dev/reconciler-test/pkg/eventshub
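
The annotation-quoting changes in broker.yaml and channel_impl.yaml above matter because an unquoted template value such as `false` or `true` is rendered as a YAML boolean, while `metadata.annotations` is a string-to-string map. A minimal sketch of the difference, assuming the vendored sigs.k8s.io/yaml package and using `messaging.knative.dev/async-handler` only as an illustrative annotation key:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

func main() {
	var meta struct {
		Annotations map[string]string `json:"annotations"`
	}

	// Unquoted template output: `false` parses as a YAML boolean and cannot
	// be decoded into a string-valued annotations map.
	unquoted := []byte("annotations:\n  messaging.knative.dev/async-handler: false\n")
	if err := yaml.Unmarshal(unquoted, &meta); err != nil {
		fmt.Println("unquoted value rejected:", err)
	}

	// Quoted output, as the updated templates emit, stays a string.
	quoted := []byte("annotations:\n  messaging.knative.dev/async-handler: \"false\"\n")
	if err := yaml.Unmarshal(quoted, &meta); err != nil {
		fmt.Println("unexpected error:", err)
		return
	}
	fmt.Printf("quoted value preserved: %q\n", meta.Annotations["messaging.knative.dev/async-handler"])
}
```

Quoting every annotation value in the templates keeps string-typed values like "true"/"false" intact, which is what the AsyncHandler and PropagatesMetadata features rely on when they read the annotation back off the created channels.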